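"""Pairwise semantic similarity between questionnaire scale items, computed with
OpenAI embeddings; also generates a random survey dataset and item-level
correlations."""
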
import json
import os
import random
import pandas
import numpy
# import matplotlib
from sklearn.metrics.pairwise import cosine_similarity
from openai import OpenAI
os.environ["OPENAI_API_KEY"]= "sk-PRJ811XeKzEy20Ug3dA98a34Af8b40B5816dE15503D33599"
|
|
|
|
os.environ["OPENAI_BASE_URL"]= "http://154.9.28.247:3000/v1/"
|
2024-05-14 14:46:44 +00:00
|
|
|
client = OpenAI()  # reads OPENAI_API_KEY / OPENAI_BASE_URL from the environment


def batch():
    """Collect every item from all scale files in Scales/ into one dict."""
    scales = os.listdir("Scales")
    items = {}
    for name in scales:
        with open("Scales/" + name, "r") as scale:
            tmp = json.load(scale)
            for key in tmp["item"]:
                items[key] = tmp["item"][key]
    # print(items)
    return items


def old_type(path):
    """Convert a scale file from the old list format to the {"item": {...}} format."""
    with open(path, "r") as file:
        scale = json.load(file)
    new = {"item": {}}
    for entry in scale:
        new["item"][entry["name"]] = entry["label"]
        # print(entry["name"], entry["label"])
    with open(path, "w") as file:
        file.write(json.dumps(new))


def calc_similarity(scale):
    """Embed every item and return pairwise cosine similarities."""
    item = []
    vec = []
    for key in scale:
        item.append(key)
        vec.append(client.embeddings.create(
            input=scale[key],
            model="text-embedding-3-large",  # alternatives: nomic-embed-text, text-embedding-3-small
        ).data[0].embedding)
    simi = cosine_similarity(vec)
    que = []
    # Walk the lower triangle of the similarity matrix so each pair appears once.
    for i in range(len(simi)):
        for j in range(i):
            que.append({"from": item[j], "to": item[i], "similarity": simi[i][j]})
    return que
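
# Shape of one element in the returned list (names and value illustrative):
#   {"from": "<item name A>", "to": "<item name B>", "similarity": 0.83}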


def similarity(force: bool = False, sort: bool = True):
    """Return the pairwise similarity list, using Temp/items.json as a cache."""
    # Guard with os.path.exists first: os.path.getsize raises FileNotFoundError
    # when the cache file has never been written.
    if force or not os.path.exists("Temp/items.json") or os.path.getsize("Temp/items.json") == 0:
        que = calc_similarity(batch())
        with open("Temp/items.json", "w") as items:
            items.write(json.dumps(que))
    else:
        with open("Temp/items.json", "r") as items:
            que = json.load(items)
    if sort:
        return sorted(que, key=lambda t: t["similarity"], reverse=True)
    else:
        return que


def make_data():
    """Write data.csv: a header of item names plus 1000 rows of random responses."""
    item = batch()
    s = ','.join(item) + '\n'
    for _ in range(1000):
        # One random Likert-style response (0-4) per item, so every row has
        # exactly as many columns as the header.
        s += ','.join(str(random.randint(0, 4)) for _ in item) + '\n'
    with open("data.csv", "w") as data:
        data.write(s)


def correlation():
    """Return Pearson correlations for every pair of columns in the survey data."""
    # NOTE: reads Work/data.csv, while make_data() writes data.csv; adjust the
    # path to match where the data file actually lives.
    data = pandas.read_csv("Work/data.csv")
    que = {}
    for i in data:
        for j in data:
            if i != j:
                try:
                    que[i, j] = data[i].corr(data[j])
                    que[j, i] = que[i, j]  # correlation is symmetric
                except Exception:
                    pass
            else:
                # Columns arrive in order, so breaking at the diagonal skips
                # pairs already filled in via symmetry.
                break
    return que
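

# A minimal usage sketch, assuming Scales/ and Temp/ exist, the embedding
# endpoint above is reachable, and the CSV paths in make_data()/correlation()
# agree; the 0.8 threshold is illustrative, not from the original script.
if __name__ == "__main__":
    # Highest-similarity item pairs first; uses the Temp/items.json cache.
    for pair in similarity()[:10]:
        print(f'{pair["from"]} <-> {pair["to"]}: {pair["similarity"]:.3f}')

    make_data()
    corr = correlation()
    for (a, b), r in corr.items():
        if a < b and r > 0.8:  # each pair once, strongly correlated only
            print(a, b, round(r, 3))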