fastdoi/main.py

from fastapi import FastAPI
import urllib.parse
from config import __CONFIG__
import mysql_connector
from fastgpt_uploader import upload2fastgpt
from semanticscholar import search_paper
import rss
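# Note: config, mysql_connector, fastgpt_uploader, semanticscholar, and rss
# appear to be local helper modules in this repo rather than PyPI packages.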

app = FastAPI()


@app.get("/fastdoi")
async def get_reference(questions: str):
    # Log the incoming query before hitting Semantic Scholar.
    print(f'Search: {questions}')
    res = []
    try:
        # URL-encode the query and search Semantic Scholar for matching papers.
        papers = search_paper(urllib.parse.quote(questions))
        for i in papers:
            if mysql_connector.is_loaded(i['paperId']):
                # Already recorded in MySQL from a previous run; skip it.
                print(i['paperId'])
            else:
                print(i['citationStyles']['bibtex'])
                res.append({
                    'id': i['paperId'],
                    'q': str(i['citationStyles']['bibtex']),
                    'a': str(i['abstract']),
                    'score': []
                })
        print(f'New load: {len(res)}/{len(papers)}')
    except Exception as e:
        print(str(e))
    # Only mark papers as loaded once the FastGPT upload succeeds.
    if upload2fastgpt(res):
        for i in res:
            mysql_connector.new_load(i['id'])
    return res
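
# Example request (hypothetical query; assumes the server below is running):
#   curl 'http://127.0.0.1:8964/fastdoi?questions=attention%20mechanisms'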


@app.get("/rss")
async def load_rss():
    count = 0
    loaded = []   # IDs of entries fetched but not yet marked as uploaded
    uploads = []  # payloads queued for the next FastGPT upload
    # Keep pulling batches of entries until the feed source is exhausted.
    while entries := rss.load_content():
        for entry in entries:
            loaded.append(entry['id'])
            uploads.append({
                'q': entry['title'],
                'a': entry['content']
            })
            count += 1
        # Flush to FastGPT once at least 100 entries have accumulated,
        # then reset the buffers.
        if len(loaded) >= 100 and upload2fastgpt(uploads):
            rss.update(loaded)
            loaded = []
            uploads = []
    # Flush whatever remains after the final batch.
    if len(loaded) > 0 and upload2fastgpt(uploads):
        rss.update(loaded)
    print(f"Total entries loaded: {count}")
    return f"Total entries loaded: {count}"


if __name__ == '__main__':
    import uvicorn

    # Serve the API locally; end_mysql() closes the DB connection after the
    # server shuts down.
    uvicorn.run(app, host="127.0.0.1", port=8964)
    mysql_connector.end_mysql()