from fastapi import FastAPI
import urllib.parse
from config import __CONFIG__
import mysql_connector
from fastgpt_uploader import upload2fastgpt
from semanticscholar import search_paper
import rss

app = FastAPI()


@app.get("/fastdoi")
async def get_reference(questions: str):
    """Search Semantic Scholar for the query and upload new papers to FastGPT."""
    print('Search: ' + questions)
    questions = urllib.parse.quote(questions)
    res = []
    try:
        papers = search_paper(questions)
        for paper in papers:
            if mysql_connector.is_loaded(paper['paperId']):
                # Paper is already in the knowledge base; skip it.
                print(paper['paperId'])
            else:
                print(paper['citationStyles']['bibtex'])
                res.append({
                    'id': paper['paperId'],
                    'q': str(paper['citationStyles']['bibtex']),
                    'a': str(paper['abstract']),
                    'score': []
                })
        print('New load: ' + str(len(res)) + '/' + str(len(papers)))
    except Exception as e:
        print(str(e))
    # Only mark papers as loaded after a successful upload to FastGPT.
    if upload2fastgpt(res):
        for item in res:
            mysql_connector.new_load(item['id'])
    return res


@app.get("/rss")
async def load_rss():
    """Fetch RSS entries and upload them to FastGPT in batches of 100."""
    count = 0
    loaded = []
    uploads = []
    for entry in rss.load_content():
        loaded.append(entry['id'])
        uploads.append({
            'q': entry['title'],
            'a': entry['content']
        })
        count += 1
        if len(loaded) >= 100 and upload2fastgpt(uploads):
            # Batch uploaded successfully; record the entry ids and start a new batch.
            rss.update(loaded)
            loaded = []
            uploads = []
    # Flush any remaining entries that did not fill a full batch.
    if len(loaded) > 0 and upload2fastgpt(uploads):
        rss.update(loaded)
    print(f"Total entries loaded: {count}")
    return f"Total entries loaded: {count}"


if __name__ == '__main__':
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8964)
    # Close the MySQL connection once the server has shut down.
    mysql_connector.end_mysql()