diff --git a/fastgpt_uploader.py b/fastgpt_uploader.py
index 1f61034..6e4260a 100644
--- a/fastgpt_uploader.py
+++ b/fastgpt_uploader.py
@@ -1,6 +1,5 @@
 import json
 import requests
-from markdownify import markdownify as md
 
 from config import __CONFIG__
 
diff --git a/main.py b/main.py
index f9a4945..f67316f 100644
--- a/main.py
+++ b/main.py
@@ -5,6 +5,7 @@ from config import __CONFIG__
 import mysql_connector
 from fastgpt_uploader import upload2fastgpt
 from semanticscholar import search_paper
+from rss import load_rss
 
 app = FastAPI()
 
@@ -34,6 +35,10 @@ async def get_reference(questions):
         mysql_connector.new_load(i['id'])
     return res
 
+@app.get("/rss")
+async def miniflux_rss():
+    return {"loaded": load_rss()}
+
 if __name__ == '__main__':
     import uvicorn
     uvicorn.run(app, host="127.0.0.1", port=8964)
diff --git a/rss.py b/rss.py
new file mode 100644
index 0000000..ca88153
--- /dev/null
+++ b/rss.py
@@ -0,0 +1,55 @@
+import miniflux
+import time
+
+from config import __CONFIG__
+from fastgpt_uploader import upload2fastgpt
+
+# Authenticate against the Miniflux API using an API token
+client = miniflux.Client(__CONFIG__['miniflux_host'], api_key=__CONFIG__['miniflux_api'])
+
+def load_content():
+    """Fetch all unread entries from Miniflux, retrying on failure."""
+    for _ in range(10):
+        try:
+            entries = client.get_entries(status=['unread'])['entries']
+            print(f"Downloaded {len(entries)} entries")
+            return entries
+        except Exception as e:
+            print(f"Error downloading entries: {e}")
+            time.sleep(10)
+    return []
+
+def update(ids):
+    """Mark the given entry ids as read, retrying on failure."""
+    for _ in range(10):
+        try:
+            client.update_entries(entry_ids=ids, status='read')
+            return True
+        except Exception as e:
+            print(f"Error updating entries: {e}")
+            time.sleep(60)
+    return False
+
+def load_rss():
+    """Push unread Miniflux entries to FastGPT in batches of 100 and mark them as read."""
+    count = 0
+    loaded = []
+    uploads = []
+    for entry in load_content():
+        loaded.append(entry['id'])
+        uploads.append({
+            'q': entry['title'],
+            'a': entry['content'],
+            'score': []
+        })
+        count += 1
+        # Only mark a full batch as read once it has been uploaded successfully
+        if len(loaded) >= 100 and upload2fastgpt(uploads):
+            update(loaded)
+            loaded = []
+            uploads = []
+    # Upload any remaining partial batch before marking it as read
+    if loaded and upload2fastgpt(uploads):
+        update(loaded)
+    print(f"Total entries loaded: {count}")
+    return count