diff --git a/.gitignore b/.gitignore index bcbc695..ff79ac5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ backend/__pycache__/ - +ollama +deployment/ollama diff --git a/README.md b/README.md index 9683429..86d8fa4 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,38 @@ # Ollama bot - After deploy: ## WebUI for Ollama: - * http://localhost:8888 * use to install models like llama2, llama3 (https://ollama.com/library) ## Frontend - * simple FE: http://localhost:5000/ +### Stack +* Gitea actions + Google Lighthouse +* Gitea actions + Playwright +* Nuxt.js + Bootstrap 5 + ## Backend: - * http://localhost:5000/openapi/swagger * http://localhost/backend/openapi/swagger +### Stack +* FastAPI +* RabbitMQ/Kafka? +* OpenSearch + ### Push image ```bash -sudo docker tag llm-python-backend nucberlin:5123/llm-python-backend +docker login registry.tobiasweise.dev +docker-compose push -sudo docker push nucberlin:5123/llm-python-backend +#sudo docker tag llm-python-backend nucberlin:5123/llm-python-backend +#sudo docker push nucberlin:5123/llm-python-backend ``` ---- diff --git a/backend/Dockerfile b/backend/Dockerfile index 6f77094..6ce6299 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -1,5 +1,5 @@ FROM python:3.12 - +RUN apt-get update && apt-get install -y firefox-esr #RUN curl https://ollama.ai/install.sh | sh #RUN ollama run llama2 @@ -8,4 +8,5 @@ RUN pip3 install -r requirements.txt COPY . . 
ENTRYPOINT ["python3", "app.py"] +#ENTRYPOINT ["fastapi", "run", "main.py", "--port", "8000"] diff --git a/backend/app.py b/backend/app.py index 5e6d495..c2ebd8c 100644 --- a/backend/app.py +++ b/backend/app.py @@ -214,7 +214,7 @@ for env_key, conf_key in env_to_conf.items(): app.config[conf_key] = x - +#TODO add history def ask_bot(question, bot_id): bot = Chatbot.get(id=bot_id) @@ -247,30 +247,18 @@ def sockcon(data): socket.emit('backend response', {'msg': f'Connected to room {room} !', "room": room}) # looks like iOS needs an answer +#TODO: pydantic message type validation + + @socket.on('client message') def handle_message(message): - #room = message["room"] - #stream_key = "chatbot_stream" - - #llm = Ollama( - # model="llama3", - # base_url="http://ollama:11434" - #) - - #system_prompt = "" - - #query = system_prompt + " " + message["data"] - #print(message["data"]) - - #for chunks in llm.stream(query): - # socket.emit('backend token', {'data': chunks, "done": False}, to=room) - - #socket.emit('backend token', {'done': True}, to=room) - + #try: room = message["room"] question = message["question"] bot_id = message["bot_id"] + #except: + # return for chunk in ask_bot(question, bot_id): socket.emit('backend token', {'data': chunk, "done": False}, to=room) @@ -585,15 +573,13 @@ def get_schema(): @app.route("/") #Index Verzeichnis def index(): - return send_from_directory('.', "index.html") + return send_from_directory('./public', "index.html") -#@app.route("/info") #spezielle Nutzer definierte Route -#def info(): -# return sys.version+" "+os.getcwd() @app.route('/') #generische Route (auch Unterordner) def catchAll(path): - return send_from_directory('.', path) + #return send_from_directory('.', path) + return send_from_directory('./public', path) diff --git a/backend/main.py b/backend/main.py new file mode 100644 index 0000000..96b8355 --- /dev/null +++ b/backend/main.py @@ -0,0 +1,126 @@ +from fastapi import FastAPI +from fastapi.responses import HTMLResponse 
+from jinja2 import Environment, FileSystemLoader +from pydantic import BaseModel + +from neo4j import GraphDatabase + +import os, sys +from multiprocessing import Pool +from bs4 import BeautifulSoup +import requests + + +from webbot import * #Bot, innerHTML +from xing import * + + +env = Environment(loader=FileSystemLoader('templates')) +app = FastAPI() + +class JobSearch(BaseModel): + location: str + language: str + + + +def xing_job_search(location: str, radius: int) -> list: + with Bot() as bot: + vars_ = { + "page": 1, + "filter.industry%5B%5D": 90000, + "filter.type%5B%5D": "FULL_TIME", + "filter.level%5B%5D": 2, + "location": location, + "radius": radius + } + start_url = "https://www.xing.com/jobs/search?" + "&".join([k + "=" + str(v) for k, v in vars_.items()]) + + + def kill_cookie_questions(): + bot.click_id("consent-accept-button") + + + def next_page(): + nav = bot.get_elements_by_tag_name("nav")[1] + next_site_link = get_elements_by_tag_name(nav, "a")[-1] + bot.click(next_site_link) + + def get_nr_pages(): + nav = bot.get_elements_by_tag_name("nav")[1] + return int(get_elements_by_tag_name(nav, "a")[-2].text) + + def get_items(): + rs = [] + for article in bot.get_elements_by_tag_name("article"): + rs.append( get_children(article)[0].get_attribute("href") ) + return rs + + return collect_pagination_items(bot, start_url, next_page, get_nr_pages, get_items, kill_cookie_questions) + + + + + +""" +pwd = "neo4j2" +proto = "bolt" +host = "192.168.99.101" + +driver = GraphDatabase.driver("%s://%s:7687" % (proto, host), auth=("neo4j", pwd), encrypted=False) + +def add_friend(tx, name, friend_name): + tx.run("MERGE (a:Person {name: $name}) " + "MERGE (a)-[:KNOWS]->(friend:Person {name: $friend_name})", + name=name, friend_name=friend_name) + +def print_friends(tx, name): + for record in tx.run("MATCH (a:Person)-[:KNOWS]->(friend) WHERE a.name = $name " + "RETURN friend.name ORDER BY friend.name", name=name): + print(record["friend.name"]) + +with 
driver.session() as session: + session.write_transaction(add_friend, "Arthur", "Guinevere") + session.write_transaction(add_friend, "Arthur", "Lancelot") + session.write_transaction(add_friend, "Arthur", "Merlin") + session.read_transaction(print_friends, "Arthur") + +driver.close() +""" + + +@app.post("/search") +def job_search(js: JobSearch): + + + #https://berlinstartupjobs.com/?s=python&page=3 + + location = "Berlin" + radius = 50 + + + with Bot() as bot: + vars_ = { + "page": 1, + "filter.industry%5B%5D": 90000, + "filter.type%5B%5D": "FULL_TIME", + "filter.level%5B%5D": 2, + "location": location, + "radius": radius + } + start_url = "https://www.xing.com/jobs/search?" + "&".join([k + "=" + str(v) for k, v in vars_.items()]) + + bot.set_url(start_url) + return bot.get_page_content() + + + + +@app.get("/") +async def root(): + template = env.get_template('index.twig') + html = template.render() + return HTMLResponse(html) + + + diff --git a/backend/public/favicon.svg b/backend/public/favicon.svg new file mode 100644 index 0000000..4f3ee2a --- /dev/null +++ b/backend/public/favicon.svg @@ -0,0 +1,82 @@ + + + + + + + + + + + + + C + + diff --git a/backend/index.html b/backend/public/index.html similarity index 64% rename from backend/index.html rename to backend/public/index.html index d6107ec..9bef2c5 100644 --- a/backend/index.html +++ b/backend/public/index.html @@ -3,6 +3,9 @@ Ollama Chatbot + + + @@ -19,12 +22,64 @@ -
-

Ollama Chatbot

-

Create and talk to chatbots!

+ + +
+

Creative Bots

+

Create and talk to chatbots!

+
+
@@ -35,8 +90,18 @@
+ + + + + + + + +
@@ -44,10 +109,14 @@
+ + + + + + + + + + + + + + + + + +
@@ -103,6 +201,9 @@

+
+ + @@ -163,6 +264,8 @@
--> + + @@ -198,7 +301,6 @@