From d35e9d7a21b34a1642f4bd11b46e4a6d338ddc5f Mon Sep 17 00:00:00 2001 From: Tobias Weise Date: Tue, 30 Apr 2024 01:08:06 +0200 Subject: [PATCH] add files --- .env | 2 + backend/Dockerfile | 11 + .../__pycache__/elastictools.cpython-310.pyc | Bin 0 -> 1621 bytes backend/__pycache__/funcs.cpython-310.pyc | Bin 0 -> 1206 bytes backend/app.py | 160 ++ backend/backend.env | 0 backend/elastictools.py | 45 + backend/funcs.py | 36 + backend/index.html | 77 + backend/models.py | 93 ++ backend/old_app.py | 1322 +++++++++++++++++ backend/requirements.txt | 14 + backend/w3.css | 361 +++++ docker-compose.yml | 116 ++ nginx.conf | 59 + 15 files changed, 2296 insertions(+) create mode 100644 .env create mode 100644 backend/Dockerfile create mode 100644 backend/__pycache__/elastictools.cpython-310.pyc create mode 100644 backend/__pycache__/funcs.cpython-310.pyc create mode 100644 backend/app.py create mode 100644 backend/backend.env create mode 100644 backend/elastictools.py create mode 100644 backend/funcs.py create mode 100644 backend/index.html create mode 100644 backend/models.py create mode 100644 backend/old_app.py create mode 100644 backend/requirements.txt create mode 100755 backend/w3.css create mode 100644 docker-compose.yml create mode 100644 nginx.conf diff --git a/.env b/.env new file mode 100644 index 0000000..8595209 --- /dev/null +++ b/.env @@ -0,0 +1,2 @@ +APP_PREFIX=llm + diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..6f77094 --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,11 @@ +FROM python:3.12 + +#RUN curl https://ollama.ai/install.sh | sh +#RUN ollama run llama2 + +COPY requirements.txt requirements.txt +RUN pip3 install -r requirements.txt +COPY . . 
+ +ENTRYPOINT ["python3", "app.py"] + diff --git a/backend/__pycache__/elastictools.cpython-310.pyc b/backend/__pycache__/elastictools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..719b3146f253e1773997d2cfb7482cf179dad739 GIT binary patch literal 1621 zcmZux&yU`uS; z9p`teyx#h#e1cv5iol%2VT8FQ;cnt`FY!1{DEAY;)q^D9ouotT8m|nK5Jy_B@ov)P zy`X;NyOKa^;Qp1-$*w2AQ|w@WRnk*A>T^2_;#|*caj~xo9yDc4)j+qCPb_c(Uh_w+rpU}%sX=8?elT> zNN^aixJOGVf3U1xh$&L2r^ZG?EZm?aN>^f?o~_UAJ>Z#41H86 zZRkOfYw4odg!i+u%qD0GzgXDwvhpx?O>`*qLs3`kfs{g;=s}q&UE~Tfna|N49JP`m z_u|lm2O_UIRQBLZgXw%$X_$dDozG$UjgZXv4@;4u`H`q*h91M2HoaU_6|6!qL0%Si zzh6`g&P-IWbXq{k(kkQV8LCgSvIY~igG#kX6=)Mk(6y{`tcO=^4W$tKeeCKn0wWiG zLti+T&Vlpw@UeRlTyz?rxlQnc!#qu1xJ~DhpiF=CKX5MmCjs*txA6*65L_3q&U2TA z&)ti#aZbF3G$C7i<|5Y)veG>?&UruAXZxuVwaj7v+#7u~x*~TC)gZYE7g~Ur7FZxWOY?cACqj=WS$+z*5mqIkg(%hjVrlyS`c|+W{z_Rls5>CH zh)XGP|H3bD3H>v;8usM7Xv`_X%NC$uiEN2`9|i3+PD2=Z=AIDd7tV^5pU8LQf+C^r zC6c8*q{w6R6(NDGUcXj&4^~;Zi6v1d&L|tsO%jp;mG9XV!Rqeh9x5-Jm2C>z zkVhzBg|+WigSE#z4BTq8aaS7Fn}2A0iPZzfn`ywX0$p&xEA3$%7~DP{5_{67&-hAb z5-mv9o9^z+K&Q5n?Cump28lPzMxyMiFe376*`7l*!1(HjFywYBK$^-P$L(v&3M%*E0xHFvHnfYdx_4an3Aw76G%zpP6`-_z8 z6r%DivMEqV7BdNw7f|w;mtO3ZA{M0|`<))dLD`FYKt5g(#-S8Ss6g>kP(0&1Y==o7 zEx3|_@)h4$vq1J_cq!s7xh4CTEbdG1gpF=oeA54JUZrhbS5c8q`|mLWHtIR=akX^* z&-12GPW+IY)`_D$Z8gWwE_{*{MKZ-t;g1b%O^T~i{zJqfIi+7-D#Pk5gKgZ))U8-@3nIp^bNjlRi zpSHP4XX9gCpQ*Gp=$Zew)ImHCtYVLrwR56W833w20f82U*G_Kg?9J|7JK1LMvWPyhe` literal 0 HcmV?d00001 diff --git a/backend/app.py b/backend/app.py new file mode 100644 index 0000000..e76b800 --- /dev/null +++ b/backend/app.py @@ -0,0 +1,160 @@ + +""" +OpenAPI access via http://localhost:5000/openapi/ on local docker-compose deployment + +""" +#import warnings +#warnings.filterwarnings("ignore") + +#std lib modules: +import os, sys, json +from typing import Any, Tuple, List, Dict, Any, Callable, Optional +from datetime import datetime, date +from collections import namedtuple +import hashlib, traceback, logging + 
+#llm +from langchain.callbacks.manager import CallbackManager +from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler +from langchain_community.llms import Ollama + +#import openai #even used? +import tiktoken +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain.chains import RetrievalQA +#from langchain.callbacks import get_openai_callback +#from langchain_community.callbacks import get_openai_callback + +#from langchain_openai import ChatOpenAI, AzureChatOpenAI +#from langchain_openai import OpenAIEmbeddings, AzureOpenAIEmbeddings +from langchain_community.vectorstores.elasticsearch import ElasticsearchStore +#from langchain.document_loaders import PyPDFLoader, Docx2txtLoader +from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader + +from langchain.callbacks.base import BaseCallbackHandler, BaseCallbackManager +from langchain.prompts import PromptTemplate + +#ext libs +from elasticsearch import NotFoundError, Elasticsearch # for normal read/write without vectors +from elasticsearch_dsl import Search, A +from elasticsearch_dsl import Document, Date, Integer, Keyword, Float, Long, Text, connections + +from pydantic import BaseModel, Field + +#flask, openapi +from flask import Flask, send_from_directory, Response, request, jsonify +import sys, os +from flask_cors import CORS, cross_origin +from werkzeug.utils import secure_filename +from flask_openapi3 import Info, Tag, OpenAPI, Server, FileStorage +from flask_socketio import SocketIO, join_room, leave_room, rooms, send + + +#home grown +#from scraper import WebScraper +from funcs import group_by +#from elastictools import update_by_id, delete_by_id + +#TODO: implement some kind of logging mechanism +#logging.basicConfig(filename='record.log', level=logging.DEBUG) +#logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.WARN) + +app = Flask(__name__) + +from flask_cors import CORS #falls cross-orgin verwendet werden 
soll +CORS(app) + +socket = SocketIO(app, cors_allowed_origins="*") + +@socket.on('connect') +def sockcon(data): + """ + put every connection into it's own room + to avoid broadcasting messages + answer in callback only to room with sid + """ + room = request.sid + join_room(room) + socket.emit('backend response', {'msg': f'Connected to room {room} !', "room": room}) # looks like iOS needs an answer + + +class StreamingCallback(BaseCallbackHandler): + + def __init__(self, key: str, sid: str): + pass + + def on_llm_new_token(self, token: str, **kwargs): + pass + + def on_llm_end(self, response, **kwargs): + pass + + +@socket.on('client message') +def handle_message(message): + + room = message["room"] + stream_key = "chatbot_stream" + + llm = Ollama( + model="llama3", + #callback_manager=CallbackManager([StreamingCallback(stream_key, room)]), + base_url="http://ollama:11434" + ) + + + system_prompt = "Answer in the language of the question. Give a short answer. Answer all questions without reservations or advice." 
+ + + query = system_prompt + " " + message["data"] + print(message["data"]) + + for chunks in llm.stream(query): + socket.emit('backend token', {'data': chunks, "done": False}, to=room) + + socket.emit('backend token', {'done': True}, to=room) + + + + +#==============Routes=============== + +@app.route("/") #Index Verzeichnis +def index(): + return send_from_directory('.', "index.html") + +@app.route("/info") #spezielle Nutzer definierte Route +def info(): + return sys.version+" "+os.getcwd() + +@app.route('/') #generische Route (auch Unterordner) +def catchAll(path): + return send_from_directory('.', path) + + + + +if __name__ == '__main__': + #Wenn HTTPS benötigt wird (Pfade für RHEL7/können je OS variieren) + #cert = "/etc/pki/tls/certs/cert-payment.pem" #cert + #key = "/etc/pki/tls/private/cert-payment-private.pem" #key + #context = (cert, key) + #app.run(debug=True, host='0.0.0.0', ssl_context=context) + app.run(debug=True, host='0.0.0.0') + #app.run(debug=True) + + """ + llm = Ollama( + model="llama2", + callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), + base_url="http://ollama:11434" + ) + + assume = "Answer the next question with either true or false and name an example." + question = "Can cats use guns?" 
+ print(question) + s = llm.invoke(assume + " " + question) + + """ + diff --git a/backend/backend.env b/backend/backend.env new file mode 100644 index 0000000..e69de29 diff --git a/backend/elastictools.py b/backend/elastictools.py new file mode 100644 index 0000000..61400ec --- /dev/null +++ b/backend/elastictools.py @@ -0,0 +1,45 @@ +""" +Some helper functions to make querying easier +""" +from typing import Any, Tuple, List, Dict, Any, Callable, Optional +import json +from elasticsearch import NotFoundError, Elasticsearch # for normal read/write without vectors +from elasticsearch_dsl import Search, A +from elasticsearch_dsl import Document, Date, Integer, Keyword, Float, Long, Text, connections + + +def update_by_id(client: Elasticsearch, index: str, id_field_name: str, id_value: str, values_to_set: Dict[str, Any]) -> None: + #create painless insert script + source = "" + for k, v in values_to_set.items(): + source += f"ctx._source.{k} = {json.dumps(v)};" + + body = { + "query": { + "term": { + id_field_name: id_value + } + }, + "script": { + "source": source, + "lang": "painless" + } + } + client.update_by_query(index=index, body=body) + + + +def delete_by_id(client: Elasticsearch, index: str, id_field_name: str, id_value: str): + s = Search(using=client, index=index).filter("term", **{id_field_name: id_value}) + response = s.delete() + #if not response.success(): + # raise Exception("Unable to delete id '%s' in index '%' !" 
% (index, id_value)) + + print(response, flush=True) + + + +def get_datetime_interval(search: Search, start, end) -> Search: + return search.filter("range", timest={"gte": start}).filter("range", timest={"lte": end}) + + diff --git a/backend/funcs.py b/backend/funcs.py new file mode 100644 index 0000000..c958e32 --- /dev/null +++ b/backend/funcs.py @@ -0,0 +1,36 @@ +""" +Function lib +""" +from typing import Any, Tuple, List, Dict, Any, Callable, Optional +import json + +def group_by(fs: List[Callable[[Any], Any]], ls: List[Any]) -> Dict[Any, Any]: + """ + Recursivly divides a list into sublists according to a list of membership-defining functions + """ + match fs: + case [f]: + match ls: + case []: return {} + case _: + d = {} + for x in ls: + k = f(x) + if k in d: + d[k].append(x) + else: + d[k] = [x] + return d + case [f, *fs]: + d = group_by([f], ls) + return {k: group_by(fs, v) for k, v in d.items()} + + +def pretty(x): + """ + A convenience pretty printing function + """ + print( json.dumps(x, indent=4) ) + + + diff --git a/backend/index.html b/backend/index.html new file mode 100644 index 0000000..42fe761 --- /dev/null +++ b/backend/index.html @@ -0,0 +1,77 @@ + + + + Ollama Chatbot + + + + + + + + + + + +
Message
+ + + + + + diff --git a/backend/models.py b/backend/models.py new file mode 100644 index 0000000..5f47c9f --- /dev/null +++ b/backend/models.py @@ -0,0 +1,93 @@ +import os +from elasticsearch_dsl import Document, InnerDoc, Date, Integer, Keyword, Float, Long, Text, connections, Object + +# Define a default Elasticsearch client +connections.create_connection(hosts="http://localhost:9200") + +class Article(Document): + title = Text(analyzer='snowball', fields={'raw': Keyword()}) + body = Text(analyzer='snowball') + tags = Keyword() + published_from = Date() + lines = Integer() + + class Index: + name = 'blog' + settings = { + "number_of_shards": 1, + } + + def save(self, ** kwargs): + self.lines = len(self.body.split()) + return super(Article, self).save(** kwargs) + + +#======= nextsearch_log =========== + +class Sources(InnerDoc): + score = Float() + sourceFileId = Text() + sourceType = Text() + tags = Text() + +class NextsearchLog(Document): + a = Text() + chatbotid = Keyword() + durasecs = Float() + inCt = Float() + inToks = Long() + llm = Text() + outCt = Float() + outToks = Long() + q = Text() + queryid = Keyword() + rating = Long() + reason = Text() + reasontags = Text() + session = Keyword() + + sources = Object(Sources) #Text(analyzer='snowball') + temperature = Float() + totalCt = Float() + + timest = Date() #timestamp + date = Date() #iso date + + class Index: + #name = 'test_nextsearch_log' + name = 'nextsearch_log' + settings = { + "number_of_shards": 1, + } + + def save(self, ** kwargs): + self.lines = len(self.body.split()) + return super(NextsearchLog, self).save(** kwargs) + + +if __name__ == "__main__": + elastic_uri = os.getenv("ELASTIC_URI") + #elastic_uri = "http://localhost:9200" + assert elastic_uri + + # Define a default Elasticsearch client + connections.create_connection(hosts=elastic_uri) + #connections.create_connection(hosts) + + # create the mappings in elasticsearch + NextsearchLog.init() + + # create the mappings in elasticsearch + 
#Article.init() + + # create and save and article + #article = Article(meta={'id': 42}, title='Hello world!', tags=['test']) + #article.body = ''' looong text ''' + ##article.published_from = datetime.now() + #article.save() + + #article = Article.get(id=42) + #print(article.is_published()) + + # Display cluster health + #print(connections.get_connection().cluster.health()) diff --git a/backend/old_app.py b/backend/old_app.py new file mode 100644 index 0000000..4a95d00 --- /dev/null +++ b/backend/old_app.py @@ -0,0 +1,1322 @@ +""" +OpenAPI access via http://localhost/epdm/chat/bot/openapi/ on local docker-compose deployment + +""" +import warnings +warnings.filterwarnings("ignore") + +#std lib modules: +import os, sys, json +from typing import Any, Tuple, List, Dict, Any, Callable, Optional +from datetime import datetime, date +from collections import namedtuple +import hashlib, traceback, logging + +#ext libs: +import requests +import chardet +import codecs + +from elasticsearch import NotFoundError, Elasticsearch # for normal read/write without vectors +from elasticsearch_dsl import Search, A +from elasticsearch_dsl import Document, Date, Integer, Keyword, Float, Long, Text, connections + + +import openai #even used? 
+import tiktoken +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain.chains import RetrievalQA + +#from langchain.callbacks import get_openai_callback +from langchain_community.callbacks import get_openai_callback + +from langchain_openai import ChatOpenAI, AzureChatOpenAI +from langchain_openai import OpenAIEmbeddings, AzureOpenAIEmbeddings +from langchain_community.vectorstores.elasticsearch import ElasticsearchStore + +#from langchain.document_loaders import PyPDFLoader, Docx2txtLoader +from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader + +from langchain.callbacks.base import BaseCallbackHandler, BaseCallbackManager +from langchain.prompts import PromptTemplate + + +from pydantic import BaseModel, Field + +#flask, openapi +from flask import Flask, request, jsonify +from flask_cors import CORS, cross_origin +from werkzeug.utils import secure_filename +from flask_openapi3 import Info, Tag, OpenAPI, Server, FileStorage +from flask_socketio import SocketIO, join_room, leave_room, rooms + +#home grown +from scraper import WebScraper +from funcs import group_by +from elastictools import update_by_id, delete_by_id + +#TODO: implement some kind of logging mechanism +#logging.basicConfig(filename='record.log', level=logging.DEBUG) +#logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.WARN) + +OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") +AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT") +OPENAI_DEPLOYMENT_NAME = os.getenv("OPENAI_DEPLOYMENT_NAME") +OPENAI_MODEL_NAME = os.getenv("OPENAI_MODEL_NAME") +OPENAI_API_VERSION = os.getenv("OPENAI_API_VERSION") +OPENAI_API_TYPE = os.getenv("OPENAI_API_TYPE") +OPENAI_EMBEDDING = os.getenv("OPENAI_EMBEDDING") +OPENAI_TEXT_EMBEDDING = os.getenv("OPENAI_TEXT_EMBEDDING") +LLM_PAYLOAD = int(os.getenv("LLM_PAYLOAD")) +CHUNK_SIZE = int(os.getenv("CHUNK_SIZE")) +BOT_ROOT_PATH = os.getenv("BOT_ROOT_PATH") + + + +# required settings + +assert 
OPENAI_API_KEY +assert BOT_ROOT_PATH + +from models import NextsearchLog + +info = Info(title="Bot-API", version="1.0.0", + summary="", + description="chatGPT model: " + OPENAI_MODEL_NAME) +servers = [ + Server(url=BOT_ROOT_PATH ) #+ '/') +] +app = OpenAPI(__name__, info=info, servers=servers) + +# init app and env +#app = Flask(__name__) +index_prefix = "chatbot" +app.config['UPLOAD_FOLDER'] = 'uploads' +app.config['CORS_HEADERS'] = 'Content-Type' +app.config['CORS_METHODS'] = ["GET,POST,OPTIONS,DELETE,PUT"] + +# set cors +CORS(app) + +env_to_conf = { + "BACKEND_INTERNAL_URL": "api_url", + "ELASTIC_URI": "elastic_uri" +} + +#import values from env into flask config and do existence check +for env_key, conf_key in env_to_conf.items(): + x = os.getenv(env_key) + if not x: + msg = "Environment variable '%s' not set!" % env_key + app.logger.fatal(msg) + #raise Exception(msg) + sys.exit(1) + else: + app.config[conf_key] = x + + + +if OPENAI_API_TYPE == 'azure': + EMBEDDING = AzureOpenAIEmbeddings( + openai_api_key=OPENAI_API_KEY, + deployment=OPENAI_EMBEDDING, + model=OPENAI_TEXT_EMBEDDING, + openai_api_type=OPENAI_API_TYPE, + azure_endpoint=AZURE_OPENAI_ENDPOINT, + chunk_size = CHUNK_SIZE + ) +else: + EMBEDDING = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) + + + +# socket = SocketIO(app, cors_allowed_origins="*", path="/epdm/chat/bot/socket.io") +socket = SocketIO(app, cors_allowed_origins="*") + +@socket.on('connect') +def sockcon(data): + """ + put every connection into it's own room + to avoid broadcasting messages + answer in callback only to room with sid + """ + room = request.sid + join_room(room) + socket.emit('my response', {'data': 'Connected'}) # looks like iOS needs an answer + +class StreamingCallback(BaseCallbackHandler): + # def __init__(self, key, sourceFileId): + def __init__(self, key: str, sid: str): + # print("StreamingCallback") + self.key = key + self.sid = sid + self.text = "" + self.new_sentence = "" + + def on_llm_new_token(self, token: 
str, **kwargs): + # print("on_llm_new_token", token) + self.text += token + self.new_sentence += token + socket.emit(self.key, self.text, to=self.sid) + + def on_llm_end(self, response, **kwargs): + # print("on_llm_end", response) + self.text = "" + socket.emit(self.key, "[END]", to=self.sid) + + +#----------------- + +class RatingRequest(BaseModel): + queryId: str = Field(None, description='The query-id. Example: fa9d2f024698b723931fe633bfe065d3') + rating: int = Field(0, ge=-1, le=1, description='A rating value of: -1, 0 or 1 for bad, neutral or good') + reason: str = Field("", description='A short text by the user explaining the rating.') + reasonTags: List[str] = Field([], description='A list of tags flagging the rating. Examples: ["LAZY", "CLEVER", "COOL", "VILE", "BUGGED", "WRONG"]') #limit tag len to 64 chars + +@app.post('/bot/rating', summary="Allows for answers to be rated", tags=[]) +def rating(body: RatingRequest): + """ + Gives a rating to an answer. + """ + queryId = body.queryId + rating = body.rating + reason = body.reason + reasonTags = body.reasonTags + + client = Elasticsearch(app.config['elastic_uri']) + + try: + update_by_id(client, index="nextsearch_log", id_field_name="queryid", id_value=queryId, values_to_set={ + "rating": rating, + "reason": reason, + "reasonTags": reasonTags + }) + except NotFoundError: + return jsonify({ + 'status': 'error', + 'message': "Unknown id: '%s!'" % queryId + }), 404 + + return "", 201 + + +#-------------- + +def get_slugs_for_names(client: Elasticsearch): + s = Search(using=client, index="chatbot") + s = s[0:10000] + response = s.execute() + return { d["slug"]: d["chatbotName"] for d in (x.to_dict() for x in response.hits)} + + +#DEAD? 
+class BarChartRequest(BaseModel): + chatbots: List[str] = Field([], description="""A list of chatbot names to filter for""") + start: datetime = Field("2000-01-31T16:47+00:00", description="""The interval start datetime in ISO 8601 format""") + end: datetime = Field("2100-01-31T16:47+00:00", description="""The interval end datetime in ISO 8601 format""") + +@app.get('/bot/usage/activityPerDay', summary="", tags=[]) +def usage_activity_per_day(query: BarChartRequest): + """ + + """ + chatbots = query.chatbots + start = query.start + end = query.end + + + client = Elasticsearch(app.config['elastic_uri']) + + id2name = get_slugs_for_names(client) + + s = Search(using=client, index="nextsearch_log") \ + .filter("range", timest={"gte": start}) \ + .filter("range", timest={"lte": end}) + + s = s[0:10000] #if not used size is set to 10 results + + def maybe_id2name(id): + if id in id2name: + return id2name[id] + return id + + def agg_pretty_result(d): + ls = [] + for bucket in d["aggregations"]["day"]["buckets"]: + d = {} + d["date"] = bucket["key_as_string"] + d["bots"] = [] + for bot in bucket["chatbot"]["buckets"]: + d["bots"].append({ + "bot": maybe_id2name(bot["key"]), + "cost": bot["cost_per_bot"]["value"] + }) + ls.append(d) + return ls + + + s.aggs.bucket('day', 'terms', field='date') + s.aggs['day'].bucket('chatbot', 'terms', field='chatbotid').metric('cost_per_bot', 'sum', field='inCt') + + response = s.execute() + + r = response.to_dict() + del r["hits"] + + ls = agg_pretty_result(r) + + return jsonify(ls) + +#-------------- + + +#book_tag = Tag(name="book", description="Some Book") + +class DonutChartRequest(BaseModel): + chatbots: List[str] = Field([], description="""A list of chatbot names to filter for""") + start: datetime = Field("2000-01-31T16:47+00:00", description="""The interval start datetime in ISO 8601 format""") + end: datetime = Field("2100-01-31T16:47+00:00", description="""The interval end datetime in ISO 8601 format""") + 
+@app.get('/bot/usage/activity', summary="Takes an interval and gives back a summary of bots and their activity and cost.", tags=[]) +def usage_activity(query: DonutChartRequest): + """ + Use datetime in ISO 8601 format: 2007-08-31T16:47+00:00 + """ + chatbots = query.chatbots + start = query.start + end = query.end + + #group nextsearch_log by chatbotid and sum inCt + + client = Elasticsearch(app.config['elastic_uri']) + + id2name = get_slugs_for_names(client) + + s = Search(using=client, index="nextsearch_log") \ + .filter("range", timest={"gte": start}) \ + .filter("range", timest={"lte": end}) + + s = s[0:10000] #if not used size is set to 10 results + + a = A('terms', field='chatbotid') \ + .metric('cost_per_bot', 'sum', field='inCt') + + s.aggs.bucket('bots', a) + response = s.execute() + #print(response.aggregations.bots.buckets) + + def maybe_id2name(id): + if id in id2name: + return id2name[id] + return id + + match chatbots: + case []: + #ls = [d for d in (d.to_dict() for d in response.aggregations.bots.buckets)] + ls = [{**d, "chatbotname": maybe_id2name(d["key"].split("_")[0])} for d in (d.to_dict() for d in response.aggregations.bots.buckets)] + case _: + ls = [{**d, "chatbotname": maybe_id2name(d["key"].split("_")[0])} for d in (d.to_dict() for d in response.aggregations.bots.buckets) if d["key"] in id2name and id2name[d["key"]] in chatbots] + + d = { + "chart": { + "series": { + "data": ls + } + } + } + + return jsonify(d) + #return jsonify(ls) + #return jsonify(list(response.aggregations.bots.buckets)) + +#------------------ + +class RetrieveChatRequest(BaseModel): + sessionId: str = Field(None, description="""The session's id. 
Example: d73bccba29b6376c1869944f26c3b670""") + +@app.get('/bot/usage/conversation', summary="Takes a session-id and gives you all of it's content.", tags=[]) +def usage_conversation(query: RetrieveChatRequest): + """ + Example session-id: d73bccba29b6376c1869944f26c3b670 + """ + sessionId = query.sessionId + + client = Elasticsearch(app.config['elastic_uri']) + + s = Search(using=client, index="nextsearch_log") \ + .filter("term", session=sessionId) + + s = s[0:10000] #if not used size is set to 10 results + response = s.execute() + return jsonify([hit.to_dict() for hit in response]) + +#------------ + +class DialogTableRequest(BaseModel): + chatbots: List[str] = Field([], description="""A list of chatbot names to filter for""") + start: datetime = Field("2000-01-31T16:47+00:00", description="""The interval start datetime in ISO 8601 format""") + end: datetime = Field("2100-01-31T16:47+00:00", description="""The interval end datetime in ISO 8601 format""") + +@app.get('/bot/usage/conversations', summary="Takes an interval and gives you all chatbots and their sessions within.", tags=[]) +def usage_conversations(query: DialogTableRequest): + """ + Use datetime in ISO 8601 format: 2007-08-31T16:47+00:00 + """ + #GET /bot/usage/conversations?chatbots=robobot,cyberbot,gigabot&timeStart=2024-01-15&timeEnd=2024-01-15&timeStart=00:00&timeEnd=23:00 + chatbots = query.chatbots + start = query.start + end = query.end + + client = Elasticsearch(app.config['elastic_uri']) + + s = Search(using=client, index="nextsearch_log") \ + .filter("range", timest={"gte": start}) \ + .filter("range", timest={"lte": end}) + + s = s[0:10000] #if not used size is set to 10 results + + #a = A('terms', field='chatbotid') \ + # .metric('cost_per_bot', 'sum', field='inCt') + #s.aggs.bucket('bots', a) + + response = s.execute() + hits = (x.to_dict() for x in response.hits) + + id2name = get_slugs_for_names(client) + + match chatbots: + case []: + pass + case _: + + hits = filter(lambda d: 
(id2name[d["chatbotid"]] in chatbots) if d["chatbotid"] in id2name else False, hits) + + + d = group_by([lambda d: d["chatbotid"], lambda d: d["session"] ], hits) + + d2 = {} + for chatbotid, v in d.items(): + if chatbotid in id2name: + d2[id2name[chatbotid]] = v + + return jsonify(d2) + + +#------------------ + +class ExtractUrlRequest(BaseModel): + url: str = Field(None, description="""The URL to a website whose HTML-embedded URLs you'd like to have.""", strict=True) + +@app.post('/bot/extract-url', summary="Get URLs from a website via its URL", tags=[]) +def extract_url(body: ExtractUrlRequest): + """ + Takes a json of form {"url": "..."} and gives back a list of URLs found within the specified URL's HTML-sourcecode. + """ + url = body.url + if not url: + return jsonify({'status': 'error', 'message': 'Missing required parameter url!'}), 400 + + with WebScraper() as web_scraper: + return jsonify(web_scraper.extract_urls(url)) + +#------------------ + +def extract_data(links: List[Dict[str, str]]) -> List[Dict[str, str]]: + """ + Webscrape pages of the given links and return a list of texts + """ + with WebScraper() as web_scraper: + return web_scraper.extract_page_data(links) + + +def get_word_splits(word_file: str) -> List: + loader = Docx2txtLoader(word_file) + pages = loader.load_and_split() + txt_spliter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100, length_function=len) + doc_list = [] + for page in pages: + pg_splits = txt_spliter.split_text(page.page_content) + doc_list.extend(pg_splits) + return doc_list + + +def get_text_splits_from_file(text_file: str) -> List: + # Detect the file's encoding + with open(text_file, 'rb') as file: + encoding_result = chardet.detect(file.read()) + + # Use the detected encoding to read the file + detected_encoding = encoding_result['encoding'] + with codecs.open(text_file, 'r', encoding=detected_encoding, errors='replace') as file: + text = file.read() + + return get_text_splits(text) + + +def 
determine_index(chatbot_id: str) -> str: + return f"{index_prefix}_{chatbot_id.lower()}" + + +def embed_index(doc_list: List[Dict[str, str]], chatbot_id: str) -> None: + """ + Add source documents in chatbot_xyz index! + """ + index = determine_index(chatbot_id) + + #print(f"add documents to index {index}", flush=True) + app.logger.info(f"add documents to index {index}") + + + #ElasticsearchStore.from_documents(doc_list, EMBEDDING, index_name=index, es_url=elastic_uri) + ElasticsearchStore.from_documents(doc_list, EMBEDDING, index_name=index, es_url=app.config['elastic_uri']) + + + + + + +class TrainForm(BaseModel): + #url: str = Field(None, description="""""", strict=True) + chatbotSlug: str = Field(None, description="""""") + files: List[FileStorage] = Field(None, description="""Some files""") + text: str = Field(None, description="Some text") + #filesMetadata: List[Dict[str, str]] = Field(None, description="""""") + + filesMetadata: str = Field(None, description="""A JSON string""") #a json string: [ ... ] + links: str = Field(None, description="""A JSON string""") #a json? [ ... ] + + +#TODO: needs to be reimplemented with another mechanism like celeery to manage longer running tasks and give feedback to frontend +@app.post('/bot/train', summary="", tags=[]) +def upload(form: TrainForm): + """ + Caution: Long running request! + """ + #url = body.url + + #print(form.file.filename) + #print(form.file_type) + #form.file.save('test.jpg') + + #app.logger.info("TRAIN called!") + + # extract body + chatbot_id = form.chatbotSlug + files = form.files + text = form.text + files_metadata = form.filesMetadata + if files_metadata: + files_metadata = json.loads(files_metadata) + links = form.links + if links: + links = json.loads(links) #[{url: '...'}] ? 
+ app.logger.debug(links) + + + # validate body + if not chatbot_id: + return jsonify({ + 'status': 'error', + 'message': 'chatbotId is required' + }), 400 + + if not files_metadata and not text and not links: + return jsonify({ + 'status': 'error', + 'message': 'No data source found' + }), 400 + + if files_metadata and len(files) != len(files_metadata): + return jsonify({ + 'status': 'error', + 'message': 'Number of uploaded files metadata and files should be same' + }), 400 + + if links and len(links) == 0: + return jsonify({ + 'status': 'error', + 'message': 'No links found' + }), 400 + + + + try: + + # store raw data and extract doc_list + os.makedirs(f"{app.config['UPLOAD_FOLDER']}/{chatbot_id}", exist_ok=True) + + #train with given files + for i, file in enumerate(files): + filename = files_metadata[i]["slug"] + "_" + secure_filename(file.filename) + file_path = os.path.join(app.config['UPLOAD_FOLDER'], chatbot_id, filename) + file.save(file_path) + + app.logger.info("File saved successfully!") + + doc_list = [] + match file.filename.split(".")[-1]: + case "pdf": + doc_list = get_pdf_splits(file_path) + doc_list = add_metadata( + doc_list=doc_list, + source_type="pdf_file", + chatbot_id=chatbot_id, + source_file_id=files_metadata[i]["slug"], + filename=file.filename + ) + case "txt": + doc_list = get_text_splits_from_file(file_path) + doc_list = add_metadata( + doc_list=doc_list, + source_type="text_file", + chatbot_id=chatbot_id, + source_file_id=files_metadata[i]["slug"], + filename=file.filename + ) + case "docx" | "doc": + doc_list = get_word_splits(file_path) + doc_list = add_metadata( + doc_list=doc_list, + source_type="word_file", + chatbot_id=chatbot_id, + source_file_id=files_metadata[i]["slug"], + filename=file.filename + ) + case _: + app.logger.error("Unknown file extension: '%s'!" 
% file.filename.split(".")[-1]) + + + # embed file doc_list + embed_index(doc_list=doc_list, chatbot_id=chatbot_id) + + #train with given text + if text: + doc_list = get_text_splits(text) + doc_list = add_metadata( + doc_list=doc_list, + source_type="text", + chatbot_id=chatbot_id, + source_file_id="text", + txt_id=hashlib.md5(text.encode()).hexdigest() + ) + + # embed raw text doc_list + embed_index(doc_list=doc_list, chatbot_id=chatbot_id) + + #train with given links + if links and len(links) > 0: + links_docs = extract_data(links) + for i, doc in enumerate(links_docs): + if not doc['text']: + app.logger.info(f"Document {i} '{doc['url']} of {len(links_docs)} doesn't contain text. Skip.") + + else: + app.logger.info(f"embed document {i + 1} '{doc['url']}' of {len(links_docs)}") + + doc_list = get_text_splits(doc["text"], "link") + doc_list = add_metadata(doc_list, "link", chatbot_id, doc["slug"], url=doc["url"]) + + #TODO: save url with source! + + + # embed html doc_list + embed_index(doc_list=doc_list, chatbot_id=chatbot_id) + + #TODO: js backend needs to be merged into this one + # ping status endpoint + + express_api_endpoint = f"{app.config['api_url']}/api/chatbot/status/{chatbot_id}" + + #express_api_endpoint = f"{api_url}/api/chatbot/status/{chatbot_id}" + + + + try: + response = requests.put(express_api_endpoint, json={'status': 'ready'}) + + if response.status_code == 200: + app.logger.info("Express API updated successfully!") + else: + app.logger.error(f"Failed to update Express API {express_api_endpoint}") + + except Exception as e: + app.logger.error(f"Failed to update Express API {express_api_endpoint}") + app.logger.error(e) + + + return 'Files uploaded successfully' + except Exception as e: + app.logger.error(e) + + #TODO: log traceback! 
+ traceback.print_exc() + return jsonify({'status': 'error', 'message': 'Something went wrong!'}), 400 + + +#------------------ + +class ReviseAnswerRequest(BaseModel): + revisedText: str = Field(None, description="""The new revised text""") + chatbotSlug: str = Field(None, description="""The chatbot id""") + +@app.post('/bot/revise-answer', summary="", tags=[]) +def revise2(body: ReviseAnswerRequest): + """ + + """ + revised_text = body.revisedText + chatbot_id = body.chatbotSlug + + if not revised_text: + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter revisedText!' + }), 400 + + if not chatbot_id: + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter chatbotSlug!' + }), 400 + + doc_list = get_text_splits(revised_text) + doc_list = add_metadata(doc_list, "revised_text", chatbot_id, "text") + embed_index(doc_list=doc_list, chatbot_id=chatbot_id) + return jsonify({ + 'status': 'success', + 'message': 'Answer revised successfully!' + }) + +#------------------ + +def clean_history(hist: List[Dict[str, str]]) -> str: + out = '' + for qa in hist[-5:]: # only the last 5 + if len(qa['bot']) < 2: + continue + out += 'user: ' + qa['user'] + '\nassistant: ' + qa['bot'] + "\n\n" + return out + + + +def get_prices(model_name) -> Dict[str, float]: + """ + prices in Ct. 
per 1000 tokens + """ + match model_name: + + # Azure OpenAI + case 'gpt-35-turbo': + inCt = 0.15 + outCt = 0.2 + + # OpenAI + case 'gpt-3.5-turbo-16k': + inCt = 0.3 + outCt = 0.4 + + case 'gpt-3.5-turbo-0125': + inCt = 0.05 + outCt = 0.15 + + case 'gpt-4': + inCt = 3.0 + outCt = 6.0 + + case 'gpt-4-32k': + inCt = 6.0 + outCt = 12.0 + + case 'gpt-4-0125-preview': + inCt = 1.0 + outCt = 3.0 + + case _: + inCt = 1.0 + outCt = 1.0 + + return { + "inCt": inCt, + "outCt": outCt + } + + + +def query_log(chatbot_id, queryId, sess, temperature, q, a, rating, llm, dura, sources, inputTokens, inCt, outputTokens, outCt): + """ + Add a doc to nextsearch_log + """ + + connections.create_connection(hosts=app.config['elastic_uri']) + + # create the mappings in elasticsearch + NextsearchLog.init() + + totalCt = ((inputTokens / 1000) * inCt) + ((outputTokens / 1000) * outCt) + esdoc = { + 'queryid': queryId, + 'chatbotid': chatbot_id, + 'timest': datetime.now(), + + 'date': date.today().isoformat(), + + 'session': sess, + 'temperature': temperature, + 'q': q, + 'a': a, + 'rating': rating, + 'reason': '', + 'reasontags': '', + 'llm': llm, + 'durasecs': dura, + 'sources': sources, + 'inToks': inputTokens, + 'inCt': inCt, + 'outToks': outputTokens, + 'outCt': outCt, + 'totalCt': totalCt + } + + client = Elasticsearch(app.config['elastic_uri']) + + resp = client.index(index='nextsearch_log', document=esdoc) + #TODO: check resp for success + + #print(resp) + app.logger.info(resp) + + return resp + + + + +def get_llm(temperature: float, stream_key: str, sid: str): + """ + Get the right LLM + """ + if OPENAI_API_TYPE == 'azure': + llm = AzureChatOpenAI( + openai_api_version=OPENAI_API_VERSION, + deployment_name=OPENAI_DEPLOYMENT_NAME, + azure_endpoint=AZURE_OPENAI_ENDPOINT, + + openai_api_key=OPENAI_API_KEY, + model_name=OPENAI_MODEL_NAME, + temperature=temperature, + streaming=True, + callbacks=BaseCallbackManager([StreamingCallback(stream_key, sid)]) + ) + else: + llm = ChatOpenAI( + 
openai_api_key=OPENAI_API_KEY, + model_name=OPENAI_MODEL_NAME, + temperature=temperature, + streaming=True, + callbacks=BaseCallbackManager([StreamingCallback(stream_key, sid)]) + ) + + return llm + + +class QueryRequest(BaseModel): + queryId: str = Field("", description="""The query id""") #generated by the js backend atm + key: str = Field("", description="""String used for the streaming of the chat""") + + prompt: str = Field(None, description="""The prompt/question to the bot""") + history: List[Dict[str,str]] = Field([], description="""""") + chatbotSlug: str = Field(None, description="""The chatbot id. Example: 'MyBot_c2wun1'""") + temprature: float = Field(0.1, description="""The temperature value passed to OpenAI affecting the strictness of it#s answers""") + sid: str = Field(None, description="""String used for the streaming of the chat""") + systemPrompt: str = Field("Antworte freundlich, mit einer ausführlichen Erklärung, sofern vorhanden auf Basis der folgenden Informationen. Please answer in the language of the question.", description="""A prompt always contextualizing the query used""") + + +@app.post('/bot/query', summary="Query the bot via prompt", tags=[]) +def bot_query(body: QueryRequest): + """ + The main route to use the chatbots LLM with a given prompt string, temperature, system prompt and history context + """ + dura = datetime.now().timestamp() + + queryId = body.queryId + prompt = body.prompt + history = clean_history(body.history) + chatbot_id = body.chatbotSlug + system_prompt = body.systemPrompt + temperature = body.temprature #typo in 'temprature' instead of is key temperature + key = body.key + sid = body.sid + + stream_key = key if key else f"{chatbot_id}_stream" + + sess = str(request.user_agent) + ' ' + str(request.environ.get('HTTP_X_REAL_IP', request.remote_addr)) +' '+ str(request.remote_addr) + sessMD5 = hashlib.md5(sess.encode()).hexdigest() + + #TODO: we need a better way to create these ids... 
it seems kind of random + if (queryId == None) or (queryId == ''): + queryId = sessMD5 + + + encoding = tiktoken.encoding_for_model(OPENAI_MODEL_NAME) + + if not chatbot_id: + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter chatbotSlug!' + }), 400 + if not prompt: + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter prompt!' + }), 400 + if not sid: + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter sid in query!' + }), 400 + + + default_temperature = 0.1 + temperature = temperature if temperature is not None else default_temperature + + llm = get_llm(temperature, stream_key, sid) + + prompt_template = system_prompt + """ + + {context} + + + """ + history + """ + + Question: {question} + """ + + chat_prompt = PromptTemplate( + template=prompt_template, input_variables=["context", "question"] + ) + + index = determine_index(chatbot_id) + + db = ElasticsearchStore( + es_url=app.config['elastic_uri'], + index_name=index, + distance_strategy="COSINE", + embedding=EMBEDDING + ) + + k = int(LLM_PAYLOAD / CHUNK_SIZE) - 1 + if (k < 2): + k = 2 + + scoredocs = db.similarity_search_with_score(prompt, k=k+10) + + query = RetrievalQA.from_chain_type( + llm=llm, + chain_type="stuff", + verbose=False, + return_source_documents=True, + retriever=db.as_retriever(search_kwargs={'k': k}), + chain_type_kwargs={"prompt": chat_prompt} + ) + + inputTokens = 0 + outputTokens = 0 + + with get_openai_callback() as cb: + qares = query.invoke({'query': prompt}) + qadocs = qares['source_documents'] #TODO: STS: deliver doc names and page numbers in the future + inputDocTxt = '' + + sources = [] + count = 0 + for qadoc in qadocs: + mdata = qadoc.metadata + if 'chatbotId' in mdata: + del mdata['chatbotId'] + + nextScore = 0.0 + for scoredoc in scoredocs: + if (len(qadoc.page_content) > 20) and (len(scoredoc[0].page_content) > 20) and (qadoc.page_content[:20] == scoredoc[0].page_content[:20]): + nextScore = 
scoredoc[1] + inputDocTxt += ' ' + qadoc.page_content + break + + # Lets make Percent of the score, only look at 0.6-1.0 + nextScore = float((nextScore - 0.6) * 250) + if nextScore < 1.0: + nextScore = 1.0 + if nextScore > 99.99: + nextScore = 99.99 + + mdata['score'] = round(nextScore, 2) + sources.append(mdata) + count += 1 + + answer = qares['result'] + #print(f"Total Tokens: {cb.total_tokens}") + #print(f"Prompt Tokens: {cb.prompt_tokens}") + #print(f"Completion Tokens: {cb.completion_tokens}") + + app.logger.info("ANSWER: " + answer) + + #print(ans, flush=True) + + inputTokens = len(encoding.encode(inputDocTxt + ' ' + prompt_template)) + outputTokens = len(encoding.encode(answer)) + + app.logger.info(f"Input Tokens: {inputTokens}") + app.logger.info(f"Output Tokens: {outputTokens}") + app.logger.info(f"Total Cost (USD): ${cb.total_cost}") + + + + d = get_prices(OPENAI_MODEL_NAME) + inCt = d["inCt"] + outCt = d["outCt"] + + # log question/answer + dura = round(datetime.now().timestamp() - dura, 2) + resp = query_log(chatbot_id, + queryId, + sessMD5, + temperature, + prompt, + answer, + 0, + OPENAI_MODEL_NAME, + dura, + sources, + inputTokens, + inCt, + outputTokens, + outCt) + + app.logger.info(resp) + + sources_index = "chatbot_" + chatbot_id + + client = Elasticsearch(app.config['elastic_uri']) + + s = Search(using=client, index=sources_index) + s = s[0:10000] + response = s.execute() + srcs = (x.to_dict() for x in response.hits) + src_grps = group_by([lambda d: d["metadata"]["sourceType"] ], srcs) + + #def print_type(x): + + new_sources = [] + for source in sources: + app.logger.info("Source: " + repr(source)) + match source["sourceType"]: + case "text": + if "txt_id" in source: + source["text"] = "" + d2 = group_by([lambda d: d["metadata"]["txt_id"] ], src_grps["text"]) + for src_item in d2[source["txt_id"]]: + source["text"] += " " + src_item["text"] + + new_sources.append(source) + + case "link": + if "sourceFileId" in source: + source["text"] = "" + d2 = 
group_by([lambda d: d["metadata"]["sourceFileId"] ], src_grps["link"]) + for src_item in d2[source["sourceFileId"]]: + source["text"] += " " + src_item["text"] + if "url" in src_item: + source["url"] = src_item["url"] + + new_sources.append(source) + + case "file": + if "sourceFileId" in source: + source["text"] = "" + d2 = group_by([lambda d: d["metadata"]["sourceFileId"] ], src_grps["file"]) + for src_item in d2[source["sourceFileId"]]: + source["text"] += " " + src_item["text"] + if "filename" in src_item: + source["filename"] = src_item["filename"] + + new_sources.append(source) + + + if resp.body["result"] == "created": + return jsonify({ + 'status': 'success', + 'answer': answer, + 'query_id': queryId, + 'sources': new_sources #sources + }) + else: + return jsonify({ + 'status': 'error', + 'message': resp.body["result"] + }), 400 + + +#------------------ + +#TODO create separate delete bot route + +class DeleteBotRequest(BaseModel): + chatbot_id: str = Field(None, description="""Chatbot id""") + +@app.delete('/bot', summary="", tags=[]) +def delete_bot(body: DeleteBotRequest): + """ + Not implemented yet + + Delete a chatbot via it's id + """ + chatbot_id = body.chatbot_id + + # Ensure chatbotId is provided + if not chatbot_id: + app.logger.error('Missing required parameter chatbotSlug!') + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter chatbotSlug!' + }), 400 + + client = Elasticsearch(app.config['elastic_uri']) + id2name = get_slugs_for_names(client) + index = determine_index(chatbot_id) + + + if not chatbot_id in id2name: + app.logger.error("Missing associated chatbot name of this id: '%s'!" % chatbot_id) + return jsonify({ + 'status': 'error', + 'message': 'Chatbot id not found!' + }), 404 + else: + chatbot_name = id_value=id2name[chatbot_id] + + + #TODO: delete index chatbot_ + try: + client.indices.delete(index=index) + app.logger.info("Deleted index '%s' !" 
% index) + except: + app.logger.error("Could not delete index '%s' !" % index) + + + #TODO: delete associated doc from index chatbot + #try: + delete_by_id(client, index="chatbot", id_field_name="slug", id_value=chatbot_id) + # app.logger.info("Deleted chatbot '%s' data from index '%s' !" % (chatbot_id, "chatbot")) + #except: + # app.logger.error("Could not delete data for '%s' in index 'chatbot' !" % chatbot_id) + + + #TODO: delete associated doc from index settings + #try: + delete_by_id(client, index="settings", id_field_name="displayName", id_value=chatbot_name) + # app.logger.info("Deleted chatbot '%s' data from index '%s' !" % (id2name[chatbot_id], "settings")) + #except: + # app.logger.error("Could not delete data for '%s' in index 'settings' !" % id2name[chatbot_id]) + + + #TODO: delete associated doc from index nextsearch_log + #try: + delete_by_id(client, index="nextsearch_log", id_field_name="chatbotid", id_value=chatbot_id) + # app.logger.info("Deleted chatbot '%s' data from index '%s' !" % (chatbot_id, "nextsearch_log")) + #except: + # app.logger.error("Could not delete data for '%s' in index 'nextsearch_log' !" % chatbot_id) + + return "", 202 + + +#------------------ + +#TODO: overloaded route... split into two or three, one for each resource +#server/routes/api/chatbot.js + +#FE calls js BE: /api/chatbot/resources/abotycrsh1 +#which calls bot BE + + + +class DeleteResourceRequest(BaseModel): + sourceType: str = Field(None, description="""Source type: ...link, text, file ?""") + sourceId: str = Field(None, description="""Source id?""") + chatbotSlug: str = Field(None, description="""Chatbot id""") + +@app.delete('/bot/resources', summary="delete a bot or resource via it's id", tags=[]) +def delete_resource(body: DeleteResourceRequest): + """ + * delete a bot via it's id + * delete files used as training source + + or other resources... 
unclear atm + """ + source_type = body.sourceType + source_id = body.sourceId + chatbot_id = body.chatbotSlug + + # Validate presence of sourceType + if not source_type: + msg = 'sourceType is required!' + app.logger.error(msg) + return jsonify({ + 'status': 'error', + 'message': msg + }), 400 + + # Ensure chatbotId is provided + if not chatbot_id: + app.logger.error('Missing required parameter chatbotSlug!') + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter chatbotSlug!' + }), 400 + + # Apply criteria based on sourceType + filter_criteria = { + "bool": { + "must": [ + {"match": {"metadata.sourceType": source_type}}, + {"match": {"metadata.chatbotId": chatbot_id}}, + ] + } + } + if source_type != 'text': + if not source_id: + app.logger.error('Missing required parameter sourceId!') + return jsonify({ + 'status': 'error', + 'message': 'Missing required parameter sourceId!' + }), 400 + new_match: Dict[str, Dict[str, Any]] = { + "match": { + "metadata.sourceFileId": source_id + } + } + filter_criteria["bool"]["must"].append(new_match) + + try: + # Assuming delete method returns a status or raises an exception on failure + app.logger.info(filter_criteria) + + index = determine_index(chatbot_id) + + store = ElasticsearchStore( + es_url=app.config['elastic_uri'], + index_name=index, + embedding=EMBEDDING + ) + + store.client.delete_by_query(index=index, query=filter_criteria) + + # isDeleted = index.delete(filter=filter_criteria) + except Exception as e: + #TODO: Handle specific exceptions if possible + + app.logger.error(str(e)) + + return jsonify({ + 'status': 'error', + 'message': str(e) + }), 500 + + + msg = 'Resource deleted successfully!' 
+ app.logger.info(msg) + return jsonify({ + 'status': 'success', + 'message': msg + }) + + +#------------------ + + +# Splits the text into small chunks of 150 characters +def get_pdf_splits(pdf_file: str) -> List: + loader = PyPDFLoader(pdf_file) + pages = loader.load_and_split() + text_split = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100, length_function=len) + doc_list = [] + for pg in pages: + pg_splits = text_split.split_text(pg.page_content) + doc_list.extend(pg_splits) + return doc_list + + +def get_text_splits(text: str, source: str="text") -> List: + chunk_size = 1536 + chunk_overlap = 200 + + #if source == "link": + # chunk_size = 1536 + # chunk_overlap = 200 + + text_split = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len) + doc_list = text_split.split_text(text) + return doc_list + + +ESDocument = namedtuple('Document', ['page_content', 'metadata']) + +def add_metadata(doc_list: List[str], source_type: str, chatbot_id: str, source_file_id, tags=[], url=None, filename=None, txt_id=None) -> List[ESDocument]: + """ + + """ + for i, doc in enumerate(doc_list): + # If doc is a string, convert it to the Document format + if isinstance(doc, str): + doc = ESDocument(page_content=doc, metadata={}) + doc_list[i] = doc + + # Update the metadata + updated_metadata = doc.metadata.copy() + updated_metadata["chatbotId"] = chatbot_id + updated_metadata["tags"] = ' | '.join(tags) + + match source_type: + case "text": + updated_metadata["sourceType"] = "text" + if txt_id is not None: + updated_metadata["txt_id"] = txt_id + + case "revised_text": + updated_metadata["sourceType"] = "revised_text" + + case "pdf_file" | "word_file" | "text_file": + updated_metadata["sourceType"] = "file" + updated_metadata["sourceFileId"] = source_file_id + if filename is not None: + updated_metadata["filename"] = filename + + case "link": + updated_metadata["sourceType"] = "link" + updated_metadata["sourceFileId"] = 
source_file_id + if url is not None: + updated_metadata["url"] = url + + # Update the document in the doc_list with new metadata + doc_list[i] = ESDocument( + page_content=doc.page_content, + metadata=updated_metadata + ) + + return doc_list + + + +@app.errorhandler(500) +def server_error(error): + app.logger.exception('An exception occurred during a request: ' + str(error)) + return 'Internal Server Error', 500 + + + +#JS Backend routes to reimplement: +# http://localhost:8000/api/chatbot/add-resources + +if __name__ == '__main__': + app.run(debug=True, host="0.0.0.0", port=5000) + + + diff --git a/backend/requirements.txt b/backend/requirements.txt new file mode 100644 index 0000000..8377bbe --- /dev/null +++ b/backend/requirements.txt @@ -0,0 +1,14 @@ + +elasticsearch +elasticsearch-dsl +langchain +tiktoken + +pydantic +Werkzeug +flask +Flask-Cors +Flask-SocketIO +flask-openapi3 + +minio diff --git a/backend/w3.css b/backend/w3.css new file mode 100755 index 0000000..210eb61 --- /dev/null +++ b/backend/w3.css @@ -0,0 +1,361 @@ +/* W3.CSS 2.99 Mar 2017 by Jan Egil and Borge Refsnes */ +html{box-sizing:border-box}*,*:before,*:after{box-sizing:inherit} +/* Extract from normalize.css by Nicolas Gallagher and Jonathan Neal git.io/normalize */ +html{-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0} +article,aside,details,figcaption,figure,footer,header,main,menu,nav,section,summary{display:block} +audio,canvas,progress,video{display:inline-block}progress{vertical-align:baseline} +audio:not([controls]){display:none;height:0}[hidden],template{display:none} +a{background-color:transparent;-webkit-text-decoration-skip:objects} +a:active,a:hover{outline-width:0}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted} +dfn{font-style:italic}mark{background:#ff0;color:#000} +small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline} 
+sub{bottom:-0.25em}sup{top:-0.5em}figure{margin:1em 40px} +img{border-style:none}svg:not(:root){overflow:hidden} +code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em} +hr{box-sizing:content-box;height:0;overflow:visible} +button,input,select,textarea{font:inherit;margin:0}optgroup{font-weight:bold} +button,input{overflow:visible}button,select{text-transform:none} +button,html [type=button],[type=reset],[type=submit]{-webkit-appearance:button} +button::-moz-focus-inner, [type=button]::-moz-focus-inner, [type=reset]::-moz-focus-inner, [type=submit]::-moz-focus-inner{border-style:none;padding:0} +button:-moz-focusring, [type=button]:-moz-focusring, [type=reset]:-moz-focusring, [type=submit]:-moz-focusring{outline:1px dotted ButtonText} +fieldset{border:1px solid #c0c0c0;margin:0 2px;padding:.35em .625em .75em} +legend{color:inherit;display:table;max-width:100%;padding:0;white-space:normal}textarea{overflow:auto} +[type=checkbox],[type=radio]{padding:0} +[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto} +[type=search]{-webkit-appearance:textfield;outline-offset:-2px} +[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none} +::-webkit-input-placeholder{color:inherit;opacity:0.54} +::-webkit-file-upload-button{-webkit-appearance:button;font:inherit} +/* End extract */ +html,body{font-family:Verdana,sans-serif;font-size:15px;line-height:1.5}html{overflow-x:hidden} +h1,h2,h3,h4,h5,h6,.w3-slim,.w3-wide{font-family:"Segoe UI",Arial,sans-serif} +h1{font-size:36px}h2{font-size:30px}h3{font-size:24px}h4{font-size:20px}h5{font-size:18px}h6{font-size:16px} +.w3-serif{font-family:"Times New Roman",Times,serif} +h1,h2,h3,h4,h5,h6{font-weight:400;margin:10px 0}.w3-wide{letter-spacing:4px} +h1 a,h2 a,h3 a,h4 a,h5 a,h6 a{font-weight:inherit} +hr{border:0;border-top:1px solid #eee;margin:20px 0} +img{margin-bottom:-5px}a{color:inherit} +.w3-image{max-width:100%;height:auto} 
+.w3-table,.w3-table-all{border-collapse:collapse;border-spacing:0;width:100%;display:table} +.w3-table-all{border:1px solid #ccc} +.w3-bordered tr,.w3-table-all tr{border-bottom:1px solid #ddd} +.w3-striped tbody tr:nth-child(even){background-color:#f1f1f1} +.w3-table-all tr:nth-child(odd){background-color:#fff} +.w3-table-all tr:nth-child(even){background-color:#f1f1f1} +.w3-hoverable tbody tr:hover,.w3-ul.w3-hoverable li:hover{background-color:#ccc} +.w3-centered tr th,.w3-centered tr td{text-align:center} +.w3-table td,.w3-table th,.w3-table-all td,.w3-table-all th{padding:8px 8px;display:table-cell;text-align:left;vertical-align:top} +.w3-table th:first-child,.w3-table td:first-child,.w3-table-all th:first-child,.w3-table-all td:first-child{padding-left:16px} +.w3-btn,.w3-btn-block,.w3-button{border:none;display:inline-block;outline:0;padding:6px 16px;vertical-align:middle;overflow:hidden;text-decoration:none!important;color:#fff;background-color:#000;text-align:center;cursor:pointer;white-space:nowrap} +.w3-btn:hover,.w3-btn-block:hover,.w3-btn-floating:hover,.w3-btn-floating-large:hover{box-shadow:0 8px 16px 0 rgba(0,0,0,0.2),0 6px 20px 0 rgba(0,0,0,0.19)} +.w3-button{color:#000;background-color:#f1f1f1;padding:8px 16px}.w3-button:hover{color:#000!important;background-color:#ccc!important} +.w3-btn,.w3-btn-floating,.w3-btn-floating-large,.w3-closenav,.w3-opennav,.w3-btn-block,.w3-button{-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none} +.w3-btn-floating,.w3-btn-floating-large{display:inline-block;text-align:center;color:#fff;background-color:#000;position:relative;overflow:hidden;z-index:1;padding:0;border-radius:50%;cursor:pointer;font-size:24px} +.w3-btn-floating{width:40px;height:40px;line-height:40px}.w3-btn-floating-large{width:56px;height:56px;line-height:56px} 
+.w3-disabled,.w3-btn:disabled,.w3-button:disabled,.w3-btn-floating:disabled,.w3-btn-floating-large:disabled{cursor:not-allowed;opacity:0.3}.w3-disabled *,:disabled *{pointer-events:none} +.w3-btn.w3-disabled:hover,.w3-btn-block.w3-disabled:hover,.w3-btn:disabled:hover,.w3-btn-floating.w3-disabled:hover,.w3-btn-floating:disabled:hover, +.w3-btn-floating-large.w3-disabled:hover,.w3-btn-floating-large:disabled:hover{box-shadow:none} +.w3-btn-group .w3-btn{float:left}.w3-btn-block{width:100%} +.w3-btn-bar .w3-btn{box-shadow:none;background-color:inherit;color:inherit;float:left}.w3-btn-bar .w3-btn:hover{background-color:#ccc} +.w3-badge,.w3-tag,.w3-sign{background-color:#000;color:#fff;display:inline-block;padding-left:8px;padding-right:8px;text-align:center} +.w3-badge{border-radius:50%} +ul.w3-ul{list-style-type:none;padding:0;margin:0}ul.w3-ul li{padding:6px 2px 6px 16px;border-bottom:1px solid #ddd}ul.w3-ul li:last-child{border-bottom:none} +.w3-tooltip,.w3-display-container{position:relative}.w3-tooltip .w3-text{display:none}.w3-tooltip:hover .w3-text{display:inline-block} +.w3-navbar{list-style-type:none;margin:0;padding:0;overflow:hidden} +.w3-navbar li{float:left}.w3-navbar li a,.w3-navitem,.w3-navbar li .w3-btn,.w3-navbar li .w3-input{display:block;padding:8px 16px}.w3-navbar li .w3-btn,.w3-navbar li .w3-input{border:none;outline:none;width:100%} +.w3-navbar li a:hover{color:#000;background-color:#ccc} +.w3-navbar .w3-dropdown-hover,.w3-navbar .w3-dropdown-click{position:static} +.w3-navbar .w3-dropdown-hover:hover,.w3-navbar .w3-dropdown-hover:first-child,.w3-navbar .w3-dropdown-click:hover{background-color:#ccc;color:#000} +.w3-navbar a,.w3-topnav a,.w3-sidenav a,.w3-dropdown-content a,.w3-accordion-content a,.w3-dropnav a,.w3-navblock a{text-decoration:none!important} +.w3-navbar .w3-opennav.w3-right{float:right!important}.w3-topnav{padding:8px 8px} +.w3-navblock .w3-dropdown-hover:hover,.w3-navblock .w3-dropdown-hover:first-child,.w3-navblock 
.w3-dropdown-click:hover{background-color:#ccc;color:#000} +.w3-navblock .w3-dropdown-hover,.w3-navblock .w3-dropdown-click{width:100%}.w3-navblock .w3-dropdown-hover .w3-dropdown-content,.w3-navblock .w3-dropdown-click .w3-dropdown-content{min-width:100%} +.w3-topnav a{padding:0 8px;border-bottom:3px solid transparent;-webkit-transition:border-bottom .25s;transition:border-bottom .25s} +.w3-topnav a:hover{border-bottom:3px solid #fff}.w3-topnav .w3-dropdown-hover a{border-bottom:0} +.w3-opennav,.w3-closenav{color:inherit}.w3-opennav:hover,.w3-closenav:hover{cursor:pointer;opacity:0.8} +.w3-btn,.w3-btn-floating,.w3-dropnav a,.w3-btn-floating-large,.w3-btn-block, .w3-navbar a,.w3-navblock a,.w3-sidenav a,.w3-pagination li a,.w3-hoverable tbody tr,.w3-hoverable li, +.w3-accordion-content a,.w3-dropdown-content a,.w3-dropdown-click:hover,.w3-dropdown-hover:hover,.w3-opennav,.w3-closenav,.w3-closebtn,*[class*="w3-hover-"] +{-webkit-transition:background-color .25s,color .15s,box-shadow .25s,opacity 0.25s,filter 0.25s,border 0.15s;transition:background-color .25s,color .15s,box-shadow .15s,opacity .25s,filter .25s,border .15s} +.w3-ripple:active{opacity:0.5}.w3-ripple{-webkit-transition:opacity 0s;transition:opacity 0s} +.w3-sidenav,.w3-sidebar{height:100%;width:200px;background-color:#fff;position:fixed!important;z-index:1;overflow:auto} +.w3-sidenav a,.w3-navblock a{padding:4px 2px 4px 16px}.w3-sidenav a:hover,.w3-navblock a:hover{background-color:#ccc;color:#000}.w3-sidenav a,.w3-dropnav a,.w3-navblock a{display:block} +.w3-sidenav .w3-dropdown-hover:hover,.w3-sidenav .w3-dropdown-hover:first-child,.w3-sidenav .w3-dropdown-click:hover,.w3-dropnav a:hover{background-color:#ccc;color:#000} +.w3-sidenav .w3-dropdown-hover,.w3-sidenav .w3-dropdown-click,.w3-bar-block .w3-dropdown-hover,.w3-bar-block .w3-dropdown-click{width:100%} +.w3-sidenav .w3-dropdown-hover .w3-dropdown-content,.w3-sidenav .w3-dropdown-click .w3-dropdown-content,.w3-bar-block .w3-dropdown-hover 
.w3-dropdown-content,.w3-bar-block .w3-dropdown-click .w3-dropdown-content{min-width:100%} +.w3-bar-block .w3-dropdown-hover .w3-button,.w3-bar-block .w3-dropdown-click .w3-button{width:100%;text-align:left;background-color:inherit;color:inherit;padding:6px 2px 6px 16px} +.w3-main,#main{transition:margin-left .4s} +.w3-modal{z-index:3;display:none;padding-top:100px;position:fixed;left:0;top:0;width:100%;height:100%;overflow:auto;background-color:rgb(0,0,0);background-color:rgba(0,0,0,0.4)} +.w3-modal-content{margin:auto;background-color:#fff;position:relative;padding:0;outline:0;width:600px}.w3-closebtn{text-decoration:none;float:right;font-size:24px;font-weight:bold;color:inherit} +.w3-closebtn:hover,.w3-closebtn:focus{color:#000;text-decoration:none;cursor:pointer} +.w3-pagination{display:inline-block;padding:0;margin:0}.w3-pagination li{display:inline} +.w3-pagination li a{text-decoration:none;color:#000;float:left;padding:8px 16px} +.w3-pagination li a:hover{background-color:#ccc} +.w3-input-group,.w3-group{margin-top:24px;margin-bottom:24px} +.w3-input{padding:8px;display:block;border:none;border-bottom:1px solid #808080;width:100%} +.w3-label{color:#009688}.w3-input:not(:valid)~.w3-validate{color:#f44336} +.w3-select{padding:9px 0;width:100%;color:#000;border:1px solid transparent;border-bottom:1px solid #009688} +.w3-select select:focus{color:#000;border:1px solid #009688}.w3-select option[disabled]{color:#009688} +.w3-dropdown-click,.w3-dropdown-hover{position:relative;display:inline-block;cursor:pointer} +.w3-dropdown-hover:hover .w3-dropdown-content{display:block;z-index:1} +.w3-dropdown-hover:first-child,.w3-dropdown-click:hover{background-color:#ccc;color:#000} +.w3-dropdown-hover:hover > .w3-button:first-child,.w3-dropdown-click:hover > .w3-button:first-child{background-color:#ccc;color:#000} +.w3-dropdown-content{cursor:auto;color:#000;background-color:#fff;display:none;position:absolute;min-width:160px;margin:0;padding:0} +.w3-dropdown-content 
a{padding:6px 16px;display:block} +.w3-dropdown-content a:hover{background-color:#ccc} +.w3-accordion{width:100%;cursor:pointer} +.w3-accordion-content{cursor:auto;display:none;position:relative;width:100%;margin:0;padding:0} +.w3-accordion-content a{padding:6px 16px;display:block}.w3-accordion-content a:hover{background-color:#ccc} +.w3-progress-container{width:100%;height:1.5em;position:relative;background-color:#f1f1f1} +.w3-progressbar{background-color:#757575;height:100%;position:absolute;line-height:inherit} +input[type=checkbox].w3-check,input[type=radio].w3-radio{width:24px;height:24px;position:relative;top:6px} +input[type=checkbox].w3-check:checked+.w3-validate,input[type=radio].w3-radio:checked+.w3-validate{color:#009688} +input[type=checkbox].w3-check:disabled+.w3-validate,input[type=radio].w3-radio:disabled+.w3-validate{color:#aaa} +.w3-bar{width:100%;overflow:hidden}.w3-center .w3-bar{display:inline-block;width:auto} +.w3-bar .w3-bar-item{padding:8px 16px;float:left;background-color:inherit;color:inherit;width:auto;border:none;outline:none;display:block} +.w3-bar .w3-dropdown-hover,.w3-bar .w3-dropdown-click{position:static;float:left} +.w3-bar .w3-button{background-color:inherit;color:inherit;white-space:normal} +.w3-bar-block .w3-bar-item{width:100%;display:block;padding:8px 16px;text-align:left;background-color:inherit;color:inherit;border:none;outline:none;white-space:normal} +.w3-bar-block.w3-center .w3-bar-item{text-align:center} +.w3-block{display:block;width:100%} +.w3-responsive{overflow-x:auto} +.w3-container:after,.w3-container:before,.w3-panel:after,.w3-panel:before,.w3-row:after,.w3-row:before,.w3-row-padding:after,.w3-row-padding:before,.w3-cell-row:before,.w3-cell-row:after, +.w3-topnav:after,.w3-topnav:before,.w3-clear:after,.w3-clear:before,.w3-btn-group:before,.w3-btn-group:after,.w3-btn-bar:before,.w3-btn-bar:after,.w3-bar:before,.w3-bar:after +{content:"";display:table;clear:both} 
+.w3-col,.w3-half,.w3-third,.w3-twothird,.w3-threequarter,.w3-quarter{float:left;width:100%} +.w3-col.s1{width:8.33333%} +.w3-col.s2{width:16.66666%} +.w3-col.s3{width:24.99999%} +.w3-col.s4{width:33.33333%} +.w3-col.s5{width:41.66666%} +.w3-col.s6{width:49.99999%} +.w3-col.s7{width:58.33333%} +.w3-col.s8{width:66.66666%} +.w3-col.s9{width:74.99999%} +.w3-col.s10{width:83.33333%} +.w3-col.s11{width:91.66666%} +.w3-col.s12,.w3-half,.w3-third,.w3-twothird,.w3-threequarter,.w3-quarter{width:99.99999%} +@media (min-width:601px){ +.w3-col.m1{width:8.33333%} +.w3-col.m2{width:16.66666%} +.w3-col.m3,.w3-quarter{width:24.99999%} +.w3-col.m4,.w3-third{width:33.33333%} +.w3-col.m5{width:41.66666%} +.w3-col.m6,.w3-half{width:49.99999%} +.w3-col.m7{width:58.33333%} +.w3-col.m8,.w3-twothird{width:66.66666%} +.w3-col.m9,.w3-threequarter{width:74.99999%} +.w3-col.m10{width:83.33333%} +.w3-col.m11{width:91.66666%} +.w3-col.m12{width:99.99999%}} +@media (min-width:993px){ +.w3-col.l1{width:8.33333%} +.w3-col.l2{width:16.66666%} +.w3-col.l3,.w3-quarter{width:24.99999%} +.w3-col.l4,.w3-third{width:33.33333%} +.w3-col.l5{width:41.66666%} +.w3-col.l6,.w3-half{width:49.99999%} +.w3-col.l7{width:58.33333%} +.w3-col.l8,.w3-twothird{width:66.66666%} +.w3-col.l9,.w3-threequarter{width:74.99999%} +.w3-col.l10{width:83.33333%} +.w3-col.l11{width:91.66666%} +.w3-col.l12{width:99.99999%}} +.w3-content{max-width:980px;margin:auto} +.w3-rest{overflow:hidden} +.w3-layout-container,.w3-cell-row{display:table;width:100%}.w3-layout-row{display:table-row}.w3-layout-cell,.w3-layout-col,.w3-cell{display:table-cell} +.w3-layout-top,.w3-cell-top{vertical-align:top}.w3-layout-middle,.w3-cell-middle{vertical-align:middle}.w3-layout-bottom,.w3-cell-bottom{vertical-align:bottom} +.w3-hide{display:none!important}.w3-show-block,.w3-show{display:block!important}.w3-show-inline-block{display:inline-block!important} +@media (max-width:600px){.w3-modal-content{margin:0 
10px;width:auto!important}.w3-modal{padding-top:30px} +.w3-topnav a{display:block}.w3-navbar li:not(.w3-opennav){float:none;width:100%!important}.w3-navbar li.w3-right{float:none!important} +.w3-topnav .w3-dropdown-hover .w3-dropdown-content,.w3-navbar .w3-dropdown-click .w3-dropdown-content,.w3-navbar .w3-dropdown-hover .w3-dropdown-content,.w3-dropdown-hover.w3-mobile .w3-dropdown-content,.w3-dropdown-click.w3-mobile .w3-dropdown-content{position:relative} +.w3-topnav,.w3-navbar{text-align:center}.w3-hide-small{display:none!important}.w3-layout-col,.w3-mobile{display:block;width:100%!important}.w3-bar-item.w3-mobile,.w3-dropdown-hover.w3-mobile,.w3-dropdown-click.w3-mobile{text-align:center} +.w3-dropdown-hover.w3-mobile,.w3-dropdown-hover.w3-mobile .w3-btn,.w3-dropdown-hover.w3-mobile .w3-button,.w3-dropdown-click.w3-mobile,.w3-dropdown-click.w3-mobile .w3-btn,.w3-dropdown-click.w3-mobile .w3-button{width:100%}} +@media (max-width:768px){.w3-modal-content{width:500px}.w3-modal{padding-top:50px}} +@media (min-width:993px){.w3-modal-content{width:900px}.w3-hide-large{display:none!important}.w3-sidenav.w3-collapse,.w3-sidebar.w3-collapse{display:block!important}} +@media (max-width:992px) and (min-width:601px){.w3-hide-medium{display:none!important}} +@media (max-width:992px){.w3-sidenav.w3-collapse,.w3-sidebar.w3-collapse{display:none}.w3-main{margin-left:0!important;margin-right:0!important}} +.w3-top,.w3-bottom{position:fixed;width:100%;z-index:1}.w3-top{top:0}.w3-bottom{bottom:0} +.w3-overlay{position:fixed;display:none;width:100%;height:100%;top:0;left:0;right:0;bottom:0;background-color:rgba(0,0,0,0.5);z-index:2} +.w3-left{float:left!important}.w3-right{float:right!important} +.w3-tiny{font-size:10px!important}.w3-small{font-size:12px!important} +.w3-medium{font-size:15px!important}.w3-large{font-size:18px!important} +.w3-xlarge{font-size:24px!important}.w3-xxlarge{font-size:36px!important} 
+.w3-xxxlarge{font-size:48px!important}.w3-jumbo{font-size:64px!important} +.w3-vertical{word-break:break-all;line-height:1;text-align:center;width:0.6em} +.w3-left-align{text-align:left!important}.w3-right-align{text-align:right!important} +.w3-justify{text-align:justify!important}.w3-center{text-align:center!important} +.w3-display-topleft{position:absolute;left:0;top:0}.w3-display-topright{position:absolute;right:0;top:0} +.w3-display-bottomleft{position:absolute;left:0;bottom:0}.w3-display-bottomright{position:absolute;right:0;bottom:0} +.w3-display-middle{position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);-ms-transform:translate(-50%,-50%)} +.w3-display-left{position:absolute;top:50%;left:0%;transform:translate(0%,-50%);-ms-transform:translate(-0%,-50%)} +.w3-display-right{position:absolute;top:50%;right:0%;transform:translate(0%,-50%);-ms-transform:translate(0%,-50%)} +.w3-display-topmiddle{position:absolute;left:50%;top:0;transform:translate(-50%,0%);-ms-transform:translate(-50%,0%)} +.w3-display-bottommiddle{position:absolute;left:50%;bottom:0;transform:translate(-50%,0%);-ms-transform:translate(-50%,0%)} +.w3-display-container:hover .w3-display-hover{display:block}.w3-display-container:hover span.w3-display-hover{display:inline-block}.w3-display-hover{display:none} +.w3-display-position{position:absolute} +.w3-circle{border-radius:50%!important} +.w3-round-small{border-radius:2px!important}.w3-round,.w3-round-medium{border-radius:4px!important} +.w3-round-large{border-radius:8px!important}.w3-round-xlarge{border-radius:16px!important} +.w3-round-xxlarge{border-radius:32px!important}.w3-round-jumbo{border-radius:64px!important} +.w3-border-0{border:0!important}.w3-border{border:1px solid #ccc!important} +.w3-border-top{border-top:1px solid #ccc!important}.w3-border-bottom{border-bottom:1px solid #ccc!important} +.w3-border-left{border-left:1px solid #ccc!important}.w3-border-right{border-right:1px solid #ccc!important} 
+.w3-margin{margin:16px!important}.w3-margin-0{margin:0!important} +.w3-margin-top{margin-top:16px!important}.w3-margin-bottom{margin-bottom:16px!important} +.w3-margin-left{margin-left:16px!important}.w3-margin-right{margin-right:16px!important} +.w3-section{margin-top:16px!important;margin-bottom:16px!important} +.w3-padding-tiny{padding:2px 4px!important}.w3-padding-small{padding:4px 8px!important} +.w3-padding-medium,.w3-padding,.w3-form{padding:8px 16px!important} +.w3-padding-large{padding:12px 24px!important}.w3-padding-xlarge{padding:16px 32px!important} +.w3-padding-xxlarge{padding:24px 48px!important}.w3-padding-jumbo{padding:32px 64px!important} +.w3-padding-4{padding-top:4px!important;padding-bottom:4px!important} +.w3-padding-8{padding-top:8px!important;padding-bottom:8px!important} +.w3-padding-12{padding-top:12px!important;padding-bottom:12px!important} +.w3-padding-16{padding-top:16px!important;padding-bottom:16px!important} +.w3-padding-24{padding-top:24px!important;padding-bottom:24px!important} +.w3-padding-32{padding-top:32px!important;padding-bottom:32px!important} +.w3-padding-48{padding-top:48px!important;padding-bottom:48px!important} +.w3-padding-64{padding-top:64px!important;padding-bottom:64px!important} +.w3-padding-128{padding-top:128px!important;padding-bottom:128px!important} +.w3-padding-0{padding:0!important} +.w3-padding-top{padding-top:8px!important}.w3-padding-bottom{padding-bottom:8px!important} +.w3-padding-left{padding-left:16px!important}.w3-padding-right{padding-right:16px!important} +.w3-topbar{border-top:6px solid #ccc!important}.w3-bottombar{border-bottom:6px solid #ccc!important} +.w3-leftbar{border-left:6px solid #ccc!important}.w3-rightbar{border-right:6px solid #ccc!important} +.w3-row-padding,.w3-row-padding>.w3-half,.w3-row-padding>.w3-third,.w3-row-padding>.w3-twothird,.w3-row-padding>.w3-threequarter,.w3-row-padding>.w3-quarter,.w3-row-padding>.w3-col{padding:0 8px} +.w3-spin{animation:w3-spin 2s infinite 
linear;-webkit-animation:w3-spin 2s infinite linear} +@-webkit-keyframes w3-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}} +@keyframes w3-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}} +.w3-container{padding:0.01em 16px} +.w3-panel{padding:0.01em 16px;margin-top:16px!important;margin-bottom:16px!important} +.w3-example{background-color:#f1f1f1;padding:0.01em 16px} +.w3-code,.w3-codespan{font-family:Consolas,"courier new";font-size:16px} +.w3-code{line-height:1.4;width:auto;background-color:#fff;padding:8px 12px;border-left:4px solid #4CAF50;word-wrap:break-word} +.w3-codespan{color:crimson;background-color:#f1f1f1;padding-left:4px;padding-right:4px;font-size:110%} +.w3-example,.w3-code{margin:20px 0}.w3-card{border:1px solid #ccc} +.w3-card-2,.w3-example{box-shadow:0 2px 4px 0 rgba(0,0,0,0.16),0 2px 10px 0 rgba(0,0,0,0.12)!important} +.w3-card-4,.w3-hover-shadow:hover{box-shadow:0 4px 8px 0 rgba(0,0,0,0.2),0 6px 20px 0 rgba(0,0,0,0.19)!important} +.w3-card-8{box-shadow:0 8px 16px 0 rgba(0,0,0,0.2),0 6px 20px 0 rgba(0,0,0,0.19)!important} +.w3-card-12{box-shadow:0 12px 16px 0 rgba(0,0,0,0.24),0 17px 50px 0 rgba(0,0,0,0.19)!important} +.w3-card-16{box-shadow:0 16px 24px 0 rgba(0,0,0,0.22),0 25px 55px 0 rgba(0,0,0,0.21)!important} +.w3-card-24{box-shadow:0 24px 24px 0 rgba(0,0,0,0.2),0 40px 77px 0 rgba(0,0,0,0.22)!important} +.w3-animate-fading{-webkit-animation:fading 10s infinite;animation:fading 10s infinite} +@-webkit-keyframes fading{0%{opacity:0}50%{opacity:1}100%{opacity:0}} +@keyframes fading{0%{opacity:0}50%{opacity:1}100%{opacity:0}} +.w3-animate-opacity{-webkit-animation:opac 0.8s;animation:opac 0.8s} +@-webkit-keyframes opac{from{opacity:0} to{opacity:1}} +@keyframes opac{from{opacity:0} to{opacity:1}} +.w3-animate-top{position:relative;-webkit-animation:animatetop 0.4s;animation:animatetop 0.4s} 
+@-webkit-keyframes animatetop{from{top:-300px;opacity:0} to{top:0;opacity:1}} +@keyframes animatetop{from{top:-300px;opacity:0} to{top:0;opacity:1}} +.w3-animate-left{position:relative;-webkit-animation:animateleft 0.4s;animation:animateleft 0.4s} +@-webkit-keyframes animateleft{from{left:-300px;opacity:0} to{left:0;opacity:1}} +@keyframes animateleft{from{left:-300px;opacity:0} to{left:0;opacity:1}} +.w3-animate-right{position:relative;-webkit-animation:animateright 0.4s;animation:animateright 0.4s} +@-webkit-keyframes animateright{from{right:-300px;opacity:0} to{right:0;opacity:1}} +@keyframes animateright{from{right:-300px;opacity:0} to{right:0;opacity:1}} +.w3-animate-bottom{position:relative;-webkit-animation:animatebottom 0.4s;animation:animatebottom 0.4s} +@-webkit-keyframes animatebottom{from{bottom:-300px;opacity:0} to{bottom:0px;opacity:1}} +@keyframes animatebottom{from{bottom:-300px;opacity:0} to{bottom:0;opacity:1}} +.w3-animate-zoom {-webkit-animation:animatezoom 0.6s;animation:animatezoom 0.6s} +@-webkit-keyframes animatezoom{from{-webkit-transform:scale(0)} to{-webkit-transform:scale(1)}} +@keyframes animatezoom{from{transform:scale(0)} to{transform:scale(1)}} +.w3-animate-input{-webkit-transition:width 0.4s ease-in-out;transition:width 0.4s ease-in-out}.w3-animate-input:focus{width:100%!important} +.w3-opacity,.w3-hover-opacity:hover{opacity:0.60;-webkit-backface-visibility:hidden} +.w3-opacity-off,.w3-hover-opacity-off:hover{opacity:1;-webkit-backface-visibility:hidden} +.w3-opacity-max{opacity:0.25;-webkit-backface-visibility:hidden} +.w3-opacity-min{opacity:0.75;-webkit-backface-visibility:hidden} +.w3-greyscale-max,.w3-grayscale-max,.w3-hover-greyscale:hover,.w3-hover-grayscale:hover{-webkit-filter:grayscale(100%);filter:grayscale(100%)} +.w3-greyscale,.w3-grayscale{-webkit-filter:grayscale(75%);filter:grayscale(75%)} +.w3-greyscale-min,.w3-grayscale-min{-webkit-filter:grayscale(50%);filter:grayscale(50%)} 
+.w3-sepia{-webkit-filter:sepia(75%);filter:sepia(75%)} +.w3-sepia-max,.w3-hover-sepia:hover{-webkit-filter:sepia(100%);filter:sepia(100%)} +.w3-sepia-min{-webkit-filter:sepia(50%);filter:sepia(50%)} +.w3-text-shadow{text-shadow:1px 1px 0 #444}.w3-text-shadow-white{text-shadow:1px 1px 0 #ddd} +.w3-transparent{background-color:transparent!important} +.w3-hover-none:hover{box-shadow:none!important;background-color:transparent!important} +/* Colors */ +.w3-amber,.w3-hover-amber:hover{color:#000!important;background-color:#ffc107!important} +.w3-aqua,.w3-hover-aqua:hover{color:#000!important;background-color:#00ffff!important} +.w3-blue,.w3-hover-blue:hover{color:#fff!important;background-color:#2196F3!important} +.w3-light-blue,.w3-hover-light-blue:hover{color:#000!important;background-color:#87CEEB!important} +.w3-brown,.w3-hover-brown:hover{color:#fff!important;background-color:#795548!important} +.w3-cyan,.w3-hover-cyan:hover{color:#000!important;background-color:#00bcd4!important} +.w3-blue-grey,.w3-hover-blue-grey:hover,.w3-blue-gray,.w3-hover-blue-gray:hover{color:#fff!important;background-color:#607d8b!important} +.w3-green,.w3-hover-green:hover{color:#fff!important;background-color:#4CAF50!important} +.w3-light-green,.w3-hover-light-green:hover{color:#000!important;background-color:#8bc34a!important} +.w3-indigo,.w3-hover-indigo:hover{color:#fff!important;background-color:#3f51b5!important} +.w3-khaki,.w3-hover-khaki:hover{color:#000!important;background-color:#f0e68c!important} +.w3-lime,.w3-hover-lime:hover{color:#000!important;background-color:#cddc39!important} +.w3-orange,.w3-hover-orange:hover{color:#000!important;background-color:#ff9800!important} +.w3-deep-orange,.w3-hover-deep-orange:hover{color:#fff!important;background-color:#ff5722!important} +.w3-pink,.w3-hover-pink:hover{color:#fff!important;background-color:#e91e63!important} +.w3-purple,.w3-hover-purple:hover{color:#fff!important;background-color:#9c27b0!important} 
+.w3-deep-purple,.w3-hover-deep-purple:hover{color:#fff!important;background-color:#673ab7!important} +.w3-red,.w3-hover-red:hover{color:#fff!important;background-color:#f44336!important} +.w3-sand,.w3-hover-sand:hover{color:#000!important;background-color:#fdf5e6!important} +.w3-teal,.w3-hover-teal:hover{color:#fff!important;background-color:#009688!important} +.w3-yellow,.w3-hover-yellow:hover{color:#000!important;background-color:#ffeb3b!important} +.w3-white,.w3-hover-white:hover{color:#000!important;background-color:#fff!important} +.w3-black,.w3-hover-black:hover{color:#fff!important;background-color:#000!important} +.w3-grey,.w3-hover-grey:hover,.w3-gray,.w3-hover-gray:hover{color:#000!important;background-color:#9e9e9e!important} +.w3-light-grey,.w3-hover-light-grey:hover,.w3-light-gray,.w3-hover-light-gray:hover{color:#000!important;background-color:#f1f1f1!important} +.w3-dark-grey,.w3-hover-dark-grey:hover,.w3-dark-gray,.w3-hover-dark-gray:hover{color:#fff!important;background-color:#616161!important} +.w3-pale-red,.w3-hover-pale-red:hover{color:#000!important;background-color:#ffdddd!important} +.w3-pale-green,.w3-hover-pale-green:hover{color:#000!important;background-color:#ddffdd!important} +.w3-pale-yellow,.w3-hover-pale-yellow:hover{color:#000!important;background-color:#ffffcc!important} +.w3-pale-blue,.w3-hover-pale-blue:hover{color:#000!important;background-color:#ddffff!important} +.w3-text-amber,.w3-hover-text-amber:hover{color:#ffc107!important} +.w3-text-aqua,.w3-hover-text-aqua:hover{color:#00ffff!important} +.w3-text-blue,.w3-hover-text-blue:hover{color:#2196F3!important} +.w3-text-light-blue,.w3-hover-text-light-blue:hover{color:#87CEEB!important} +.w3-text-brown,.w3-hover-text-brown:hover{color:#795548!important} +.w3-text-cyan,.w3-hover-text-cyan:hover{color:#00bcd4!important} +.w3-text-blue-grey,.w3-hover-text-blue-grey:hover,.w3-text-blue-gray,.w3-hover-text-blue-gray:hover{color:#607d8b!important} 
+.w3-text-green,.w3-hover-text-green:hover{color:#4CAF50!important} +.w3-text-light-green,.w3-hover-text-light-green:hover{color:#8bc34a!important} +.w3-text-indigo,.w3-hover-text-indigo:hover{color:#3f51b5!important} +.w3-text-khaki,.w3-hover-text-khaki:hover{color:#b4aa50!important} +.w3-text-lime,.w3-hover-text-lime:hover{color:#cddc39!important} +.w3-text-orange,.w3-hover-text-orange:hover{color:#ff9800!important} +.w3-text-deep-orange,.w3-hover-text-deep-orange:hover{color:#ff5722!important} +.w3-text-pink,.w3-hover-text-pink:hover{color:#e91e63!important} +.w3-text-purple,.w3-hover-text-purple:hover{color:#9c27b0!important} +.w3-text-deep-purple,.w3-hover-text-deep-purple:hover{color:#673ab7!important} +.w3-text-red,.w3-hover-text-red:hover{color:#f44336!important} +.w3-text-sand,.w3-hover-text-sand:hover{color:#fdf5e6!important} +.w3-text-teal,.w3-hover-text-teal:hover{color:#009688!important} +.w3-text-yellow,.w3-hover-text-yellow:hover{color:#d2be0e!important} +.w3-text-white,.w3-hover-text-white:hover{color:#fff!important} +.w3-text-black,.w3-hover-text-black:hover{color:#000!important} +.w3-text-grey,.w3-hover-text-grey:hover,.w3-text-gray,.w3-hover-text-gray:hover{color:#757575!important} +.w3-text-light-grey,.w3-hover-text-light-grey:hover,.w3-text-light-gray,.w3-hover-text-light-gray:hover{color:#f1f1f1!important} +.w3-text-dark-grey,.w3-hover-text-dark-grey:hover,.w3-text-dark-gray,.w3-hover-text-dark-gray:hover{color:#3a3a3a!important} +.w3-border-amber,.w3-hover-border-amber:hover{border-color:#ffc107!important} +.w3-border-aqua,.w3-hover-border-aqua:hover{border-color:#00ffff!important} +.w3-border-blue,.w3-hover-border-blue:hover{border-color:#2196F3!important} +.w3-border-light-blue,.w3-hover-border-light-blue:hover{border-color:#87CEEB!important} +.w3-border-brown,.w3-hover-border-brown:hover{border-color:#795548!important} +.w3-border-cyan,.w3-hover-border-cyan:hover{border-color:#00bcd4!important} 
+.w3-border-blue-grey,.w3-hover-border-blue-grey:hover,.w3-border-blue-gray,.w3-hover-border-blue-gray:hover{border-color:#607d8b!important} +.w3-border-green,.w3-hover-border-green:hover{border-color:#4CAF50!important} +.w3-border-light-green,.w3-hover-border-light-green:hover{border-color:#8bc34a!important} +.w3-border-indigo,.w3-hover-border-indigo:hover{border-color:#3f51b5!important} +.w3-border-khaki,.w3-hover-border-khaki:hover{border-color:#f0e68c!important} +.w3-border-lime,.w3-hover-border-lime:hover{border-color:#cddc39!important} +.w3-border-orange,.w3-hover-border-orange:hover{border-color:#ff9800!important} +.w3-border-deep-orange,.w3-hover-border-deep-orange:hover{border-color:#ff5722!important} +.w3-border-pink,.w3-hover-border-pink:hover{border-color:#e91e63!important} +.w3-border-purple,.w3-hover-border-purple:hover{border-color:#9c27b0!important} +.w3-border-deep-purple,.w3-hover-border-deep-purple:hover{border-color:#673ab7!important} +.w3-border-red,.w3-hover-border-red:hover{border-color:#f44336!important} +.w3-border-sand,.w3-hover-border-sand:hover{border-color:#fdf5e6!important} +.w3-border-teal,.w3-hover-border-teal:hover{border-color:#009688!important} +.w3-border-yellow,.w3-hover-border-yellow:hover{border-color:#ffeb3b!important} +.w3-border-white,.w3-hover-border-white:hover{border-color:#fff!important} +.w3-border-black,.w3-hover-border-black:hover{border-color:#000!important} +.w3-border-grey,.w3-hover-border-grey:hover,.w3-border-gray,.w3-hover-border-gray:hover{border-color:#9e9e9e!important} +.w3-border-light-grey,.w3-hover-border-light-grey:hover,.w3-border-light-gray,.w3-hover-border-light-gray:hover{border-color:#f1f1f1!important} +.w3-border-dark-grey,.w3-hover-border-dark-grey:hover,.w3-border-dark-gray,.w3-hover-border-dark-gray:hover{border-color:#616161!important} 
+.w3-border-pale-red,.w3-hover-border-pale-red:hover{border-color:#ffe7e7!important}.w3-border-pale-green,.w3-hover-border-pale-green:hover{border-color:#e7ffe7!important} +.w3-border-pale-yellow,.w3-hover-border-pale-yellow:hover{border-color:#ffffcc!important}.w3-border-pale-blue,.w3-hover-border-pale-blue:hover{border-color:#e7ffff!important} \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..09d6c44 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,116 @@ +version: "3.9" +networks: + llm_network: + driver: bridge +services: + nginx: + container_name: ${APP_PREFIX}_reverseproxy + image: nginxinc/nginx-unprivileged #:1.25-alpine + restart: always + ports: + - "81:8080" + volumes: + - type: bind + source: ./nginx.conf + target: /etc/nginx/nginx.conf + networks: + - llm_network + #minio: + # container_name: ${APP_PREFIX}_minio + # #image: docker.io/bitnami/minio #:2022 + # image: minio/minio + # ports: + # - '9000:9000' + # - '9001:9001' + # networks: + # - llm_network + # volumes: + # - 'minio_data:/data' + # environment: + # - MINIO_ROOT_USER=root + # - MINIO_ROOT_PASSWORD=rootrootroot + # - MINIO_DEFAULT_BUCKETS=defaultbucket + # command: server --console-address ":9001" /data + + elasticsearch: + container_name: ${APP_PREFIX}_elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0 + restart: always + ports: + - "9200:9200" + environment: + - discovery.type=single-node + - xpack.security.enabled=false + - logger.level=ERROR + volumes: + - esdata:/usr/share/elasticsearch/data + ulimits: + memlock: + soft: -1 + hard: -1 + networks: + - llm_network + + + ollama: + container_name: ${APP_PREFIX}_ollama + image: ollama/ollama:latest + restart: always + ports: + - "11434:11434" + volumes: + - .:/code + - ./ollama/ollama:/root/.ollama + networks: + - llm_network + #command: "ollama pull llama2" + + + ollama-webui: + image: ghcr.io/ollama-webui/ollama-webui:main + container_name: 
ollama-webui + volumes: + - ./ollama/ollama-webui:/app/backend/data + depends_on: + - ollama + ports: + - 8888:8080 + environment: + - '/ollama/api=http://ollama:11434/api' + extra_hosts: + - host.docker.internal:host-gateway + restart: unless-stopped + networks: + - llm_network + + + + #frontend: + # container_name: ${APP_PREFIX}_frontend + # image: ${APP_PREFIX}-vue-frontend + # restart: always + # ports: + # - "3000:3000" + # build: ./frontend + backend: + container_name: ${APP_PREFIX}_backend + image: ${APP_PREFIX}-python-backend + restart: always + ports: + - "5000:5000" + env_file: + - backend/backend.env + build: ./backend + networks: + - llm_network + depends_on: + - elasticsearch + - ollama + #- minio + +volumes: + esdata: + driver: local + filedata: + minio_data: + driver: local diff --git a/nginx.conf b/nginx.conf new file mode 100644 index 0000000..c86ff9e --- /dev/null +++ b/nginx.conf @@ -0,0 +1,59 @@ +worker_processes 1; +pid /tmp/nginx.pid; + +events { + worker_connections 1024; +} + +http { + log_format main '$time_iso8601 :: $status :: $request'; + access_log /dev/stdout main; + error_log /dev/stderr error; + + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + default_type application/octet-stream; + + sendfile on; + send_timeout 600; + tcp_nopush on; + proxy_read_timeout 300; + client_max_body_size 100m; + + server { + server_name localhost; + listen 8080; + resolver 127.0.0.11; + + # Frontend + # location / { + # proxy_pass http://frontend:3000; + # } + + #location /epdm/chat/frontend { + # rewrite ^/epdm/chat/frontend/assets/(.*) $1 break; + # proxy_pass http://frontend:3000; + #} + + #location /epdm/chat/frontend/assets { + # rewrite ^/epdm/chat/frontend/assets/(.*) $1 break; + # proxy_pass http://frontend:3000/assets/$uri$is_args$args; + #} + + # Node Backend API Server + #location /epdm/chat/backend { + 
# rewrite ^/epdm/chat/backend(.*) $1 break; + # proxy_pass http://backend:8000$uri$is_args$args; + #} + + # Python Backend + location /backend { + rewrite ^/backend/?(.*)$ /$1 break; + proxy_pass http://backend:5000$uri$is_args$args; + } + } +}