From 7623f5a82a046280cd0f04d3cd02e7496d45f689 Mon Sep 17 00:00:00 2001
From: Tobias Weise
Date: Thu, 5 Sep 2024 01:10:50 +0200
Subject: [PATCH] some clean up

---
 backend/amdgpu-install_6.1.60103-1_all.deb | Bin 16948 -> 0 bytes
 backend/old_app.py                         | 1322 --------------------
 2 files changed, 1322 deletions(-)
 delete mode 100644 backend/amdgpu-install_6.1.60103-1_all.deb
 delete mode 100644 backend/old_app.py

diff --git a/backend/amdgpu-install_6.1.60103-1_all.deb b/backend/amdgpu-install_6.1.60103-1_all.deb
deleted file mode 100644
index 7c0b124cf5016612c623b047d1fa950c05121436..0000000000000000000000000000000000000000
Binary files a/backend/amdgpu-install_6.1.60103-1_all.deb and /dev/null differ
zB<#Rw`cw-EZ^tTEuvH>2Xd18k0Es4*NTZ!)xFlL^hU_N~Kw;%#Y1oN3JcD?5>SIqT z@qQk;<_i6)+-*DF4m^Rhjy%{uTq|)E9Qi4o-XnwzHo;ji#46cntVZ4pfq6b9T6tX5 zED{_|N8b0FS~0Cj(@UMYWFfog!DOkfJLmOXeOS%&jP)q)!lY!;oEEU!peB=7Eyr+^ zL^Q6@$85FpKQ&c3UWs2M%>aN)N{4g1MsuZ24si^N`Q z1tmg7%Ft4iv}lhOnwHP0u*w`0lD9$AOwA|wl<6+;s|lI|!D9L)esbRzW4H(|tvU^^ zqI+9UMcY@>4Zy#-fGn@l_^qUYrFnZ5JSZ`Sl$(HKfiR=vpS?y<0uEIUcBY{#SJJo| zqh$GPDtscJ5uvkNHO?>tk13hh!uEAw1Y1L%FczUA<-0>*Je9Z$?jN7Xm|rWz`{6*R z2!|T5Oefx2op-@cK%F`U)@+-wjL##URjbKhvAIzA7=HMZWjx)S9iW!Ormx2 zI3Qv_A;%{c2B{pp%w=I|5O?5Ib1~-{moYpNS48^OP1Xk-tSQHl9Q zAy>=WdkF%~sT%yy?!cpwQihAVJGge(fSF;RoDc12D|iy)BAi8B-U2TnVZO8A(@P)# z^O-6N9x>D$L!)C7IN-U<4`Vd1qz^un3Kt?bg^SHb8RuA}+nT@C|A1a5KyRcE{d=4( zz_zJU+#);kBmlkf5|?$WNFJYFD{F}O{2i)3yFr}Es(dXo8vLZgCJJkE;n_%s5CS#b zD$pH1WKR#rAcMQp0{l5R*8EXrZWG+au6Ed37L)Q(iyVjl;a#gg-vzHZ&RjNg!E`HH zQz30R)3@_9rQJpnRC93pdAw4~o@6<3$z429Nfw6&M-E`hBir5fQQZjy$XQ;`&8;?%o5A#bqQ#vz!Jkfmv|K3eVE`7_ z#fBe&c^YxBDj0B9?hBZI4!xnYpd-dcW_h6E0b<+L=wmQ3V)kQ%3>$33`?r_BNfJ#F zj8ZLS_4S0*O}@h7Wt{IUN6bQ!Bd^f2Ic5nHNwv{;TD@~g)~p1t zH6hTvM-ImVC{^K;hvj{D02gYoix63}*?i%PRb81;qFBwXp38>`6mm;`#dtTQHBd*1qW17uI+vSOe`n+a|eY zEBYmYM1};K(p1;3Sb=KHiQO`434uk<2IDU-HCXyhR3aMZpObAi(usP;%s@6>+$zI z82Cm*pCbu}-XBQcwK@&E>YJC3F@=oYJ_c#$4!hPmnzCbTSA{~G)DfU`_oEn<7$ zD+QEhms@@`J6#!{I9WTs1n+asA#{OyMQ3G(7w7&h!`Xj?9dIVMQEO(%*)|+^%DVfx zCqYqS7Y%g_k@F0|nbfFruFrFD;c9O~`>>FDzL-*3R&+vy5H^WBO6i_C&D-AdB)khb z!S=$r5w@TBLF$NHJ^={OJAoqsr>d4-XtDuBY)+Ha`n`D0-$Q+aHw7QoS@-O@K@+0` zt9h0VgfA;KB_0xR`n#`{E&2De>vjzAq>0^tnq~{wiFF%0Qg!-N)f-m@;X9O)9;kBI8_&ly+7DRRy|Uv4Oq4141R{`_GkfG+z+Jq0meo;XQB+_Ep8 zJe*UhN+0jJX4y&Lo+&MA4jqm|j=^{Xfzz7VOY%D2#kU-qtZie*R+1pgOhS5WSb}?F z925V8NMNq^iV@;>ehWJzFq*|zd?;+OJlh$xU=RPuw)X8(bVQ%{HmX=3btU-N(tU}S zg`~u+a9{a`t9jHAWFK3H3}{O{J(;&Khuw}9F&-~8?0&GNMM9~X8%Ao{gl1xu^DgX< zvwsr^mbSrnlLCxsY}YymLy;ABvJ+kduzImYQw=na2`$Nj67$c{m-r$R6tE9qgP7VU ziZ(DGJB|)fP&H_?_^qxALCJH$yUeLc?;q|Q3dk^RX^#Q2C;eioUHdd`&oSIwI3G6e zt=m1xrS0hP4d4TbrmU4^XV zo(?D%j1|8tItX+?C*En4thBhfy-0@uuBt>Xo=8;8>PW; z(tg^B+v#l3YK)_OG)Q|71xBdsLerAmOmOw5G6$D*H*4m=HEJHsi#GgMS^j#>ryRRr zwjF*XR*BU_;m{Ml0{<+eA1COxrGDKZUbvH*eopEg`!eK?Ps0HSn!)0lNgxo)bhQ+?aNugTTGYjiyl3_Q)yPQ z-4@o6iQJIAMk0#2J1;arIbdkP6a}aH^yUi(M6Xs*ggezxD(RCVmR4*|k=!X1O5F3% zDZz@0K|?Z}N{flVLdTu_IzCuKJ+z4Z(%ns{9Xwkv+Yf?`&!OQZt_QGFi2 zThRU(xmbVS$8O6<&LP^j9`UARMYspSE2wkIeVLr$Xa6T4fSbe5%hkAUyOWQ7Vp-FF z0`{s;7mgGK8ydAxoy9AjuB^=I6EUd=skI#yQ<&)MFwRd2c>*Q2+f(lJ~VX~!L6 z=GVV{zAyOHyv82SaU;~`iN8J)7KBjn;(?`w+hLsW9#9)RV!do0zt3}t>hbc-fa0de zW^ZBjn3q~cL}#?piNIze=0dN zjllA?1s0Zv2Gx#}?d}xEoO0NRy8`mHG*?_9E%Q!Ln=MH1%>E>fuud#)vacQ9fu#FZ`Jn7Yf6;=G!=c*J^SkZ z9@|()=2p$Al@l~v==ZXA5?Ml7bf9DC1->bt>%slO?;sL@61L;|KvY`ya{aWX$4|?k zHKyt|Y_EirA&mR7<0cshl}La-I4 zriiOT<7Dr@lrl`WMAXIl5c{ z`{e-*IPPwe!mY8hol0`MeTa2A!ABly`Jn@=H{=@v+-za%*Ew9RR5JVma$mycP4wQI z))%pvpDp2Rp7rdH8b>@$6%gV4$l7wM&OlZ0mwUTI3sbXK-RWQ^&WLt%(AQ?imOLZ1xW7V6Lk(;QF0g6^h-_f3$l)P&#FIeW z5tRxo+!v)*Z{pj;4aQ_H>|)MUYdh-THd#S>_E;taM*bxEP$Nk8q*W;x-zqhVjNJ#k zz*<>xq_CKpGdOs%+s@HM!WGJ` zKA+f2zvBX%Low@ukw@eoMkIFFYC}WHb_e*L92B!U0uTlA1+Y5g+wuwY`q{w@rX$SxP?od2!-i!W_x1)ww8HpZ`>*dVGw@MXX3dj}4UZZNn{vQ72s;qU zDf5QA7WQ~LaE+p#fPSXQ4rM2|&%X3HxEjfNo$YWeFNImx=k66HfC>E<(+JIfcHDwH zt5R$7*@U>s@lN>!yASDn{5aKh zJS$#uzi%?@h>V`AQr~OvU5>;MzBPP*1I2EJNIhUe9vAA`T*-R4a*q)Ke=1*r3m0-Ht^d33dEg*ljbC2q0fLt=QX$tWu<)M>u+X(a0yQDtzv zHq_l#!_rPh3y}eLHQ#fX4K|K)t2PUIvkp>SRv1Ppz=QsD`9jxRJ}b}a=qM>shjpe- z!ZQ4T!I55D-}~PQ&c$*1ZQt{2KiS;jdTWzuN)+ctjHmUGtVnO;_J9crLuT4&3k%eK z>+Yoqi}p%uBN#yMAWff5NInE%acHkekHYgjuZDk>Zqqx1mX)>WLOhttT{!c@Yr2V% z6i1^|I0xYONz*i^ZR_X+{Cu8G>yD7c1E@>M%O^fF!C*f7Vs=1L5U6TYh7YGor7o#F 
z#7nvD1B(*nU)4(En-vn(yd&X)x?E~lO`?4(&d#d2yMFs>#?F5>)A277bEfoYoS+bZ-XGpVZ zaJG&-sd?|bbedeWPS15YAp}k`*m_O2p6jbkJ~V&+k+j&S^#_{BtT-=}DU3D|EDpus z&Dodvnr%`|2{&+rYI!80*p)CMPOPB$4=)17i?sCJ?dw{LeRddbheqT%t{ZmcdI*l$Ua|AhuxC;VG$N8N2#+~A5 z>AV-7mW1#D4G(s((9=f^*~v|rTccj9w(KM5UQgRf9ZPvD8r){ZEpQ&KH0)n0?#ina zC1Jg|tlSTeP;;aB?y7wPayd|qUEyE`GwaWS-cb5*)E$9@;(p?tEg79Pn1$bW)C*P^ z{WH^56!VobV|M2u!#>$i)FqQ0?__&#~nB$OyAn&E@jy>fx19NdqruYt{@!a zLUkvzb+isuRW|mv1_Pm_$Qo6+i95?(ft0n@hV}%+|LRzK39V;ZP_YFdRC`Qz!lx_s zx$!J|;xt9A->t?@A>4bC+6G?eQozZnfF1n64NJ|-@FG66$6kK*(BmGI*YPKRFR#$Z zg-C9w+VrlVo3Z)C_a1BJvAohzx=F65vOjmO;jz$A)3)1rb1u8AWAn+^9b_$u{>ooi zl%P=J`0ZOw)&c%7xZ(FXuAoAH36kQrXPXKUNic}zg|~+}=0;{}bv;CNglUe!X{fY< ztWw>wJCK`tb4FUfeZs_3W`R$uB4cy`On+1`OZx%AN2$R~jjjEk*y5WPkl^PxiU$YT z0;6gYIMm2CIx$q4xsu zUj|Kg!z%R}0_B*GNkU-=v($#3Jz4!Sz)V>cY;4kgT#K@y3LrMyG0Dq2_e^|j(T-NZq*%bq4%g;ek|szUjf4zbd?qJ|S&W1w?cdsKlfqf~G}BX+6#h<1Yo zUJg$2;?CY=by6nj-pfDaJu-gkVynK0q4N^MsqApnO3E$tmX!0*?HFNO`VYtvKRdu@ zCuR52{soTrI{>9^Y(iIi$n11`4j<9n;5wQrOUaoF1M=vK6D3XZX ztv#w5$>^c6@vTJ0ud4jJt7Qq@5BrZD&INbH*UhZV)Rs?bKiW23C{s-Spm#v-Hrn0p zRY8!ZN5uy@0)(F#;wqwh1LEk!A2*!Z zjSng)$(HZYRV_9LIXIb5M)k|w2=8q78n8l_OG=v-L2ilRK!8tU^QLs~GDIN3 zRr$uGWkb@w4iFm@5g})f#v9bVO2LQ=tTK_tFV19o{B2yd>*jDqK*!mGXvz6)$i{B> zBO6TfM;3ZAmhWb&JF0Zok%BZf0XJN5xbKmqmZl~t_5U@QF=zfG(D9CJA^Yhtg2$mv zLM1BexizwW3^5ZDy~_$B-sU@c;^MXr0(Axo#AZ*L+RXrCk~uh(idWeoy)bTk>VyRw zl2P)1_v<5oJOaY=G0|!+U#G*{aQ!V$!MxVw%m0qA})!n1wO_3rk1_@_Zx#7_qJ_Bd!dj1SQ=WKQMcf0$ zw*8jK{jss>7k147z+0o@KSEfIPk8M&@S05dFPk|eq0Fx{PymY?*m_oyJ|&!3R~t|~!GyL(8{`APu2)ZHjD8vWZySEk6`Kv;v`4T@>JUU>>glFgu8w_a3+xqf(I|YIe zPHnKcRq=#=eUW+rr+LN2BEq?NxNLDgldIwJsPSg8<@b)2Su$Mi7B306|9Z@<=QqEa zHTAry4v^0i-2Na)XM{h)JHJ9NW(QIFhJX;8g!$_nG{G+JRYW53=bFPpm5O6MpUs}} z9oBLRb)Y$Lv5U*;8QUGcIp@3#!XWO2-`G|qk|fs?PSEhq$bsM&FSvRkJ2wfLtozgK z5)QSOL2)YB&|);`%S(<=d1#5eH<-+>?`P4nwCw<{Tv2-1ZXsH8Z?t6O>T#}G zj-SUqr6*kE_%3_VKfbq9zLcZ_f28ldTqombe1R}f3T#fIpB(rICdh2Pc!wfF|NvmnogXVHwVzmCN z;)kH7flD&yaCxteFZ-M{S+LT3Lyeu zl|FBaK+6i?0XCWErygaGFqRpYh25Wgx|xBosAK)fRN{yxp5m!{- zi@a$EfYu?XflF^hW}&ovtMmpXHrQw&B+!dRf={$3O%XZB)4Yd1O8~e>+8K0ogO>}3 zTW}EMWCjaXKtlteYNzT39Xv1;o%=wyi5)ao2H+MmH}Zu@6({&ETcF~stx}C{ zEV4K7f5Hr6Z5+#=rv2^ZgJ>A(XLJOxfX}iD(ALnCJA5oitTt4_z*8RiV6qZXp8 zM=Ybm;dP*F*vK=+I$iN*ZeAlFrTScIqu)JNs4Zpb+;5dfn1esq)kQx~Nf$C=<4sB2 zW(zhEA((o%d>2EweJ}y^g2>W?nxGZsa&cmirt<2UaD(N2Z&IvqAY~!uxl|qB-ggba zyUIEAV;*#07A4YlV^5``xN7)AU9@nr?CjZ_oz5&1-vnocX*H2foAnD76Apmi{dLCh zXvNCE4Ot^XU9eKf+spofKnKA*WtD$#$iX$`>UMoC0A+qlA`yHHjIKAN>?&CjY$guF zBRU9Fp}=UXKT5ipq43+sU#|DgEZE0N%#vQbxhi0J!#6C2+u&g-8S?8m_;bOlBoD2c zYFolIBvK07^2Ye7p)og`3E~~(9#Xb0rgOapALBpN;gh}_>awBmU-|#8MNI493_V|W z#+D|=Q)6us?I%n9vv=qH%WHgHmbW=ggzI{Xax()hsEfty)qBkn!bS0 z^Znaee=N26qk5hfgaNP70p-pvqiWODvA=lD=qW%Y|C@ga3{XhnBqCLAeq)kH z$lT5ezN}%McPE|tWuDr=lMQF>Rts&J zsMQy_-*NAkc4AHjz{@T90{R?2F2X|010pvU}k$q%*K zKoqXMtXLDnL-6T@yV!+&Xg#MP*si-E+|N^pQu1nYJ$SLFE8G636t!Wdf+ZvnwnyZU z3Ps_WRdEeq@jlA^7!Xi1-PaTi#Dj{s2EukD3CxRrtm+j-Q`HdmJi^P7cs2>=_)_U* zzio1TBp);=gw|F%ZVE$6$IP4T1VOit`LYQoUlXfx-OFTJpt;5=)u;BpELK#KYJd~4 zvj!?%DkOGi!jR~C70ZMjh0KL|AcCN+?dHESt&v%N?1kRo{pF@jWd*B}Z|<%)TCLCQ z2j?t_GR5oXC5l~{N&4AHfE$2Se&J}tnANAN zFr=@WYWQlbX|_gIB11^}HOo=Cn|c=C7?P$2%{<^e`ExGzX7G_sQmtu>{K4Q#7b%!G zWrduiHlpAew=@ADJ8AYHtDi+H!s$3et%Wads4~Wyn+tN9bds^vJN=~kLVUM~>$+ZD z$d92$;v44@G`Q@-0kTRD?QBK59|8TZSQGtq`3qF=8p($7ZV1Bqss2Wh6GqQQr3<^% zjjONP0HbwI=v(zK0%N)9heK0GtE@Y^niG%*zqXMssahC2<3OoKM)si+shk-~X1B&# zE_ek%^sOs%lZK4E5N$;kjMclA_=+I7438LEZR)^Fgf^Pb6a;d}ael@+*V-k%_3E!_ z8}1G0c2T>=FWDo@c2&%x5c`B+(@cehV&?%YOSL6czW3OF*$YU5^Nw~1yHOL&ZPuQF 
zg2{BD9)MwdN_=QutAZE==_u>mPLacVx5&cV?kH%0%MUpf$1iW~B)d^0k!>9*sQ=0Y zb5IXAv$mhIHlwfXQ@8jrRTOJtT3fbJL9mqfvy_NF3Vcr7EUnU@ ztVk02p6>A{rrYF(M&J}XpU2ieDhx{N1dVu)N}xgy zLGZc2rLgICMJo*Fh6YUBqhP6mD_DVFee5)nRR4stMb@PS#A3!=X_*bxs=pKVX6vzr zX>lxwtYyX!ws z39upIdHDSw-H&jHJ3!~6mxx=ABQY!#;Kk-07~O%Edipwjc`jPzfH1K$nHNOgiGKH5c*TBeI{#~@;ak zSwJ`KFF}{1AS!R`G4%s20dx+`r~m9$j@+yavWn<4rXc($Xr3V($;hJ5;Bgpr22Z*2 zq%%v6Qic=RQpG%M0=PsE=va^eU{>&q2m^XgF@&VHS%mx8wri0s9yuah9v?9Y#&PRd zMCzF4&EGIZ{Qe(i(#Y@DZQDn3p7ji`+iL6DY^+EVQUFQ$xL;O>@@D7F+QUw2nYBVh zrUO6KF|Oi(-`B70x0bQ+%?=B5Y0a^VlK0if1P|rW6)Jnz2VYVT|Mn>{hklVF&qxnZ zx{+rfN>B!Qv#AJGPe;TXUUL%h60v|F(A$1&Y?_1q79H8T2WyS$AY>{+QM;PJW>pvH zbFs4sya-Eg<8{!7Wx!G6yAeLFlQ*D0+L#tq->#nm4Vn$-xgnuMy!veAs!ABWd4jV4 zjKT3e(X+KA&UV>(c*5sSJfd&#lJjlR$e`7qkOfOj)^U-wxAXIM)4SEM=u3Y0b<-y9 zUnJNx*2L4I*MebU!fQb?UbBt~__JvF>XV3M^U%6Zk37+Bf*5(QJ9*+yg%iP_cJkHQ zo3qI#9YDErSJEVgqMxl^)3fY9HBdK}Rn5UHG1J}ev0vGIugaM1J=fSUY%0r9zMMlQ zV1)FNRvt(te5L`(B}#<|E?z$H#xo9ZXr6LRj)YgW&gcjUsA}Q*2|9o&+dB3$)q8%W zFGSJ2b)=h2g257!=?<@(J`gW#o5PRi)`Pn56f;cHMp7_rdgE66^F}9O#;oiinLj>~ zn+z?eCTrq<)Q4d7B)=%~Al;h0+DH<5Gn3#?DvrZa-|xIwuzJeIIF6ZM=E0MZN#6oR zHpfk5zA1TT)ywG`0KN7nTdC5cuQ6Mt3qQ(0P#BhJE~=~IX_Q)pEO26<{B2n3K7ik(TL;;Krx@_{0O2>w@CE6st;lmoBAKFoPe;PmGJIX36_7x)ixkO zS>IAL{!aAfk)MUih6+dGZ6(E#)gq`CpL>s87f58^VM9(Ca}XS$(cJp*%}q-|VWNg3b#1*Zt7yMvt&?t_J!kBdcOzSXmoJV1Uo&3mR%H7J9#s8=q~cbf3Q(D zzDw>u7Qnx1N#r(&m+n^mERN+3u}@d!MG>aP$9`xjjy)Lfz(Z^bGLNA>TE&c9S53jX z$O@bNl8uSvV;7*iCw;y)MV96dX>#{6C#YUKXhj>+$`e04#k(4TRwsisQ_paAppSHP zPJ-mFHb`keCrKE-c4%=l!K}f=h()VtB09SVrOL`TNGQC|X21;Omz-S$hTUCDACvC5 zTh!FUKX)8grt9(at6n{O>tS=d;}}2AdW_t>dW9~9*R@YF=GZLwp<{CU2*Rw3*Sr`f zQEv1zJ?t9521P68g_ujZ8009t(ycrJl2VV3s(njYtPsmScOA59Pe>(X&eOT>o-VVx zvSGRbwKe?UXXX=}iP;(K(( zt_Y%Va0n;_9t;=!_uDk$D3nF@QfiL#ZshUD?*R#Yqep`8%+_Ppp)nEXfj;)st-O^4 zYHdyS;VWPm#hDd7$yDxaj5Z(m>llILang*)KoS}6{;a8uRAvU<{RX7pmI*LR#4d%F z?lI@A3cDEX$t*W0U2R4>T9T2Z$vx&V&K^&|ZPhr|oOEH(K7X>Ymqmng~!SIQZ< zvbGJ?1fdw%#YymzyV5LH?W4t6-K>8>c`){Or)6t1OTQMt!_lw0C8tPPlAAHURcac0 zHfwx`$ZW5NBcKl2mrAuv|8!E=7}O3Z-W}12=}G-50D)7dVT=BpkDTb`B0Wf)*>)1T zO$#=!1ZwteK>56{J^N+|0lF%6Y-t0XKUmx)ZSI@7!vt1j?&bvK{(Y8<2m;=Oa0Gbi ziyL)cx>1~yM)Bht-b`lmPo&S4fFc1Oo(^e21~@qgrc3F?rGieH)axy6NW84c>($4Y zgr473fixh0^n62WBzJn}`L&eQ4V$ew2O(ASj^7tC&m;~@Z=ycSESkg>ai9OJY@Hhd zWL%}X8`Z)Z=&^>uo;RB%16w5Cc#{~!Z5EJRKBk$KlMwoY*kt0>B_ODk^eU&c0NZ+VdsU_!}ttzot z{@lyeqv-WEJT|L5!dE?4K1J1khA6~Xs?IU^jX#ouG|0C{JEAyHZalaRF4LD7Q+9Vc z2L8PAU_C^G`-_R8t-NB?){D78wPxJHAZ43ejARs6F7gVK22%0%E9GL(l@pS7v}G%W zpJjpY3OsT%L8zm%LN(-;y^IToWT=?k8Cc7B6A9TZwinI0#wlv!HWap}az1(2xHV?K zcpao`F% z`{Vll!)Kk+1Tuu5UJynDbj{iMnM^VT*#SOH3TL(1KzKLoZ#nu0ZVCm*;FCVPMCDYV zh~z{F!vSbi0m1+3Ws*3Ca5*dBW|?0IfMx7c(SI5PH=5UTBJpHj9wvV}I-8QJjx9Y- z{>;a08L)|U}w{yAYp>0^RV@(c>o4iJgoW^KKAC_ z++^=~ApN@qvUi$QN8>E;Co(4z(%xZK^y}(OUOiol;0Uk%Gp{zD+lwrs7_Uo;;)Z7? 
z{3sWp1yFAETQWKZ7i3(?cNoKpYUR>y{Yd*&qa`;m_l%qYJ17qK3+!=0>b?^Q_VPFOSF z-0}K)-FIcIn`qg{0_3|vr^)VK5xDd#*9WjL%0FkwzZPbg|KhWm%+ipD3gd!C+L(On z^LO%JyRpvWk2aJKWF+T+&}()MnKv&BiiN*zTOHKNH1elOK2BY9uimTeFN7mzGEF(UFu<__g_ig<^&}Z zCg@`W4Gy$0(`U)1LvEk=1$*0lkPUv5H6^k@g-Q386h3PO^7vg zey7~-s3~>i0?M)wf>)NS1@#gDc~N$14+SBv z?sX@Ln;=%Cxc;XWvH;v#mjHbjsINYd6>eN{iR(xHda#E*Y#b;rrCXRQ*K${eb?P8U zMQa|8BjVIxH zX-Op9m&C>MzDzYC#DKo~bjqpuH!l2YkcKpo(%&;ZLh#vC1^;d@ey9H|mWpt8(Nt^u zuaf4JeTU6fq2M-?7o(o=O^cm*Zc(H=qZ{@h#dN*8(xT#nt(ctuCwf6oEcm!Fby=eWHE>-V7$jmax>i5jI>E@Ak)l3 zeviaMqtFUy%RHtCed_Q-4JA5D93i>F1i>k!w6UL*ZfB+m69nb-TDY5CYCmSIj#R)M zalP23_JKKT?GhdddT(0!qvZ1bfkViho~hD*Vll{=(ep^aUdE-@BYceBbdRtaMWctw z3ei8kw2_{6xE0Uwu0WygrBQj3-&11pWx`UxqSswUj+i*g(qx|6cgH~6_+mtjv-N7x zr1%5rD`$2JrfWdk0WZS`tkfFlln@xm1~uX~HQgq2i2eoP4Qp=ME2I|53?n~D7d5&? zd(j9XYLo1)yf1pRB_etBgvOzBM9^WBhI3F~nH1~EVi32f);pnjUNgN=5z7uU zY=+<35h%2j%0>h>+Vr;+LN|WCRZh_3wv%bE1YZ)tV9e^>5huyf|LrrKxnNM)Dr8U8 z( zUcnl#1Z*qZOGB+{!KbbgBZFgIwLCDO{`~ol`)ko9rv#D=Cy4H*W${LG);b4Xe@i;= z_NQ)8MFT3jP$xT$ho=-ow+$WO9QE;EP9jrLWqIWQvL&+r4cc>0(la&^z@@;Pld~H7 zf@uyxjBi3emh3Nu#8fR#C7`$QvXYf^y;Q^QUg5|EH}Jepgxj{@_iyldDdF%0qh~9kd5^o3Q`H8R1`LIOVqo*f`>u)>4Nf%ea| zl-=4?ilxbZ8M%=_0@vYbW!tJ{kD8Bb?|!nFrMpKf$>h&v6RhMMaAalaDYmCA93mwf z_2FyJ!{sZ7xVoHu=e%{PUEKRw1~TOa%-UIaf<`l6FzBxVdi=yWMAKT3^$lZXE2IJ@ zrM>U*)P1{`SYx9g^r$#j4J@zL_WflKor}p%p3)X%$zwxtKG+| zC=6U_O1ii-L*Vid7)b`OEOZ}M#l78N`5b_HT#Z7Lj@pKTC8?b{LO`1fSglEOMAb^o z|D{4`H@rAz<-<6<2K`$qC&AHx3N6o%`K&HX%8LdEAzY-p8oTzb|IXUj-?$#Hr+1UnWrsu^d4o%z;vv;$s;MUWX=WWu9>pmJ zD!@K9Xn!VyN-FuwM^8KVb@#gf?-s(r*vnEN#kPISR*y;zx3<<7D{AF}n-nysiBM+tiixM`XvH~%VJa1W-=uyjkvwj+=YhDx(n?2)ZkMdK>{c3s*m zBhI2KWoKqX1T?nyZ+UC-waY{p>SQ*E#2|2iRs)B4m4=(&)v()M$1Lga+G;gN8NtbN z?nPb(o;42a(Y*Od|7O807U-7mpCr9IpIx3+lky>OKbFL&E}b>>ZO-XXQ=%|xW5QYr zg=x4pOi2ZDmzzFMK9z4ydda~CT7{gyNFf6@)0=v8PBWm4O>**jRcr!|NW*J%Dm^mF zJ{z^_7F*=o@@}U`jqspjy@=7cj5Caw&ZGr9PFwiuydig95d0(YE41_~4ZuuvJ)#~J z0JFAyF-(tr$N01cul z_04ZPe%#K*oS&v{dby*|vw`4%j3D}oZZO4&0}cLO^d!9r%)+UXLK^|2#O|@^;6(s+ zXgFj77uc=D6EQzNNroDx-?(7Q4d*p`dbHphJ@!4WwftRb$t*@kzC{$VNvt;8M7txF zw*bF{!7bJ)Gvr}=zOqFz`lGAr)A z`yT;e0iOO&`!o?ocXGeD+JL63Nl>HGr<)5EnHE-D8VbWrcc{@V@nedE(@r)SYr?lbY(gg zJkDu)!!#`nYZ#W6x`~yA2TuFH=gc;?H5VWAY%rJ*<~A^iDPoOjb^Gq|-pF#XiEJk$ zzV(#mnKJf@9L47%UISO 8601 format""") - end: datetime = Field("2100-01-31T16:47+00:00", description="""The interval end datetime in ISO 8601 format""") - -@app.get('/bot/usage/activityPerDay', summary="", tags=[]) -def usage_activity_per_day(query: BarChartRequest): - """ - - """ - chatbots = query.chatbots - start = query.start - end = query.end - - - client = Elasticsearch(app.config['elastic_uri']) - - id2name = get_slugs_for_names(client) - - s = Search(using=client, index="nextsearch_log") \ - .filter("range", timest={"gte": start}) \ - .filter("range", timest={"lte": end}) - - s = s[0:10000] #if not used size is set to 10 results - - def maybe_id2name(id): - if id in id2name: - return id2name[id] - return id - - def agg_pretty_result(d): - ls = [] - for bucket in d["aggregations"]["day"]["buckets"]: - d = {} - d["date"] = bucket["key_as_string"] - d["bots"] = [] - for bot in bucket["chatbot"]["buckets"]: - d["bots"].append({ - "bot": maybe_id2name(bot["key"]), - "cost": bot["cost_per_bot"]["value"] - }) - ls.append(d) - return ls - - - s.aggs.bucket('day', 'terms', field='date') - s.aggs['day'].bucket('chatbot', 'terms', field='chatbotid').metric('cost_per_bot', 'sum', field='inCt') - - response = s.execute() - - r = response.to_dict() - del r["hits"] - - ls = agg_pretty_result(r) - - return 
jsonify(ls) - -#-------------- - - -#book_tag = Tag(name="book", description="Some Book") - -class DonutChartRequest(BaseModel): - chatbots: List[str] = Field([], description="""A list of chatbot names to filter for""") - start: datetime = Field("2000-01-31T16:47+00:00", description="""The interval start datetime in ISO 8601 format""") - end: datetime = Field("2100-01-31T16:47+00:00", description="""The interval end datetime in ISO 8601 format""") - -@app.get('/bot/usage/activity', summary="Takes an interval and gives back a summary of bots and their activity and cost.", tags=[]) -def usage_activity(query: DonutChartRequest): - """ - Use datetime in ISO 8601 format: 2007-08-31T16:47+00:00 - """ - chatbots = query.chatbots - start = query.start - end = query.end - - #group nextsearch_log by chatbotid and sum inCt - - client = Elasticsearch(app.config['elastic_uri']) - - id2name = get_slugs_for_names(client) - - s = Search(using=client, index="nextsearch_log") \ - .filter("range", timest={"gte": start}) \ - .filter("range", timest={"lte": end}) - - s = s[0:10000] #if not used size is set to 10 results - - a = A('terms', field='chatbotid') \ - .metric('cost_per_bot', 'sum', field='inCt') - - s.aggs.bucket('bots', a) - response = s.execute() - #print(response.aggregations.bots.buckets) - - def maybe_id2name(id): - if id in id2name: - return id2name[id] - return id - - match chatbots: - case []: - #ls = [d for d in (d.to_dict() for d in response.aggregations.bots.buckets)] - ls = [{**d, "chatbotname": maybe_id2name(d["key"].split("_")[0])} for d in (d.to_dict() for d in response.aggregations.bots.buckets)] - case _: - ls = [{**d, "chatbotname": maybe_id2name(d["key"].split("_")[0])} for d in (d.to_dict() for d in response.aggregations.bots.buckets) if d["key"] in id2name and id2name[d["key"]] in chatbots] - - d = { - "chart": { - "series": { - "data": ls - } - } - } - - return jsonify(d) - #return jsonify(ls) - #return jsonify(list(response.aggregations.bots.buckets)) - -#------------------ - -class RetrieveChatRequest(BaseModel): - sessionId: str = Field(None, description="""The session's id. 
Example: d73bccba29b6376c1869944f26c3b670""") - -@app.get('/bot/usage/conversation', summary="Takes a session-id and gives you all of it's content.", tags=[]) -def usage_conversation(query: RetrieveChatRequest): - """ - Example session-id: d73bccba29b6376c1869944f26c3b670 - """ - sessionId = query.sessionId - - client = Elasticsearch(app.config['elastic_uri']) - - s = Search(using=client, index="nextsearch_log") \ - .filter("term", session=sessionId) - - s = s[0:10000] #if not used size is set to 10 results - response = s.execute() - return jsonify([hit.to_dict() for hit in response]) - -#------------ - -class DialogTableRequest(BaseModel): - chatbots: List[str] = Field([], description="""A list of chatbot names to filter for""") - start: datetime = Field("2000-01-31T16:47+00:00", description="""The interval start datetime in ISO 8601 format""") - end: datetime = Field("2100-01-31T16:47+00:00", description="""The interval end datetime in ISO 8601 format""") - -@app.get('/bot/usage/conversations', summary="Takes an interval and gives you all chatbots and their sessions within.", tags=[]) -def usage_conversations(query: DialogTableRequest): - """ - Use datetime in ISO 8601 format: 2007-08-31T16:47+00:00 - """ - #GET /bot/usage/conversations?chatbots=robobot,cyberbot,gigabot&timeStart=2024-01-15&timeEnd=2024-01-15&timeStart=00:00&timeEnd=23:00 - chatbots = query.chatbots - start = query.start - end = query.end - - client = Elasticsearch(app.config['elastic_uri']) - - s = Search(using=client, index="nextsearch_log") \ - .filter("range", timest={"gte": start}) \ - .filter("range", timest={"lte": end}) - - s = s[0:10000] #if not used size is set to 10 results - - #a = A('terms', field='chatbotid') \ - # .metric('cost_per_bot', 'sum', field='inCt') - #s.aggs.bucket('bots', a) - - response = s.execute() - hits = (x.to_dict() for x in response.hits) - - id2name = get_slugs_for_names(client) - - match chatbots: - case []: - pass - case _: - - hits = filter(lambda d: (id2name[d["chatbotid"]] in chatbots) if d["chatbotid"] in id2name else False, hits) - - - d = group_by([lambda d: d["chatbotid"], lambda d: d["session"] ], hits) - - d2 = {} - for chatbotid, v in d.items(): - if chatbotid in id2name: - d2[id2name[chatbotid]] = v - - return jsonify(d2) - - -#------------------ - -class ExtractUrlRequest(BaseModel): - url: str = Field(None, description="""The URL to a website whose HTML-embedded URLs you'd like to have.""", strict=True) - -@app.post('/bot/extract-url', summary="Get URLs from a website via its URL", tags=[]) -def extract_url(body: ExtractUrlRequest): - """ - Takes a json of form {"url": "..."} and gives back a list of URLs found within the specified URL's HTML-sourcecode. 
- """ - url = body.url - if not url: - return jsonify({'status': 'error', 'message': 'Missing required parameter url!'}), 400 - - with WebScraper() as web_scraper: - return jsonify(web_scraper.extract_urls(url)) - -#------------------ - -def extract_data(links: List[Dict[str, str]]) -> List[Dict[str, str]]: - """ - Webscrape pages of the given links and return a list of texts - """ - with WebScraper() as web_scraper: - return web_scraper.extract_page_data(links) - - -def get_word_splits(word_file: str) -> List: - loader = Docx2txtLoader(word_file) - pages = loader.load_and_split() - txt_spliter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100, length_function=len) - doc_list = [] - for page in pages: - pg_splits = txt_spliter.split_text(page.page_content) - doc_list.extend(pg_splits) - return doc_list - - -def get_text_splits_from_file(text_file: str) -> List: - # Detect the file's encoding - with open(text_file, 'rb') as file: - encoding_result = chardet.detect(file.read()) - - # Use the detected encoding to read the file - detected_encoding = encoding_result['encoding'] - with codecs.open(text_file, 'r', encoding=detected_encoding, errors='replace') as file: - text = file.read() - - return get_text_splits(text) - - -def determine_index(chatbot_id: str) -> str: - return f"{index_prefix}_{chatbot_id.lower()}" - - -def embed_index(doc_list: List[Dict[str, str]], chatbot_id: str) -> None: - """ - Add source documents in chatbot_xyz index! - """ - index = determine_index(chatbot_id) - - #print(f"add documents to index {index}", flush=True) - app.logger.info(f"add documents to index {index}") - - - #ElasticsearchStore.from_documents(doc_list, EMBEDDING, index_name=index, es_url=elastic_uri) - ElasticsearchStore.from_documents(doc_list, EMBEDDING, index_name=index, es_url=app.config['elastic_uri']) - - - - - - -class TrainForm(BaseModel): - #url: str = Field(None, description="""""", strict=True) - chatbotSlug: str = Field(None, description="""""") - files: List[FileStorage] = Field(None, description="""Some files""") - text: str = Field(None, description="Some text") - #filesMetadata: List[Dict[str, str]] = Field(None, description="""""") - - filesMetadata: str = Field(None, description="""A JSON string""") #a json string: [ ... ] - links: str = Field(None, description="""A JSON string""") #a json? [ ... ] - - -#TODO: needs to be reimplemented with another mechanism like celeery to manage longer running tasks and give feedback to frontend -@app.post('/bot/train', summary="", tags=[]) -def upload(form: TrainForm): - """ - Caution: Long running request! - """ - #url = body.url - - #print(form.file.filename) - #print(form.file_type) - #form.file.save('test.jpg') - - #app.logger.info("TRAIN called!") - - # extract body - chatbot_id = form.chatbotSlug - files = form.files - text = form.text - files_metadata = form.filesMetadata - if files_metadata: - files_metadata = json.loads(files_metadata) - links = form.links - if links: - links = json.loads(links) #[{url: '...'}] ? 
- app.logger.debug(links) - - - # validate body - if not chatbot_id: - return jsonify({ - 'status': 'error', - 'message': 'chatbotId is required' - }), 400 - - if not files_metadata and not text and not links: - return jsonify({ - 'status': 'error', - 'message': 'No data source found' - }), 400 - - if files_metadata and len(files) != len(files_metadata): - return jsonify({ - 'status': 'error', - 'message': 'Number of uploaded files metadata and files should be same' - }), 400 - - if links and len(links) == 0: - return jsonify({ - 'status': 'error', - 'message': 'No links found' - }), 400 - - - - try: - - # store raw data and extract doc_list - os.makedirs(f"{app.config['UPLOAD_FOLDER']}/{chatbot_id}", exist_ok=True) - - #train with given files - for i, file in enumerate(files): - filename = files_metadata[i]["slug"] + "_" + secure_filename(file.filename) - file_path = os.path.join(app.config['UPLOAD_FOLDER'], chatbot_id, filename) - file.save(file_path) - - app.logger.info("File saved successfully!") - - doc_list = [] - match file.filename.split(".")[-1]: - case "pdf": - doc_list = get_pdf_splits(file_path) - doc_list = add_metadata( - doc_list=doc_list, - source_type="pdf_file", - chatbot_id=chatbot_id, - source_file_id=files_metadata[i]["slug"], - filename=file.filename - ) - case "txt": - doc_list = get_text_splits_from_file(file_path) - doc_list = add_metadata( - doc_list=doc_list, - source_type="text_file", - chatbot_id=chatbot_id, - source_file_id=files_metadata[i]["slug"], - filename=file.filename - ) - case "docx" | "doc": - doc_list = get_word_splits(file_path) - doc_list = add_metadata( - doc_list=doc_list, - source_type="word_file", - chatbot_id=chatbot_id, - source_file_id=files_metadata[i]["slug"], - filename=file.filename - ) - case _: - app.logger.error("Unknown file extension: '%s'!" % file.filename.split(".")[-1]) - - - # embed file doc_list - embed_index(doc_list=doc_list, chatbot_id=chatbot_id) - - #train with given text - if text: - doc_list = get_text_splits(text) - doc_list = add_metadata( - doc_list=doc_list, - source_type="text", - chatbot_id=chatbot_id, - source_file_id="text", - txt_id=hashlib.md5(text.encode()).hexdigest() - ) - - # embed raw text doc_list - embed_index(doc_list=doc_list, chatbot_id=chatbot_id) - - #train with given links - if links and len(links) > 0: - links_docs = extract_data(links) - for i, doc in enumerate(links_docs): - if not doc['text']: - app.logger.info(f"Document {i} '{doc['url']} of {len(links_docs)} doesn't contain text. Skip.") - - else: - app.logger.info(f"embed document {i + 1} '{doc['url']}' of {len(links_docs)}") - - doc_list = get_text_splits(doc["text"], "link") - doc_list = add_metadata(doc_list, "link", chatbot_id, doc["slug"], url=doc["url"]) - - #TODO: save url with source! 
- - - # embed html doc_list - embed_index(doc_list=doc_list, chatbot_id=chatbot_id) - - #TODO: js backend needs to be merged into this one - # ping status endpoint - - express_api_endpoint = f"{app.config['api_url']}/api/chatbot/status/{chatbot_id}" - - #express_api_endpoint = f"{api_url}/api/chatbot/status/{chatbot_id}" - - - - try: - response = requests.put(express_api_endpoint, json={'status': 'ready'}) - - if response.status_code == 200: - app.logger.info("Express API updated successfully!") - else: - app.logger.error(f"Failed to update Express API {express_api_endpoint}") - - except Exception as e: - app.logger.error(f"Failed to update Express API {express_api_endpoint}") - app.logger.error(e) - - - return 'Files uploaded successfully' - except Exception as e: - app.logger.error(e) - - #TODO: log traceback! - traceback.print_exc() - return jsonify({'status': 'error', 'message': 'Something went wrong!'}), 400 - - -#------------------ - -class ReviseAnswerRequest(BaseModel): - revisedText: str = Field(None, description="""The new revised text""") - chatbotSlug: str = Field(None, description="""The chatbot id""") - -@app.post('/bot/revise-answer', summary="", tags=[]) -def revise2(body: ReviseAnswerRequest): - """ - - """ - revised_text = body.revisedText - chatbot_id = body.chatbotSlug - - if not revised_text: - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter revisedText!' - }), 400 - - if not chatbot_id: - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter chatbotSlug!' - }), 400 - - doc_list = get_text_splits(revised_text) - doc_list = add_metadata(doc_list, "revised_text", chatbot_id, "text") - embed_index(doc_list=doc_list, chatbot_id=chatbot_id) - return jsonify({ - 'status': 'success', - 'message': 'Answer revised successfully!' - }) - -#------------------ - -def clean_history(hist: List[Dict[str, str]]) -> str: - out = '' - for qa in hist[-5:]: # only the last 5 - if len(qa['bot']) < 2: - continue - out += 'user: ' + qa['user'] + '\nassistant: ' + qa['bot'] + "\n\n" - return out - - - -def get_prices(model_name) -> Dict[str, float]: - """ - prices in Ct. 
per 1000 tokens - """ - match model_name: - - # Azure OpenAI - case 'gpt-35-turbo': - inCt = 0.15 - outCt = 0.2 - - # OpenAI - case 'gpt-3.5-turbo-16k': - inCt = 0.3 - outCt = 0.4 - - case 'gpt-3.5-turbo-0125': - inCt = 0.05 - outCt = 0.15 - - case 'gpt-4': - inCt = 3.0 - outCt = 6.0 - - case 'gpt-4-32k': - inCt = 6.0 - outCt = 12.0 - - case 'gpt-4-0125-preview': - inCt = 1.0 - outCt = 3.0 - - case _: - inCt = 1.0 - outCt = 1.0 - - return { - "inCt": inCt, - "outCt": outCt - } - - - -def query_log(chatbot_id, queryId, sess, temperature, q, a, rating, llm, dura, sources, inputTokens, inCt, outputTokens, outCt): - """ - Add a doc to nextsearch_log - """ - - connections.create_connection(hosts=app.config['elastic_uri']) - - # create the mappings in elasticsearch - NextsearchLog.init() - - totalCt = ((inputTokens / 1000) * inCt) + ((outputTokens / 1000) * outCt) - esdoc = { - 'queryid': queryId, - 'chatbotid': chatbot_id, - 'timest': datetime.now(), - - 'date': date.today().isoformat(), - - 'session': sess, - 'temperature': temperature, - 'q': q, - 'a': a, - 'rating': rating, - 'reason': '', - 'reasontags': '', - 'llm': llm, - 'durasecs': dura, - 'sources': sources, - 'inToks': inputTokens, - 'inCt': inCt, - 'outToks': outputTokens, - 'outCt': outCt, - 'totalCt': totalCt - } - - client = Elasticsearch(app.config['elastic_uri']) - - resp = client.index(index='nextsearch_log', document=esdoc) - #TODO: check resp for success - - #print(resp) - app.logger.info(resp) - - return resp - - - - -def get_llm(temperature: float, stream_key: str, sid: str): - """ - Get the right LLM - """ - if OPENAI_API_TYPE == 'azure': - llm = AzureChatOpenAI( - openai_api_version=OPENAI_API_VERSION, - deployment_name=OPENAI_DEPLOYMENT_NAME, - azure_endpoint=AZURE_OPENAI_ENDPOINT, - - openai_api_key=OPENAI_API_KEY, - model_name=OPENAI_MODEL_NAME, - temperature=temperature, - streaming=True, - callbacks=BaseCallbackManager([StreamingCallback(stream_key, sid)]) - ) - else: - llm = ChatOpenAI( - openai_api_key=OPENAI_API_KEY, - model_name=OPENAI_MODEL_NAME, - temperature=temperature, - streaming=True, - callbacks=BaseCallbackManager([StreamingCallback(stream_key, sid)]) - ) - - return llm - - -class QueryRequest(BaseModel): - queryId: str = Field("", description="""The query id""") #generated by the js backend atm - key: str = Field("", description="""String used for the streaming of the chat""") - - prompt: str = Field(None, description="""The prompt/question to the bot""") - history: List[Dict[str,str]] = Field([], description="""""") - chatbotSlug: str = Field(None, description="""The chatbot id. Example: 'MyBot_c2wun1'""") - temprature: float = Field(0.1, description="""The temperature value passed to OpenAI affecting the strictness of it#s answers""") - sid: str = Field(None, description="""String used for the streaming of the chat""") - systemPrompt: str = Field("Antworte freundlich, mit einer ausführlichen Erklärung, sofern vorhanden auf Basis der folgenden Informationen. 
Please answer in the language of the question.", description="""A prompt always contextualizing the query used""") - - -@app.post('/bot/query', summary="Query the bot via prompt", tags=[]) -def bot_query(body: QueryRequest): - """ - The main route to use the chatbots LLM with a given prompt string, temperature, system prompt and history context - """ - dura = datetime.now().timestamp() - - queryId = body.queryId - prompt = body.prompt - history = clean_history(body.history) - chatbot_id = body.chatbotSlug - system_prompt = body.systemPrompt - temperature = body.temprature #typo in 'temprature' instead of is key temperature - key = body.key - sid = body.sid - - stream_key = key if key else f"{chatbot_id}_stream" - - sess = str(request.user_agent) + ' ' + str(request.environ.get('HTTP_X_REAL_IP', request.remote_addr)) +' '+ str(request.remote_addr) - sessMD5 = hashlib.md5(sess.encode()).hexdigest() - - #TODO: we need a better way to create these ids... it seems kind of random - if (queryId == None) or (queryId == ''): - queryId = sessMD5 - - - encoding = tiktoken.encoding_for_model(OPENAI_MODEL_NAME) - - if not chatbot_id: - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter chatbotSlug!' - }), 400 - if not prompt: - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter prompt!' - }), 400 - if not sid: - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter sid in query!' - }), 400 - - - default_temperature = 0.1 - temperature = temperature if temperature is not None else default_temperature - - llm = get_llm(temperature, stream_key, sid) - - prompt_template = system_prompt + """ - - {context} - - - """ + history + """ - - Question: {question} - """ - - chat_prompt = PromptTemplate( - template=prompt_template, input_variables=["context", "question"] - ) - - index = determine_index(chatbot_id) - - db = ElasticsearchStore( - es_url=app.config['elastic_uri'], - index_name=index, - distance_strategy="COSINE", - embedding=EMBEDDING - ) - - k = int(LLM_PAYLOAD / CHUNK_SIZE) - 1 - if (k < 2): - k = 2 - - scoredocs = db.similarity_search_with_score(prompt, k=k+10) - - query = RetrievalQA.from_chain_type( - llm=llm, - chain_type="stuff", - verbose=False, - return_source_documents=True, - retriever=db.as_retriever(search_kwargs={'k': k}), - chain_type_kwargs={"prompt": chat_prompt} - ) - - inputTokens = 0 - outputTokens = 0 - - with get_openai_callback() as cb: - qares = query.invoke({'query': prompt}) - qadocs = qares['source_documents'] #TODO: STS: deliver doc names and page numbers in the future - inputDocTxt = '' - - sources = [] - count = 0 - for qadoc in qadocs: - mdata = qadoc.metadata - if 'chatbotId' in mdata: - del mdata['chatbotId'] - - nextScore = 0.0 - for scoredoc in scoredocs: - if (len(qadoc.page_content) > 20) and (len(scoredoc[0].page_content) > 20) and (qadoc.page_content[:20] == scoredoc[0].page_content[:20]): - nextScore = scoredoc[1] - inputDocTxt += ' ' + qadoc.page_content - break - - # Lets make Percent of the score, only look at 0.6-1.0 - nextScore = float((nextScore - 0.6) * 250) - if nextScore < 1.0: - nextScore = 1.0 - if nextScore > 99.99: - nextScore = 99.99 - - mdata['score'] = round(nextScore, 2) - sources.append(mdata) - count += 1 - - answer = qares['result'] - #print(f"Total Tokens: {cb.total_tokens}") - #print(f"Prompt Tokens: {cb.prompt_tokens}") - #print(f"Completion Tokens: {cb.completion_tokens}") - - app.logger.info("ANSWER: " + answer) - - #print(ans, flush=True) - - inputTokens = 
len(encoding.encode(inputDocTxt + ' ' + prompt_template)) - outputTokens = len(encoding.encode(answer)) - - app.logger.info(f"Input Tokens: {inputTokens}") - app.logger.info(f"Output Tokens: {outputTokens}") - app.logger.info(f"Total Cost (USD): ${cb.total_cost}") - - - - d = get_prices(OPENAI_MODEL_NAME) - inCt = d["inCt"] - outCt = d["outCt"] - - # log question/answer - dura = round(datetime.now().timestamp() - dura, 2) - resp = query_log(chatbot_id, - queryId, - sessMD5, - temperature, - prompt, - answer, - 0, - OPENAI_MODEL_NAME, - dura, - sources, - inputTokens, - inCt, - outputTokens, - outCt) - - app.logger.info(resp) - - sources_index = "chatbot_" + chatbot_id - - client = Elasticsearch(app.config['elastic_uri']) - - s = Search(using=client, index=sources_index) - s = s[0:10000] - response = s.execute() - srcs = (x.to_dict() for x in response.hits) - src_grps = group_by([lambda d: d["metadata"]["sourceType"] ], srcs) - - #def print_type(x): - - new_sources = [] - for source in sources: - app.logger.info("Source: " + repr(source)) - match source["sourceType"]: - case "text": - if "txt_id" in source: - source["text"] = "" - d2 = group_by([lambda d: d["metadata"]["txt_id"] ], src_grps["text"]) - for src_item in d2[source["txt_id"]]: - source["text"] += " " + src_item["text"] - - new_sources.append(source) - - case "link": - if "sourceFileId" in source: - source["text"] = "" - d2 = group_by([lambda d: d["metadata"]["sourceFileId"] ], src_grps["link"]) - for src_item in d2[source["sourceFileId"]]: - source["text"] += " " + src_item["text"] - if "url" in src_item: - source["url"] = src_item["url"] - - new_sources.append(source) - - case "file": - if "sourceFileId" in source: - source["text"] = "" - d2 = group_by([lambda d: d["metadata"]["sourceFileId"] ], src_grps["file"]) - for src_item in d2[source["sourceFileId"]]: - source["text"] += " " + src_item["text"] - if "filename" in src_item: - source["filename"] = src_item["filename"] - - new_sources.append(source) - - - if resp.body["result"] == "created": - return jsonify({ - 'status': 'success', - 'answer': answer, - 'query_id': queryId, - 'sources': new_sources #sources - }) - else: - return jsonify({ - 'status': 'error', - 'message': resp.body["result"] - }), 400 - - -#------------------ - -#TODO create separate delete bot route - -class DeleteBotRequest(BaseModel): - chatbot_id: str = Field(None, description="""Chatbot id""") - -@app.delete('/bot', summary="", tags=[]) -def delete_bot(body: DeleteBotRequest): - """ - Not implemented yet - - Delete a chatbot via it's id - """ - chatbot_id = body.chatbot_id - - # Ensure chatbotId is provided - if not chatbot_id: - app.logger.error('Missing required parameter chatbotSlug!') - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter chatbotSlug!' - }), 400 - - client = Elasticsearch(app.config['elastic_uri']) - id2name = get_slugs_for_names(client) - index = determine_index(chatbot_id) - - - if not chatbot_id in id2name: - app.logger.error("Missing associated chatbot name of this id: '%s'!" % chatbot_id) - return jsonify({ - 'status': 'error', - 'message': 'Chatbot id not found!' - }), 404 - else: - chatbot_name = id_value=id2name[chatbot_id] - - - #TODO: delete index chatbot_ - try: - client.indices.delete(index=index) - app.logger.info("Deleted index '%s' !" % index) - except: - app.logger.error("Could not delete index '%s' !" 
% index) - - - #TODO: delete associated doc from index chatbot - #try: - delete_by_id(client, index="chatbot", id_field_name="slug", id_value=chatbot_id) - # app.logger.info("Deleted chatbot '%s' data from index '%s' !" % (chatbot_id, "chatbot")) - #except: - # app.logger.error("Could not delete data for '%s' in index 'chatbot' !" % chatbot_id) - - - #TODO: delete associated doc from index settings - #try: - delete_by_id(client, index="settings", id_field_name="displayName", id_value=chatbot_name) - # app.logger.info("Deleted chatbot '%s' data from index '%s' !" % (id2name[chatbot_id], "settings")) - #except: - # app.logger.error("Could not delete data for '%s' in index 'settings' !" % id2name[chatbot_id]) - - - #TODO: delete associated doc from index nextsearch_log - #try: - delete_by_id(client, index="nextsearch_log", id_field_name="chatbotid", id_value=chatbot_id) - # app.logger.info("Deleted chatbot '%s' data from index '%s' !" % (chatbot_id, "nextsearch_log")) - #except: - # app.logger.error("Could not delete data for '%s' in index 'nextsearch_log' !" % chatbot_id) - - return "", 202 - - -#------------------ - -#TODO: overloaded route... split into two or three, one for each resource -#server/routes/api/chatbot.js - -#FE calls js BE: /api/chatbot/resources/abotycrsh1 -#which calls bot BE - - - -class DeleteResourceRequest(BaseModel): - sourceType: str = Field(None, description="""Source type: ...link, text, file ?""") - sourceId: str = Field(None, description="""Source id?""") - chatbotSlug: str = Field(None, description="""Chatbot id""") - -@app.delete('/bot/resources', summary="delete a bot or resource via it's id", tags=[]) -def delete_resource(body: DeleteResourceRequest): - """ - * delete a bot via it's id - * delete files used as training source - - or other resources... unclear atm - """ - source_type = body.sourceType - source_id = body.sourceId - chatbot_id = body.chatbotSlug - - # Validate presence of sourceType - if not source_type: - msg = 'sourceType is required!' - app.logger.error(msg) - return jsonify({ - 'status': 'error', - 'message': msg - }), 400 - - # Ensure chatbotId is provided - if not chatbot_id: - app.logger.error('Missing required parameter chatbotSlug!') - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter chatbotSlug!' - }), 400 - - # Apply criteria based on sourceType - filter_criteria = { - "bool": { - "must": [ - {"match": {"metadata.sourceType": source_type}}, - {"match": {"metadata.chatbotId": chatbot_id}}, - ] - } - } - if source_type != 'text': - if not source_id: - app.logger.error('Missing required parameter sourceId!') - return jsonify({ - 'status': 'error', - 'message': 'Missing required parameter sourceId!' - }), 400 - new_match: Dict[str, Dict[str, Any]] = { - "match": { - "metadata.sourceFileId": source_id - } - } - filter_criteria["bool"]["must"].append(new_match) - - try: - # Assuming delete method returns a status or raises an exception on failure - app.logger.info(filter_criteria) - - index = determine_index(chatbot_id) - - store = ElasticsearchStore( - es_url=app.config['elastic_uri'], - index_name=index, - embedding=EMBEDDING - ) - - store.client.delete_by_query(index=index, query=filter_criteria) - - # isDeleted = index.delete(filter=filter_criteria) - except Exception as e: - #TODO: Handle specific exceptions if possible - - app.logger.error(str(e)) - - return jsonify({ - 'status': 'error', - 'message': str(e) - }), 500 - - - msg = 'Resource deleted successfully!' 
- app.logger.info(msg) - return jsonify({ - 'status': 'success', - 'message': msg - }) - - -#------------------ - - -# Splits the text into small chunks of 150 characters -def get_pdf_splits(pdf_file: str) -> List: - loader = PyPDFLoader(pdf_file) - pages = loader.load_and_split() - text_split = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100, length_function=len) - doc_list = [] - for pg in pages: - pg_splits = text_split.split_text(pg.page_content) - doc_list.extend(pg_splits) - return doc_list - - -def get_text_splits(text: str, source: str="text") -> List: - chunk_size = 1536 - chunk_overlap = 200 - - #if source == "link": - # chunk_size = 1536 - # chunk_overlap = 200 - - text_split = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len) - doc_list = text_split.split_text(text) - return doc_list - - -ESDocument = namedtuple('Document', ['page_content', 'metadata']) - -def add_metadata(doc_list: List[str], source_type: str, chatbot_id: str, source_file_id, tags=[], url=None, filename=None, txt_id=None) -> List[ESDocument]: - """ - - """ - for i, doc in enumerate(doc_list): - # If doc is a string, convert it to the Document format - if isinstance(doc, str): - doc = ESDocument(page_content=doc, metadata={}) - doc_list[i] = doc - - # Update the metadata - updated_metadata = doc.metadata.copy() - updated_metadata["chatbotId"] = chatbot_id - updated_metadata["tags"] = ' | '.join(tags) - - match source_type: - case "text": - updated_metadata["sourceType"] = "text" - if txt_id is not None: - updated_metadata["txt_id"] = txt_id - - case "revised_text": - updated_metadata["sourceType"] = "revised_text" - - case "pdf_file" | "word_file" | "text_file": - updated_metadata["sourceType"] = "file" - updated_metadata["sourceFileId"] = source_file_id - if filename is not None: - updated_metadata["filename"] = filename - - case "link": - updated_metadata["sourceType"] = "link" - updated_metadata["sourceFileId"] = source_file_id - if url is not None: - updated_metadata["url"] = url - - # Update the document in the doc_list with new metadata - doc_list[i] = ESDocument( - page_content=doc.page_content, - metadata=updated_metadata - ) - - return doc_list - - - -@app.errorhandler(500) -def server_error(error): - app.logger.exception('An exception occurred during a request: ' + str(error)) - return 'Internal Server Error', 500 - - - -#JS Backend routes to reimplement: -# http://localhost:8000/api/chatbot/add-resources - -if __name__ == '__main__': - app.run(debug=True, host="0.0.0.0", port=5000) - - -