Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)

Compare commits
516 Commits

Commit SHAs `2f33ec7ad0` through `427fb97562` (the compare table's author, date, and message columns are empty in this mirror).
**Dockerfile hunk (file header not captured in the mirror):**

```diff
@@ -10,7 +10,8 @@ ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
-ADD ./graph ./graph
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
```
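The same two-line substitution recurs in every Dockerfile variant below: `ADD ./graph ./graph` becomes `ADD ./agent ./agent`, with `ADD ./graphrag ./graphrag` added alongside it. This lines up with the README change later in this compare, which moves the 2024-07-08 Graph entry to `./agent/README.md` and adds GraphRAG support. A quick, hedged way to confirm the new source trees land in a built image (the `infiniflow/ragflow:dev` tag comes from the build command in the README diff; substitute your own tag):

```bash
# List the source trees copied into the image; `agent` and `graphrag`
# should now appear where `graph` used to.
docker run --rm --entrypoint ls infiniflow/ragflow:dev /ragflow
```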
**Dockerfile hunks (arm variant, per `requirements_arm.txt`; file header not captured):**

```diff
@@ -4,6 +4,10 @@ USER root
 WORKDIR /ragflow
 
 COPY requirements_arm.txt /ragflow/requirements.txt
+
+RUN pip install nltk --default-timeout=10000
+
+
 RUN pip install -i https://mirrors.aliyun.com/pypi/simple/ --default-timeout=1000 -r requirements.txt &&\
     python -c "import nltk;nltk.download('punkt');nltk.download('wordnet')"
 
@@ -14,6 +18,11 @@ RUN apt-get update && \
 RUN curl -sL https://deb.nodesource.com/setup_20.x | bash - && \
     apt-get install -y --fix-missing nodejs nginx ffmpeg libsm6 libxext6 libgl1
 
+RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+RUN pip install graspologic
+
 ADD ./web ./web
 RUN cd ./web && npm i --force && npm run build
 
@@ -21,7 +30,8 @@ ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
-ADD ./graph ./graph
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
@@ -30,4 +40,4 @@ ADD docker/entrypoint.sh ./entrypoint.sh
 ADD docker/.env ./
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
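Two arm-specific additions stand out: `nltk` is installed ahead of the bulk requirements with a very generous `--default-timeout`, and a Rust toolchain is put on `PATH` before `pip install graspologic`. A plausible reading is that both guard against slow mirrors and source builds on arm64, where prebuilt wheels are often unavailable. Roughly the same steps outside Docker (a hedged sketch; paths assume a root-like environment as in the image):

```bash
# Install nltk first with a long timeout to tolerate slow mirrors.
pip install nltk --default-timeout=10000

# Provide a Rust toolchain for any graspologic dependencies that must
# compile from source on arm64, then install graspologic itself.
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
pip install graspologic
```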
**Dockerfile hunk (variant based on `infiniflow/ragflow-base:v2.0`; file header not captured):**

```diff
@@ -1,25 +1,27 @@
 FROM infiniflow/ragflow-base:v2.0
 USER root
 
 WORKDIR /ragflow
 
 ## for cuda > 12.0
-RUN /root/miniconda3/envs/py11/bin/pip uninstall -y onnxruntime-gpu
-RUN /root/miniconda3/envs/py11/bin/pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
+RUN pip uninstall -y onnxruntime-gpu
+RUN pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
 
 
 ADD ./web ./web
 RUN cd ./web && npm i --force && npm run build
 
 ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
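Besides the `agent`/`graphrag` additions, the only change here is dropping the hard-coded `/root/miniconda3/envs/py11/bin/pip` in favor of bare `pip`. That is only safe if the base image already puts the `py11` environment first on `PATH`; this diff does not show the base image's PATH setup, so treat the following check as an assumption to verify:

```bash
# Confirm that bare `pip` resolves to the py11 environment the old
# hard-coded path pointed at (assumption: the base image exports that PATH).
docker run --rm --entrypoint sh infiniflow/ragflow-base:v2.0 \
  -c 'which pip && pip --version'
# Expected: /root/miniconda3/envs/py11/bin/pip
```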
**Dockerfile hunk (variant based on `ubuntu:22.04`; file header not captured):**

```diff
@@ -1,55 +1,56 @@
 FROM ubuntu:22.04
 USER root
 
 WORKDIR /ragflow
 
 RUN apt-get update && apt-get install -y wget curl build-essential libopenmpi-dev
 
 RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
     bash ~/miniconda.sh -b -p /root/miniconda3 && \
     rm ~/miniconda.sh && ln -s /root/miniconda3/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
     echo ". /root/miniconda3/etc/profile.d/conda.sh" >> ~/.bashrc && \
     echo "conda activate base" >> ~/.bashrc
 
 ENV PATH /root/miniconda3/bin:$PATH
 
 RUN conda create -y --name py11 python=3.11
 
 ENV CONDA_DEFAULT_ENV py11
 ENV CONDA_PREFIX /root/miniconda3/envs/py11
 ENV PATH $CONDA_PREFIX/bin:$PATH
 
 RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
 RUN apt-get install -y nodejs
 
 RUN apt-get install -y nginx
 
 ADD ./web ./web
 ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
-ADD ./graph ./graph
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 RUN apt install openmpi-bin openmpi-common libopenmpi-dev
 ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu/openmpi/lib:$LD_LIBRARY_PATH
 RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
 RUN cd ./web && npm i --force && npm run build
 RUN conda run -n py11 pip install -i https://mirrors.aliyun.com/pypi/simple/ -r ./requirements.txt
 
 RUN apt-get update && \
     apt-get install -y libglib2.0-0 libgl1-mesa-glx && \
     rm -rf /var/lib/apt/lists/*
 
 RUN conda run -n py11 pip install -i https://mirrors.aliyun.com/pypi/simple/ ollama
 RUN conda run -n py11 python -m nltk.downloader punkt
 RUN conda run -n py11 python -m nltk.downloader wordnet
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
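`RUN rm /root/miniconda3/envs/py11/compiler_compat/ld` is a common conda workaround rather than anything RAGFlow-specific: conda environments ship a `compiler_compat/ld` shim that can shadow the system linker and break native-extension builds (mpi4py and friends here). Removing it lets builds fall back to the system `ld`. Illustrated as a hedged sketch; check the paths in your own environment before deleting anything:

```bash
ls /root/miniconda3/envs/py11/compiler_compat/ld   # the shim conda installs
rm /root/miniconda3/envs/py11/compiler_compat/ld
which ld                                           # should now be /usr/bin/ld
```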
**Dockerfile hunk (variant based on `opencloudos/opencloudos:9.0`; file header not captured):**

```diff
@@ -1,57 +1,58 @@
 FROM opencloudos/opencloudos:9.0
 USER root
 
 WORKDIR /ragflow
 
 RUN dnf update -y && dnf install -y wget curl gcc-c++ openmpi-devel
 
 RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
     bash ~/miniconda.sh -b -p /root/miniconda3 && \
     rm ~/miniconda.sh && ln -s /root/miniconda3/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
     echo ". /root/miniconda3/etc/profile.d/conda.sh" >> ~/.bashrc && \
     echo "conda activate base" >> ~/.bashrc
 
 ENV PATH /root/miniconda3/bin:$PATH
 
 RUN conda create -y --name py11 python=3.11
 
 ENV CONDA_DEFAULT_ENV py11
 ENV CONDA_PREFIX /root/miniconda3/envs/py11
 ENV PATH $CONDA_PREFIX/bin:$PATH
 
 # RUN curl -sL https://rpm.nodesource.com/setup_14.x | bash -
 RUN dnf install -y nodejs
 
 RUN dnf install -y nginx
 
 ADD ./web ./web
 ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
-ADD ./graph ./graph
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 RUN dnf install -y openmpi openmpi-devel python3-openmpi
 ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
 ENV LD_LIBRARY_PATH /usr/lib64/openmpi/lib:$LD_LIBRARY_PATH
 RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
 RUN cd ./web && npm i --force && npm run build
 RUN conda run -n py11 pip install $(grep -ivE "mpi4py" ./requirements.txt) # without mpi4py==3.1.5
 RUN conda run -n py11 pip install redis
 
 RUN dnf update -y && \
     dnf install -y glib2 mesa-libGL && \
     dnf clean all
 
 RUN conda run -n py11 pip install ollama
 RUN conda run -n py11 python -m nltk.downloader punkt
 RUN conda run -n py11 python -m nltk.downloader wordnet
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
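One OpenCloudOS-specific wrinkle: `pip install $(grep -ivE "mpi4py" ./requirements.txt)` installs everything except `mpi4py`, which this image gets from `dnf` (`python3-openmpi`) instead. The command substitution relies on requirements.txt holding one plain requirement per line; an equivalent spelling that avoids shell word-splitting surprises:

```bash
# Drop mpi4py from the requirements, then install the rest via pip.
grep -ivE "mpi4py" ./requirements.txt > /tmp/requirements.nompi.txt
pip install -r /tmp/requirements.nompi.txt
```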
**README.md** (341 → 342 lines):

````diff
--- a/README.md
+++ b/README.md
@@ -1,341 +1,342 @@
 <div align="center">
 <a href="https://demo.ragflow.io/">
 <img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
 </a>
 </div>
 
 <p align="center">
 <a href="./README.md">English</a> |
 <a href="./README_zh.md">简体中文</a> |
-<a href="./README_ja.md">日本語</a>
+<a href="./README_ja.md">日本語</a> |
+<a href="./README_ko.md">한국어</a>
 </p>
 
 <p align="center">
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
 </a>
 <a href="https://demo.ragflow.io" target="_blank">
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.8.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.8.0"></a>
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.11.0"></a>
 <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
 <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
 </a>
 </p>
 
 <h4 align="center">
 <a href="https://ragflow.io/docs/dev/">Document</a> |
 <a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
 <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
 <details open>
 <summary></b>📕 Table of Contents</b></summary>
 
 - 💡 [What is RAGFlow?](#-what-is-ragflow)
 - 🎮 [Demo](#-demo)
 - 📌 [Latest Updates](#-latest-updates)
 - 🌟 [Key Features](#-key-features)
 - 🔎 [System Architecture](#-system-architecture)
 - 🎬 [Get Started](#-get-started)
 - 🔧 [Configurations](#-configurations)
 - 🛠️ [Build from source](#-build-from-source)
 - 🛠️ [Launch service from source](#-launch-service-from-source)
 - 📚 [Documentation](#-documentation)
 - 📜 [Roadmap](#-roadmap)
 - 🏄 [Community](#-community)
 - 🙌 [Contributing](#-contributing)
 
 </details>
 
 ## 💡 What is RAGFlow?
 
 [RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.
 
 ## 🎮 Demo
 
 Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
+<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
 </div>
 
 
-## 📌 Latest Updates
+## 🔥 Latest Updates
 
-- 2024-07-08 Supports [Graph](./graph/README.md).
-
-- 2024-06-27 Supports Markdown and Docx in the Q&A parsing method. Supports extracting images from Docx files. Supports extracting tables from Markdown files.
-- 2024-06-14 Supports PDF in the Q&A parsing method.
-- 2024-06-06 Supports [Self-RAG](https://huggingface.co/papers/2310.11511), which is enabled by default in dialog settings.
-- 2024-05-30 Integrates [BCE](https://github.com/netease-youdao/BCEmbedding) and [BGE](https://github.com/FlagOpen/FlagEmbedding) reranker models.
-- 2024-05-28 Supports LLM Baichuan and VolcanoArk.
-- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
-- 2024-05-21 Supports streaming output and text chunk retrieval API.
-- 2024-05-15 Integrates OpenAI GPT-4o.
+- 2024-09-13 Adds search mode for knowledge base Q&A.
+- 2024-09-09 Adds a medical consultant agent template.
+- 2024-08-22 Support text to SQL statements through RAG.
+- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
+- 2024-07-23 Supports audio file parsing.
+- 2024-07-08 Supports workflow based on [Graph](./agent/README.md).
+- 2024-06-27 Supports Markdown and Docx in the Q&A parsing method, extracting images from Docx files, extracting tables from Markdown files.
+- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
+
 
 ## 🌟 Key Features
 
 ### 🍭 **"Quality in, quality out"**
 
 - [Deep document understanding](./deepdoc/README.md)-based knowledge extraction from unstructured data with complicated formats.
 - Finds "needle in a data haystack" of literally unlimited tokens.
 
 ### 🍱 **Template-based chunking**
 
 - Intelligent and explainable.
 - Plenty of template options to choose from.
 
 ### 🌱 **Grounded citations with reduced hallucinations**
 
 - Visualization of text chunking to allow human intervention.
 - Quick view of the key references and traceable citations to support grounded answers.
 
 ### 🍔 **Compatibility with heterogeneous data sources**
 
 - Supports Word, slides, excel, txt, images, scanned copies, structured data, web pages, and more.
 
 ### 🛀 **Automated and effortless RAG workflow**
 
 - Streamlined RAG orchestration catered to both personal and large businesses.
 - Configurable LLMs as well as embedding models.
 - Multiple recall paired with fused re-ranking.
 - Intuitive APIs for seamless integration with business.
 
 ## 🔎 System Architecture
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
 </div>
 
 ## 🎬 Get Started
 
 ### 📝 Prerequisites
 
 - CPU >= 4 cores
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
 > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
 
 ### 🚀 Start up the server
 
 1. Ensure `vm.max_map_count` >= 262144:
 
 > To check the value of `vm.max_map_count`:
 >
 > ```bash
 > $ sysctl vm.max_map_count
 > ```
 >
 > Reset `vm.max_map_count` to a value at least 262144 if it is not.
 >
 > ```bash
 > # In this case, we set it to 262144:
 > $ sudo sysctl -w vm.max_map_count=262144
 > ```
 >
 > This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
 >
 > ```bash
 > vm.max_map_count=262144
 > ```
 
 2. Clone the repo:
 
 ```bash
 $ git clone https://github.com/infiniflow/ragflow.git
 ```
 
 3. Build the pre-built Docker images and start up the server:
 
-> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.8.0`, before running the following commands.
+> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.11.0`, before running the following commands.
 
 ```bash
 $ cd ragflow/docker
 $ chmod +x ./entrypoint.sh
 $ docker compose up -d
 ```
 
 
 > The core image is about 9 GB in size and may take a while to load.
 
 4. Check the server status after having the server up and running:
 
 ```bash
 $ docker logs -f ragflow-server
 ```
 
 _The following output confirms a successful launch of the system:_
 
 ```bash
 ____ ______ __
 / __ \ ____ _ ____ _ / ____// /____ _ __
 / /_/ // __ `// __ `// /_ / // __ \| | /| / /
 / _, _// /_/ // /_/ // __/ / // /_/ /| |/ |/ /
 /_/ |_| \__,_/ \__, //_/ /_/ \____/ |__/|__/
 /____/
 
 * Running on all addresses (0.0.0.0)
 * Running on http://127.0.0.1:9380
 * Running on http://x.x.x.x:9380
 INFO:werkzeug:Press CTRL+C to quit
 ```
-> If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anomaly` error because, at that moment, your RAGFlow may not be fully initialized.
+> If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal` error because, at that moment, your RAGFlow may not be fully initialized.
 
 5. In your web browser, enter the IP address of your server and log in to RAGFlow.
 > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
 6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.
 
 > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.
 
 _The show is now on!_
 
 ## 🔧 Configurations
 
 When it comes to system configurations, you will need to manage the following files:
 
 - [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
 - [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
 - [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.
 
 You must ensure that changes to the [.env](./docker/.env) file are in line with what are in the [service_conf.yaml](./docker/service_conf.yaml) file.
 
 > The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations, and you are REQUIRED to ensure that all environment settings listed in the [./docker/README](./docker/README.md) file are aligned with the corresponding configurations in the [service_conf.yaml](./docker/service_conf.yaml) file.
 
 To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.
 
 > Updates to all system configurations require a system reboot to take effect:
 >
 > ```bash
 > $ docker-compose up -d
 > ```
 
 ## 🛠️ Build from source
 
 To build the Docker images from source:
 
 ```bash
 $ git clone https://github.com/infiniflow/ragflow.git
 $ cd ragflow/
 $ docker build -t infiniflow/ragflow:dev .
 $ cd ragflow/docker
 $ chmod +x ./entrypoint.sh
 $ docker compose up -d
 ```
 
 ## 🛠️ Launch service from source
 
 To launch the service from source:
 
 1. Clone the repository:
 
 ```bash
 $ git clone https://github.com/infiniflow/ragflow.git
 $ cd ragflow/
 ```
 
 2. Create a virtual environment, ensuring that Anaconda or Miniconda is installed:
 
 ```bash
 $ conda create -n ragflow python=3.11.0
 $ conda activate ragflow
 $ pip install -r requirements.txt
 ```
 
 ```bash
 # If your CUDA version is higher than 12.0, run the following additional commands:
 $ pip uninstall -y onnxruntime-gpu
 $ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
 ```
 
 3. Copy the entry script and configure environment variables:
 
 ```bash
 # Get the Python path:
 $ which python
 # Get the ragflow project path:
 $ pwd
 ```
 
 ```bash
 $ cp docker/entrypoint.sh .
 $ vi entrypoint.sh
 ```
 
 ```bash
 # Adjust configurations according to your actual situation (the following two export commands are newly added):
 # - Assign the result of `which python` to `PY`.
 # - Assign the result of `pwd` to `PYTHONPATH`.
 # - Comment out `LD_LIBRARY_PATH`, if it is configured.
 # - Optional: Add Hugging Face mirror.
 PY=${PY}
 export PYTHONPATH=${PYTHONPATH}
 export HF_ENDPOINT=https://hf-mirror.com
 ```
 
 4. Launch the third-party services (MinIO, Elasticsearch, Redis, and MySQL):
 
 ```bash
 $ cd docker
 $ docker compose -f docker-compose-base.yml up -d
 ```
 
 5. Check the configuration files, ensuring that:
 
 - The settings in **docker/.env** match those in **conf/service_conf.yaml**.
 - The IP addresses and ports for related services in **service_conf.yaml** match the local machine IP and ports exposed by the container.
 
 6. Launch the RAGFlow backend service:
 
 ```bash
 $ chmod +x ./entrypoint.sh
 $ bash ./entrypoint.sh
 ```
 
 7. Launch the frontend service:
 
 ```bash
 $ cd web
 $ npm install --registry=https://registry.npmmirror.com --force
 $ vim .umirc.ts
 # Update proxy.target to http://127.0.0.1:9380
 $ npm run dev
 ```
 
 8. Deploy the frontend service:
 
 ```bash
 $ cd web
 $ npm install --registry=https://registry.npmmirror.com --force
 $ umi build
 $ mkdir -p /ragflow/web
 $ cp -r dist /ragflow/web
 $ apt install nginx -y
 $ cp ../docker/nginx/proxy.conf /etc/nginx
 $ cp ../docker/nginx/nginx.conf /etc/nginx
 $ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
 $ systemctl start nginx
 ```
 
 ## 📚 Documentation
 
 - [Quickstart](https://ragflow.io/docs/dev/)
 - [User guide](https://ragflow.io/docs/dev/category/user-guides)
 - [References](https://ragflow.io/docs/dev/category/references)
 - [FAQ](https://ragflow.io/docs/dev/faq)
 
 ## 📜 Roadmap
 
 See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)
 
 ## 🏄 Community
 
 - [Discord](https://discord.gg/4XxujFgUN7)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
 
 ## 🙌 Contributing
 
 RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community. If you would like to be a part, review our [Contribution Guidelines](./docs/references/CONTRIBUTING.md) first.
````
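Every version string touched in this README diff (v0.8.0 → v0.11.0) traces back to the `RAGFLOW_VERSION` variable in **docker/.env**. A one-liner for pinning it the way the updated note describes (hedged, since it assumes the variable sits on its own `RAGFLOW_VERSION=...` line, as the README implies):

```bash
sed -i 's/^RAGFLOW_VERSION=.*/RAGFLOW_VERSION=v0.11.0/' docker/.env
cd docker && docker compose up -d
```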
**README_ja.md** (286 → 289 lines; the capture ends mid-diff):

````diff
--- a/README_ja.md
+++ b/README_ja.md
@@ -1,286 +1,289 @@
 <div align="center">
 <a href="https://demo.ragflow.io/">
 <img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
 </a>
 </div>
 
 <p align="center">
 <a href="./README.md">English</a> |
 <a href="./README_zh.md">简体中文</a> |
-<a href="./README_ja.md">日本語</a>
+<a href="./README_ja.md">日本語</a> |
+<a href="./README_ko.md">한국어</a>
 </p>
 
 <p align="center">
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
 </a>
 <a href="https://demo.ragflow.io" target="_blank">
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.8.0-brightgreen"
-alt="docker pull infiniflow/ragflow:v0.8.0"></a>
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen"
+alt="docker pull infiniflow/ragflow:v0.11.0"></a>
 <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
 <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
 </a>
 </p>
 
 <h4 align="center">
 <a href="https://ragflow.io/docs/dev/">Document</a> |
 <a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
 <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
 ## 💡 RAGFlow とは?
 
 [RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
 
 ## 🎮 Demo
 
 デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
+<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
 </div>
 
 
-## 📌 最新情報
-- 2024-07-08 [Graph](./graph/README.md) に対応しました。.
-- 2024-06-27 Q&A解析方式はMarkdownファイルとDocxファイルをサポートしています。Docxファイルからの画像の抽出をサポートします。Markdownファイルからテーブルを抽出することをサポートします。
-- 2024-06-14 Q&A 解析メソッドは PDF ファイルをサポートしています。
-- 2024-06-06 会話設定でデフォルトでチェックされている [Self-RAG](https://huggingface.co/papers/2310.11511) をサポートします。
-- 2024-05-30 [BCE](https://github.com/netease-youdao/BCEmbedding) 、[BGE](https://github.com/FlagOpen/FlagEmbedding) reranker を統合。
-- 2024-05-28 LLM BaichuanとVolcanoArkを統合しました。
-- 2024-05-23 より良いテキスト検索のために [RAPTOR](https://arxiv.org/html/2401.18059v1) をサポート。
-- 2024-05-21 ストリーミング出力とテキストチャンク取得APIをサポート。
-- 2024-05-15 OpenAI GPT-4oを統合しました。
+## 🔥 最新情報
+
+- 2024-09-13 ナレッジベース Q&A の検索モードを追加しました。
+- 2024-09-09 エージェントに医療相談テンプレートを追加しました。
+- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
+- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインド マップをサポートします。
+- 2024-07-23 音声ファイルの解析をサポートしました。
+- 2024-07-08 [Graph](./agent/README.md) ベースのワークフローをサポート
+- 2024-06-27 Q&A 解析メソッドで Markdown と Docx をサポートし、Docx ファイルから画像を抽出し、Markdown ファイルからテーブルを抽出します。
+- 2024-05-23 より良いテキスト検索のために [RAPTOR](https://arxiv.org/html/2401.18059v1) をサポート。
+
 
 ## 🌟 主な特徴
 
 ### 🍭 **"Quality in, quality out"**
 
 - 複雑な形式の非構造化データからの[深い文書理解](./deepdoc/README.md)ベースの知識抽出。
 - 無限のトークンから"干し草の山の中の針"を見つける。
 
 ### 🍱 **テンプレートベースのチャンク化**
 
 - 知的で解釈しやすい。
 - テンプレートオプションが豊富。
 
 ### 🌱 **ハルシネーションが軽減された根拠のある引用**
 
 - 可視化されたテキストチャンキング(text chunking)で人間の介入を可能にする。
 - 重要な参考文献のクイックビューと、追跡可能な引用によって根拠ある答えをサポートする。
 
 ### 🍔 **多様なデータソースとの互換性**
 
 - Word、スライド、Excel、txt、画像、スキャンコピー、構造化データ、Web ページなどをサポート。
 
 ### 🛀 **自動化された楽な RAG ワークフロー**
 
 - 個人から大企業まで対応できる RAG オーケストレーション(orchestration)。
 - カスタマイズ可能な LLM とエンベッディングモデル。
 - 複数の想起と融合された再ランク付け。
 - 直感的な API によってビジネスとの統合がシームレスに。
 
 ## 🔎 システム構成
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
 </div>
 
 ## 🎬 初期設定
 
 ### 📝 必要条件
 
 - CPU >= 4 cores
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
 > ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。
 
 ### 🚀 サーバーを起動
 
 1. `vm.max_map_count` >= 262144 であることを確認する:
 
 > `vm.max_map_count` の値をチェックするには:
 >
 > ```bash
 > $ sysctl vm.max_map_count
 > ```
 >
 > `vm.max_map_count` が 262144 より大きい値でなければリセットする。
 >
 > ```bash
 > # In this case, we set it to 262144:
 > $ sudo sysctl -w vm.max_map_count=262144
 > ```
 >
 > この変更はシステム再起動後にリセットされる。変更を恒久的なものにするには、**/etc/sysctl.conf** の `vm.max_map_count` 値を適宜追加または更新する:
 >
 > ```bash
 > vm.max_map_count=262144
 > ```
 
 2. リポジトリをクローンする:
 
 ```bash
 $ git clone https://github.com/infiniflow/ragflow.git
 ```
 
 3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:
 
 ```bash
 $ cd ragflow/docker
 $ chmod +x ./entrypoint.sh
 $ docker compose up -d
 ```
 
-> 上記のコマンドを実行すると、RAGFlowの開発版dockerイメージが自動的にダウンロードされます。 特定のバージョンのDockerイメージをダウンロードして実行したい場合は、docker/.envファイルのRAGFLOW_VERSION変数を見つけて、対応するバージョンに変更してください。 例えば、RAGFLOW_VERSION=v0.8.0として、上記のコマンドを実行してください。
+> 上記のコマンドを実行すると、RAGFlowの開発版dockerイメージが自動的にダウンロードされます。 特定のバージョンのDockerイメージをダウンロードして実行したい場合は、docker/.envファイルのRAGFLOW_VERSION変数を見つけて、対応するバージョンに変更してください。 例えば、RAGFLOW_VERSION=v0.11.0として、上記のコマンドを実行してください。
 
 > コアイメージのサイズは約 9 GB で、ロードに時間がかかる場合があります。
 
 4. サーバーを立ち上げた後、サーバーの状態を確認する:
 
 ```bash
 $ docker logs -f ragflow-server
 ```
 
 _以下の出力は、システムが正常に起動したことを確認するものです:_
````

*(The mirror's capture of this diff ends here.)*
|
||||||
|
```
|
||||||
```bash
|
|
||||||
____ ______ __
|
_以下の出力は、システムが正常に起動したことを確認するものです:_
|
||||||
/ __ \ ____ _ ____ _ / ____// /____ _ __
|
|
||||||
/ /_/ // __ `// __ `// /_ / // __ \| | /| / /
|
```bash
|
||||||
/ _, _// /_/ // /_/ // __/ / // /_/ /| |/ |/ /
|
____ ______ __
|
||||||
/_/ |_| \__,_/ \__, //_/ /_/ \____/ |__/|__/
|
/ __ \ ____ _ ____ _ / ____// /____ _ __
|
||||||
/____/
|
/ /_/ // __ `// __ `// /_ / // __ \| | /| / /
|
||||||
|
/ _, _// /_/ // /_/ // __/ / // /_/ /| |/ |/ /
|
||||||
* Running on all addresses (0.0.0.0)
|
/_/ |_| \__,_/ \__, //_/ /_/ \____/ |__/|__/
|
||||||
* Running on http://127.0.0.1:9380
|
/____/
|
||||||
* Running on http://x.x.x.x:9380
|
|
||||||
INFO:werkzeug:Press CTRL+C to quit
|
* Running on all addresses (0.0.0.0)
|
||||||
```
|
* Running on http://127.0.0.1:9380
|
||||||
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。
|
* Running on http://x.x.x.x:9380
|
||||||
|
INFO:werkzeug:Press CTRL+C to quit
|
||||||
5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
|
```
|
||||||
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
|
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。
|
||||||
6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
|
|
||||||
|
5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
|
||||||
> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
|
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
|
||||||
|
6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
|
||||||
_これで初期設定完了!ショーの開幕です!_
|
|
||||||
|
> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
|
||||||
## 🔧 コンフィグ
|
|
||||||
|
_これで初期設定完了!ショーの開幕です!_
|
||||||
システムコンフィグに関しては、以下のファイルを管理する必要がある:
|
|
||||||
|
## 🔧 コンフィグ
|
||||||
- [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
|
|
||||||
- [service_conf.yaml](./docker/service_conf.yaml): バックエンドのサービスを設定します。
|
システムコンフィグに関しては、以下のファイルを管理する必要がある:
|
||||||
- [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存している。
|
|
||||||
|
- [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
|
||||||
[.env](./docker/.env) ファイルの変更が [service_conf.yaml](./docker/service_conf.yaml) ファイルの内容と一致していることを確認する必要があります。
|
- [service_conf.yaml](./docker/service_conf.yaml): バックエンドのサービスを設定します。
|
||||||
|
- [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存している。
|
||||||
> [./docker/README](./docker/README.md) ファイルは環境設定とサービスコンフィグの詳細な説明を提供し、[./docker/README](./docker/README.md) ファイルに記載されている全ての環境設定が [service_conf.yaml](./docker/service_conf.yaml) ファイルの対応するコンフィグと一致していることを確認することが義務付けられています。
|
|
||||||
|
[.env](./docker/.env) ファイルの変更が [service_conf.yaml](./docker/service_conf.yaml) ファイルの内容と一致していることを確認する必要があります。
|
||||||
デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。
|
|
||||||
|
> [./docker/README](./docker/README.md) ファイルは環境設定とサービスコンフィグの詳細な説明を提供し、[./docker/README](./docker/README.md) ファイルに記載されている全ての環境設定が [service_conf.yaml](./docker/service_conf.yaml) ファイルの対応するコンフィグと一致していることを確認することが義務付けられています。
|
||||||
> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
|
|
||||||
>
|
デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。
|
||||||
> ```bash
|
|
||||||
> $ docker-compose up -d
|
> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
|
||||||
> ```
|
>
|
||||||
|
> ```bash
|
||||||
## 🛠️ ソースからビルドする
|
> $ docker-compose up -d
|
||||||
|
> ```
|
||||||
ソースからDockerイメージをビルドするには:
|
|
||||||
|
## 🛠️ ソースからビルドする
|
||||||
```bash
|
|
||||||
$ git clone https://github.com/infiniflow/ragflow.git
|
ソースからDockerイメージをビルドするには:
|
||||||
$ cd ragflow/
|
|
||||||
$ docker build -t infiniflow/ragflow:v0.8.0 .
|
```bash
|
||||||
$ cd ragflow/docker
|
$ git clone https://github.com/infiniflow/ragflow.git
|
||||||
$ chmod +x ./entrypoint.sh
|
$ cd ragflow/
|
||||||
$ docker compose up -d
|
$ docker build -t infiniflow/ragflow:v0.11.0 .
|
||||||
```
|
$ cd ragflow/docker
|
||||||
|
$ chmod +x ./entrypoint.sh
|
||||||
## 🛠️ ソースコードからサービスを起動する方法
|
$ docker compose up -d
|
||||||
|
```
|
||||||
ソースコードからサービスを起動する場合は、以下の手順に従ってください:
|
|
||||||
|
## 🛠️ ソースコードからサービスを起動する方法
|
||||||
1. リポジトリをクローンします
|
|
||||||
```bash
|
ソースコードからサービスを起動する場合は、以下の手順に従ってください:
|
||||||
$ git clone https://github.com/infiniflow/ragflow.git
|
|
||||||
$ cd ragflow/
|
1. リポジトリをクローンします
|
||||||
```
|
```bash
|
||||||
|
$ git clone https://github.com/infiniflow/ragflow.git
|
||||||
2. 仮想環境を作成します(AnacondaまたはMinicondaがインストールされていることを確認してください)
|
$ cd ragflow/
|
||||||
```bash
|
```
|
||||||
$ conda create -n ragflow python=3.11.0
|
|
||||||
$ conda activate ragflow
|
2. 仮想環境を作成します(AnacondaまたはMinicondaがインストールされていることを確認してください)
|
||||||
$ pip install -r requirements.txt
|
```bash
|
||||||
```
|
$ conda create -n ragflow python=3.11.0
|
||||||
CUDAのバージョンが12.0以上の場合、以下の追加コマンドを実行してください:
|
$ conda activate ragflow
|
||||||
```bash
|
$ pip install -r requirements.txt
|
||||||
$ pip uninstall -y onnxruntime-gpu
|
```
|
||||||
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
|
CUDAのバージョンが12.0以上の場合、以下の追加コマンドを実行してください:
|
||||||
```
|
```bash
|
||||||
|
$ pip uninstall -y onnxruntime-gpu
|
||||||
3. エントリースクリプトをコピーし、環境変数を設定します
|
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
|
||||||
```bash
|
```
|
||||||
$ cp docker/entrypoint.sh .
|
|
||||||
$ vi entrypoint.sh
|
3. エントリースクリプトをコピーし、環境変数を設定します
|
||||||
```
|
```bash
|
||||||
以下のコマンドでPythonのパスとragflowプロジェクトのパスを取得します:
|
$ cp docker/entrypoint.sh .
|
||||||
```bash
|
$ vi entrypoint.sh
|
||||||
$ which python
|
```
|
||||||
$ pwd
|
以下のコマンドで Python のパスとragflowプロジェクトのパスを取得します:
|
||||||
```
|
```bash
|
||||||
|
$ which python
|
||||||
`which python`の出力を`PY`の値として、`pwd`の出力を`PYTHONPATH`の値として設定します。
|
$ pwd
|
||||||
|
```
|
||||||
`LD_LIBRARY_PATH`が既に設定されている場合は、コメントアウトできます。
|
|
||||||
|
`which python` の出力を `PY` の値として、`pwd` の出力を `PYTHONPATH` の値として設定します。
|
||||||
```bash
|
|
||||||
# 実際の状況に応じて設定を調整してください。以下の二つのexportは新たに追加された設定です
|
`LD_LIBRARY_PATH` が既に設定されている場合は、コメントアウトできます。
|
||||||
PY=${PY}
|
|
||||||
export PYTHONPATH=${PYTHONPATH}
|
```bash
|
||||||
# オプション:Hugging Faceミラーを追加
|
# 実際の状況に応じて設定を調整してください。以下の二つの export は新たに追加された設定です
|
||||||
export HF_ENDPOINT=https://hf-mirror.com
|
PY=${PY}
|
||||||
```
|
export PYTHONPATH=${PYTHONPATH}
|
||||||
|
# オプション:Hugging Face ミラーを追加
|
||||||
4. 基本サービスを起動します
|
export HF_ENDPOINT=https://hf-mirror.com
|
||||||
```bash
|
```
|
||||||
$ cd docker
|
|
||||||
$ docker compose -f docker-compose-base.yml up -d
|
4. 基本サービスを起動します
|
||||||
```
|
```bash
|
||||||
|
$ cd docker
|
||||||
5. 設定ファイルを確認します
|
$ docker compose -f docker-compose-base.yml up -d
|
||||||
**docker/.env**内の設定が**conf/service_conf.yaml**内の設定と一致していることを確認してください。**service_conf.yaml**内の関連サービスのIPアドレスとポートは、ローカルマシンのIPアドレスとコンテナが公開するポートに変更する必要があります。
|
```
|
||||||
|
|
||||||
6. サービスを起動します
|
5. 設定ファイルを確認します
|
||||||
```bash
|
**docker/.env** 内の設定が**conf/service_conf.yaml**内の設定と一致していることを確認してください。**service_conf.yaml**内の関連サービスのIPアドレスとポートは、ローカルマシンのIPアドレスとコンテナが公開するポートに変更する必要があります。
|
||||||
$ chmod +x ./entrypoint.sh
|
|
||||||
$ bash ./entrypoint.sh
|
6. サービスを起動します
|
||||||
```
|
```bash
|
||||||
|
$ chmod +x ./entrypoint.sh
|
||||||
## 📚 ドキュメンテーション
|
$ bash ./entrypoint.sh
|
||||||
|
```
|
||||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
|
||||||
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
|
## 📚 ドキュメンテーション
|
||||||
- [References](https://ragflow.io/docs/dev/category/references)
|
|
||||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||||
|
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
|
||||||
## 📜 ロードマップ
|
- [References](https://ragflow.io/docs/dev/category/references)
|
||||||
|
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||||
[RAGFlow ロードマップ 2024](https://github.com/infiniflow/ragflow/issues/162) を参照
|
|
||||||
|
## 📜 ロードマップ
|
||||||
## 🏄 コミュニティ
|
|
||||||
|
[RAGFlow ロードマップ 2024](https://github.com/infiniflow/ragflow/issues/162) を参照
|
||||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
|
||||||
- [Twitter](https://twitter.com/infiniflowai)
|
## 🏄 コミュニティ
|
||||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
|
||||||
|
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||||
## 🙌 コントリビュート
|
- [Twitter](https://twitter.com/infiniflowai)
|
||||||
|
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||||
RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず[コントリビューションガイド](./docs/references/CONTRIBUTING.md)をご覧ください。
|
|
||||||
|
## 🙌 コントリビュート
|
||||||
|
|
||||||
|
RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](./docs/references/CONTRIBUTING.md)をご覧ください。
|
||||||
325	README_ko.md	Normal file
@ -0,0 +1,325 @@

<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a>
</p>

<p align="center">
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.11.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. Combined with large language models (LLMs), it provides accurate question-answering capabilities, backed by citations from reliable sources in data of various complex formats. RAGFlow offers a streamlined RAG workflow for enterprises of any scale.

## 🎮 Demo

Try the demo at [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
</div>

## 🔥 Latest Updates

- 2024-09-13 Adds a search mode for knowledge base Q&A.
- 2024-09-09 Adds a medical consultation template to Agent.
- 2024-08-22 Supports text to SQL statements through RAG.
- 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.
- 2024-07-23 Supports parsing audio files.
- 2024-07-08 Supports workflows based on [Graph](./agent/README.md).
- 2024-06-27 The Q&A parsing method supports Markdown and Docx; supports extracting images from Docx files and tables from Markdown files.
- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.

## 🌟 Key Features

### 🍭 **"Quality in, quality out"**

- Extracts knowledge from unstructured data with complicated formats based on [deep document understanding](./deepdoc/README.md).
- Finds the "needle in a haystack" of literally unlimited tokens.

### 🍱 **Template-based chunking**

- Intelligent and explainable.
- Plenty of template options to choose from.

### 🌱 **Grounded citations with reduced hallucinations**

- Visualizes text chunking to allow human intervention.
- Quick view of the key references and traceable citations to support grounded answers.

### 🍔 **Compatibility with heterogeneous data sources**

- Supports Word, slides, Excel, txt, images, scanned copies, structured data, web pages, and more.

### 🛀 **Automated and effortless RAG workflow**

- Streamlined RAG orchestration catering to both personal and large businesses.
- Configurable LLMs as well as embedding models.
- Multiple recall paired with fused re-ranking.
- Intuitive APIs for seamless integration with business.

## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Get Started

### 📝 Prerequisites

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1

> If Docker is not installed on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start up the server

1. Ensure `vm.max_map_count` is at least 262144:

> Check the value of `vm.max_map_count` with the following command:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> If `vm.max_map_count` is smaller than 262144, reset it.
>
> ```bash
> # In this case, we set it to 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> This change is reset after a system reboot. To make the change permanent, add or update the `vm.max_map_count` value in /etc/sysctl.conf:
>
> ```bash
> vm.max_map_count=262144
> ```

2. Clone the repository:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. Build the pre-built Docker images and start the server:

> Running the following commands automatically downloads the *dev* version of the RAGFlow Docker image. To download and run a specific Docker version, update `RAGFLOW_VERSION` in the **docker/.env** file to the desired version, for example `RAGFLOW_VERSION=v0.11.0`, then run the commands below.

```bash
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

> The base image is about 9 GB in size and may take a while to load.

4. Check the server status after the server has started:

```bash
$ docker logs -f ragflow-server
```

_The following output confirms that the system started successfully:_

```bash
    ____                 ______ __
   / __ \ ____ _ ____ _ / ____// /____  _      __
  / /_/ // __ `// __ `// /_   / // __ \| | /| / /
 / _, _// /_/ // /_/ // __/  / // /_/ /| |/ |/ /
/_/ |_| \__,_/ \__, //_/    /_/ \____/ |__/|__/
              /____/

 * Running on all addresses (0.0.0.0)
 * Running on http://127.0.0.1:9380
 * Running on http://x.x.x.x:9380
 INFO:werkzeug:Press CTRL+C to quit
```

> If you skip the confirmation step and log in to RAGFlow directly, the browser may raise a `network abnormal` error, because RAGFlow may not be fully initialized.

5. Enter the IP address of your server in a web browser and log in to RAGFlow.

> With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (excluding the port number), since the default HTTP serving port `80` can be omitted when using the default configuration.

6. In the [service_conf.yaml](./docker/service_conf.yaml) file, select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.

_The show is now on!_

## 🔧 Configurations

When it comes to system configurations, you need to manage the following files:

- [.env](./docker/.env): Contains the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system starts up using [docker-compose.yml](./docker/docker-compose.yml).

Make sure that changes to the [.env](./docker/.env) file are consistent with the contents of the [service_conf.yaml](./docker/service_conf.yaml) file.

> The [./docker/README](./docker/README.md) file contains a detailed description of the environment settings and service configurations; make sure that all environment settings listed in the [./docker/README](./docker/README.md) file match the corresponding configurations in the [service_conf.yaml](./docker/service_conf.yaml) file.

To update the default HTTP serving port (80), change `80:80` to `<YOUR_SERVING_PORT>:80` in the [docker-compose.yml](./docker/docker-compose.yml) file.

> All system configuration updates require a system reboot to take effect.
>
> ```bash
> $ docker-compose up -d
> ```

## 🛠️ Build from source

To build the Docker images from source:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:dev .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ Launch the service from source

To launch the service from source:

1. Clone the repository:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. Create a virtual environment, making sure Anaconda or Miniconda is installed:

```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```

```bash
# If your CUDA version is higher than 12.0, additionally run the following commands:
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. Copy the entry script and configure the environment variables:

```bash
# Get the Python path:
$ which python
# Get the RAGFlow project path:
$ pwd
```

```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```

```bash
# Adjust the configuration to your actual situation (the following two export commands are newly added):
# - Assign the result of `which python` to `PY`.
# - Assign the result of `pwd` to `PYTHONPATH`.
# - Comment out `LD_LIBRARY_PATH` if it is already configured.
# - Optional: add a Hugging Face mirror.
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
export HF_ENDPOINT=https://hf-mirror.com
```

4. Start the other services (MinIO, Elasticsearch, Redis, and MySQL):

```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. Check the configuration files and confirm the following:

- The settings in **docker/.env** match those in **conf/service_conf.yaml**.
- The IP addresses and ports of the related services in **service_conf.yaml** match the local machine's IP address and the ports exposed by the containers.

6. Launch the RAGFlow back-end service:

```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

7. Launch the front-end service:

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# Update proxy.target to http://127.0.0.1:9380
$ npm run dev
```

8. Deploy the front-end service:

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ umi build
$ mkdir -p /ragflow/web
$ cp -r dist /ragflow/web
$ apt install nginx -y
$ cp ../docker/nginx/proxy.conf /etc/nginx
$ cp ../docker/nginx/nginx.conf /etc/nginx
$ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
$ systemctl start nginx
```

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162).

## 🏄 Community

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contributing

RAGFlow thrives on open-source collaboration. In this spirit, we welcome diverse contributions from the community. If you would like to take part, review our [guidelines](./docs/references/CONTRIBUTING.md) first.
53	README_zh.md
@ -7,7 +7,8 @@

 <p align="center">
 <a href="./README.md">English</a> |
 <a href="./README_zh.md">简体中文</a> |
-<a href="./README_ja.md">日本語</a>
+<a href="./README_ja.md">日本語</a> |
+<a href="./README_ko.md">한국어</a>
 </p>

 <p align="center">

@ -17,7 +18,7 @@

 <a href="https://demo.ragflow.io" target="_blank">
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.8.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.8.0"></a>
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.11.0"></a>
 <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
 <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
 </a>

@ -40,20 +41,20 @@

 Visit [https://demo.ragflow.io](https://demo.ragflow.io) to try the demo.
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
+<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
 </div>

-## 📌 Recent Updates
+## 🔥 Recent Updates

-- 2024-07-08 Supports [Graph](./graph/README.md).
-- 2024-06-27 The Q&A parsing method supports Markdown and Docx files; supports extracting images from Docx files and tables from Markdown files.
-- 2024-06-14 The Q&A parsing method supports PDF files.
-- 2024-06-06 Supports [Self-RAG](https://huggingface.co/papers/2310.11511), checked by default in dialog settings.
-- 2024-05-30 Integrates the [BCE](https://github.com/netease-youdao/BCEmbedding) and [BGE](https://github.com/FlagOpen/FlagEmbedding) reranker models.
-- 2024-05-28 Integrates the LLMs Baichuan and Volcano Ark.
+- 2024-09-13 Adds a knowledge base Q&A search mode.
+- 2024-09-09 Adds a medical consultation template to Agent.
+- 2024-08-22 Supports converting natural language to SQL statements with RAG.
+- 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.
+- 2024-07-23 Supports parsing audio files.
+- 2024-07-08 Supports Agentic RAG: workflows based on [Graph](./agent/README.md).
+- 2024-06-27 The Q&A parsing method supports Markdown and Docx files; supports extracting images from Docx files and tables from Markdown files.
 - 2024-05-23 Implements [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
-- 2024-05-21 Supports streaming output and a text-chunk retrieval API.
-- 2024-05-15 Integrates the LLM OpenAI GPT-4o.

 ## 🌟 Key Features

@ -136,7 +137,7 @@

 $ docker compose -f docker-compose-CN.yml up -d
 ```

-> Note that running the command above automatically downloads the dev version of the RAGFlow Docker image. To download and run a specific version of the Docker image, find the RAGFLOW_VERSION variable in the docker/.env file and change it to the corresponding version, for example RAGFLOW_VERSION=v0.8.0, then run the command above.
+> Note that running the command above automatically downloads the dev version of the RAGFlow Docker image. To download and run a specific version of the Docker image, find the RAGFLOW_VERSION variable in the docker/.env file and change it to the corresponding version, for example RAGFLOW_VERSION=v0.11.0, then run the command above.

 > The core image is about 9 GB and may take some time to pull. Please be patient.

@ -161,7 +162,7 @@

  * Running on http://x.x.x.x:9380
  INFO:werkzeug:Press CTRL+C to quit
 ```
-> If you skip this confirmation step and log in to RAGFlow directly, your browser may prompt `network anomaly` or `网络异常`, because RAGFlow may not have fully started.
+> If you skip this confirmation step and log in to RAGFlow directly, your browser may prompt `network abnormal` or `网络异常`, because RAGFlow may not have fully started.

 5. Enter the IP address of your server in your browser and log in to RAGFlow.
 > In the example above, you only need to enter http://IP_OF_YOUR_MACHINE; if the configuration is unchanged, there is no need to enter a port (the default HTTP serving port is 80).

@ -198,7 +199,7 @@

 ```bash
 $ git clone https://github.com/infiniflow/ragflow.git
 $ cd ragflow/
-$ docker build -t infiniflow/ragflow:v0.8.0 .
+$ docker build -t infiniflow/ragflow:v0.11.0 .
 $ cd ragflow/docker
 $ chmod +x ./entrypoint.sh
 $ docker compose up -d

@ -209,24 +210,27 @@ $ docker compose up -d

 To launch the service from source, follow these steps:

 1. Clone the repository

 ```bash
 $ git clone https://github.com/infiniflow/ragflow.git
 $ cd ragflow/
 ```

 2. Create a virtual environment (make sure Anaconda or Miniconda is installed)

 ```bash
 $ conda create -n ragflow python=3.11.0
 $ conda activate ragflow
 $ pip install -r requirements.txt
 ```
 If cuda > 12.0, additionally run the following commands:
 ```bash
 $ pip uninstall -y onnxruntime-gpu
 $ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
 ```

 3. Copy the entry script and configure the environment variables

 ```bash
 $ cp docker/entrypoint.sh .
 $ vi entrypoint.sh

@ -237,19 +241,20 @@ $ which python

 $ pwd
 ```

 Use the output of `which python` above as the value of `PY` and the output of `pwd` as the value of `PYTHONPATH`.

 `LD_LIBRARY_PATH` can be commented out if the environment is already configured.

 ```bash
 # Adjust this configuration to your actual situation; the two exports are newly added
 PY=${PY}
 export PYTHONPATH=${PYTHONPATH}
 # Optional: add a Hugging Face mirror
 export HF_ENDPOINT=https://hf-mirror.com
 ```

 4. Start the base services

 ```bash
 $ cd docker
 $ docker compose -f docker-compose-base.yml up -d

@ -259,11 +264,14 @@ $ docker compose -f docker-compose-base.yml up -d

 Make sure the settings in **docker/.env** match those in **conf/service_conf.yaml**; the IP addresses and ports of the related services in **service_conf.yaml** should be changed to the local machine's IP address and the ports mapped out by the containers.

 6. Launch the service

 ```bash
 $ chmod +x ./entrypoint.sh
 $ bash ./entrypoint.sh
 ```

 7. Launch the WebUI service

 ```bash
 $ cd web
 $ npm install --registry=https://registry.npmmirror.com --force

@ -273,6 +281,7 @@ $ npm run dev

 ```

 8. Deploy the WebUI service

 ```bash
 $ cd web
 $ npm install --registry=https://registry.npmmirror.com --force

@ -304,7 +313,11 @@ $ systemctl start nginx

 ## 🙌 Contribution Guidelines

 RAGFlow can only flourish through open-source collaboration. In this spirit, we welcome all kinds of contributions from the community. If you would like to take part, review our [Contributor Guidelines](./docs/references/CONTRIBUTING.md) first.

+## 🤝 Business Cooperation
+
+- [Book a consultation](https://aao615odquw.feishu.cn/share/base/form/shrcnjw7QleretCLqh1nuPo1xxh)

 ## 👥 Join the Community
@ -18,7 +18,7 @@ main

### Actual behavior

 The restricted_loads function at [api/utils/__init__.py#L215](https://github.com/infiniflow/ragflow/blob/main/api/utils/__init__.py#L215) is still vulnerable, leading to code execution.
-The main reson is that numpy module has a numpy.f2py.diagnose.run_command function directly execute commands, but the restricted_loads function allows users import functions in module numpy.
+The main reason is that the numpy module has a numpy.f2py.diagnose.run_command function that directly executes commands, while the restricted_loads function allows users to import functions from the numpy module.

### Steps to reproduce
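A conventional mitigation for this class of pickle vulnerability is a deny-by-default unpickler that refuses every global except an explicitly named allowlist, since blocklisting "dangerous" modules misses indirect command runners like numpy.f2py.diagnose.run_command. The sketch below follows the pattern from the Python pickle documentation; it is illustrative only, not RAGFlow's actual fix, and the `_ALLOWED` table is a hypothetical example of such an allowlist.

```python
import io
import pickle

# Hypothetical allowlist: only these (module, name) pairs may be resolved
# during unpickling. Everything else -- including anything under numpy --
# is rejected outright.
_ALLOWED = {
    ("builtins", "list"),
    ("builtins", "dict"),
    ("builtins", "set"),
}


class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Deny by default: only explicitly allowlisted globals resolve.
        if (module, name) in _ALLOWED:
            return super().find_class(module, name)
        raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden")


def restricted_loads(data: bytes):
    return RestrictedUnpickler(io.BytesIO(data)).load()
```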
@ -22,9 +22,9 @@ from functools import partial

 import pandas as pd

-from graph.component import component_class
-from graph.component.base import ComponentBase
-from graph.settings import flow_logger, DEBUG
+from agent.component import component_class
+from agent.component.base import ComponentBase
+from agent.settings import flow_logger, DEBUG


 class Canvas(ABC):

@ -188,14 +188,19 @@ class Canvas(ABC):

         def prepare2run(cpns):
             nonlocal ran, ans
             for c in cpns:
+                if self.path[-1] and c == self.path[-1][-1]: continue
                 cpn = self.components[c]["obj"]
                 if cpn.component_name == "Answer":
                     self.answer.append(c)
                 else:
                     if DEBUG: print("RUN: ", c)
+                    if cpn.component_name == "Generate":
+                        cpids = cpn.get_dependent_components()
+                        if any([c not in self.path[-1] for c in cpids]):
+                            continue
                     ans = cpn.run(self.history, **kwargs)
                     self.path[-1].append(c)
                     ran += 1

         prepare2run(self.components[self.path[-2][-1]]["downstream"])
         while 0 <= ran < len(self.path[-1]):

@ -220,6 +225,7 @@ class Canvas(ABC):

                     prepare2run([p])
                     break
                 traceback.print_exc()
+                break
             continue

         try:

@ -231,6 +237,7 @@ class Canvas(ABC):

                     prepare2run([p])
                     break
                 traceback.print_exc()
+                break

         if self.answer:
             cpn_id = self.answer[0]

@ -253,7 +260,7 @@ class Canvas(ABC):

     def get_history(self, window_size):
         convs = []
-        for role, obj in self.history[window_size * -2:]:
+        for role, obj in self.history[(window_size + 1) * -1:]:
             convs.append({"role": role, "content": (obj if role == "user" else
                                                     '\n'.join(pd.DataFrame(obj)['content']))})
         return convs

@ -267,7 +274,7 @@ class Canvas(ABC):

     def get_embedding_model(self):
         return self._embed_id

-    def _find_loop(self, max_loops=2):
+    def _find_loop(self, max_loops=6):
         path = self.path[-1][::-1]
         if len(path) < 2: return False

@ -293,3 +300,6 @@ class Canvas(ABC):

             return pat + " => " + pat

         return False
+
+    def get_prologue(self):
+        return self.components["begin"]["obj"]._param.prologue
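The `get_history` change above swaps a `window_size * -2` slice for `(window_size + 1) * -1`, so the window is now counted in individual history entries rather than in user/assistant pairs. A quick sketch of the difference, with made-up history contents purely for illustration:

```python
# Toy history of (role, content) tuples; the entries are hypothetical.
history = [
    ("user", "q1"), ("assistant", "a1"),
    ("user", "q2"), ("assistant", "a2"),
    ("user", "q3"),
]

window_size = 2

old_window = history[window_size * -2:]        # last 4 entries: 2 full turns
new_window = history[(window_size + 1) * -1:]  # last 3 entries

print(len(old_window))  # 4
print(len(new_window))  # 3
```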
35	agent/component/__init__.py	Normal file
@ -0,0 +1,35 @@

import importlib
from .begin import Begin, BeginParam
from .generate import Generate, GenerateParam
from .retrieval import Retrieval, RetrievalParam
from .answer import Answer, AnswerParam
from .categorize import Categorize, CategorizeParam
from .switch import Switch, SwitchParam
from .relevant import Relevant, RelevantParam
from .message import Message, MessageParam
from .rewrite import RewriteQuestion, RewriteQuestionParam
from .keyword import KeywordExtract, KeywordExtractParam
from .baidu import Baidu, BaiduParam
from .duckduckgo import DuckDuckGo, DuckDuckGoParam
from .wikipedia import Wikipedia, WikipediaParam
from .pubmed import PubMed, PubMedParam
from .arxiv import ArXiv, ArXivParam
from .google import Google, GoogleParam
from .bing import Bing, BingParam
from .googlescholar import GoogleScholar, GoogleScholarParam
from .deepl import DeepL, DeepLParam
from .github import GitHub, GitHubParam
from .baidufanyi import BaiduFanyi, BaiduFanyiParam
from .qweather import QWeather, QWeatherParam
from .exesql import ExeSQL, ExeSQLParam
from .yahoofinance import YahooFinance, YahooFinanceParam
from .wencai import WenCai, WenCaiParam
from .jin10 import Jin10, Jin10Param
from .tushare import TuShare, TuShareParam
from .akshare import AkShare, AkShareParam


def component_class(class_name):
    m = importlib.import_module("agent.component")
    c = getattr(m, class_name)
    return c
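`component_class` resolves a component by its class-name string at runtime, which is how a serialized canvas maps node types back to Python classes. A minimal usage sketch, assuming the `agent` package is importable:

```python
from agent.component import component_class

# Resolve classes by the string names stored in a canvas definition.
param_cls = component_class("ArXivParam")

param = param_cls()   # defaults: top_n=6, sort_by='submittedDate'
param.check()         # validates top_n and sort_by; raises on bad values
```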
56	agent/component/akshare.py	Normal file
@ -0,0 +1,56 @@

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import pandas as pd
import akshare as ak
from agent.component.base import ComponentBase, ComponentParamBase


class AkShareParam(ComponentParamBase):
    """
    Define the AkShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class AkShare(ComponentBase, ABC):
    component_name = "AkShare"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return AkShare.be_output("")

        try:
            ak_res = []
            stock_news_em_df = ak.stock_news_em(symbol=ans)
            stock_news_em_df = stock_news_em_df.head(self._param.top_n)
            ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
                "新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
        except Exception as e:
            return AkShare.be_output("**ERROR**: " + str(e))

        if not ak_res:
            return AkShare.be_output("")

        return pd.DataFrame(ak_res)
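The component is a thin wrapper around akshare's `stock_news_em` interface, which returns a DataFrame of recent news for one stock symbol with Chinese column names such as 新闻标题 and 新闻链接 (the same fields the component formats into its `content` column). A standalone sketch of the underlying call, with "600519" as a purely illustrative symbol; it needs network access to run:

```python
import akshare as ak

# Fetch recent news for a single A-share symbol; the symbol is an example.
df = ak.stock_news_em(symbol="600519")
top = df.head(10)

for _, row in top.iterrows():
    print(row["新闻标题"], row["新闻链接"])
```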
@ -19,7 +19,7 @@ from functools import partial

 import pandas as pd

-from graph.component.base import ComponentBase, ComponentParamBase
+from agent.component.base import ComponentBase, ComponentParamBase


 class AnswerParam(ComponentParamBase):

@ -59,8 +59,10 @@ class Answer(ComponentBase, ABC):

         stream = self.get_stream_input()
         if isinstance(stream, pd.DataFrame):
             res = stream
+            answer = ""
             for ii, row in stream.iterrows():
-                yield row.to_dict()
+                answer += row.to_dict()["content"]
+                yield {"content": answer}
         else:
             for st in stream():
                 res = st
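The change above makes the Answer component yield a cumulative answer instead of isolated row fragments, so a streaming client can simply render the latest payload. A toy illustration of the pattern, with made-up DataFrame contents:

```python
import pandas as pd

# Hypothetical chunks arriving from an upstream component.
stream = pd.DataFrame([{"content": "Hello"}, {"content": ", "}, {"content": "world"}])


def cumulative(df):
    answer = ""
    for _, row in df.iterrows():
        answer += row.to_dict()["content"]
        yield {"content": answer}  # each yield carries everything so far


for payload in cumulative(stream):
    print(payload)
# {'content': 'Hello'} -> {'content': 'Hello, '} -> {'content': 'Hello, world'}
```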
69	agent/component/arxiv.py	Normal file
@ -0,0 +1,69 @@

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import arxiv
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class ArXivParam(ComponentParamBase):
    """
    Define the ArXiv component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 6
        self.sort_by = 'submittedDate'

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "ArXiv Search Sort_by",
                               ['submittedDate', 'lastUpdatedDate', 'relevance'])


class ArXiv(ComponentBase, ABC):
    component_name = "ArXiv"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return ArXiv.be_output("")

        try:
            sort_choices = {"relevance": arxiv.SortCriterion.Relevance,
                            "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
                            'submittedDate': arxiv.SortCriterion.SubmittedDate}
            arxiv_client = arxiv.Client()
            search = arxiv.Search(
                query=ans,
                max_results=self._param.top_n,
                sort_by=sort_choices[self._param.sort_by]
            )
            arxiv_res = [
                {"content": 'Title: ' + i.title + '\nPdf_Url: <a href="' + i.pdf_url + '"></a> \nSummary: ' + i.summary} for
                i in list(arxiv_client.results(search))]
        except Exception as e:
            return ArXiv.be_output("**ERROR**: " + str(e))

        if not arxiv_res:
            return ArXiv.be_output("")

        df = pd.DataFrame(arxiv_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
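The component delegates the actual search to the `arxiv` package: build a `Client`, describe the query with `Search`, then iterate `client.results(...)`. A standalone sketch of that call path, with the query string chosen purely for illustration:

```python
import arxiv

client = arxiv.Client()
search = arxiv.Search(
    query="retrieval augmented generation",  # example query
    max_results=6,
    sort_by=arxiv.SortCriterion.SubmittedDate,
)

for result in client.results(search):
    # The component turns these fields into its "content" column.
    print(result.title, result.pdf_url)
```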
69	agent/component/baidu.py	Normal file
@ -0,0 +1,69 @@

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import random
from abc import ABC
from functools import partial
import pandas as pd
import requests
import re
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class BaiduParam(ComponentParamBase):
    """
    Define the Baidu component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class Baidu(ComponentBase, ABC):
    component_name = "Baidu"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Baidu.be_output("")

        try:
            url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
            response = requests.get(url=url, headers=headers)

            url_res = re.findall(r"'url': \\\"(.*?)\\\"}", response.text)
            title_res = re.findall(r"'title': \\\"(.*?)\\\",\\n", response.text)
            body_res = re.findall(r"\"contentText\":\"(.*?)\"", response.text)
            baidu_res = [{"content": re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body)} for
                         url, title, body in zip(url_res, title_res, body_res)]
            del body_res, url_res, title_res
        except Exception as e:
            return Baidu.be_output("**ERROR**: " + str(e))

        if not baidu_res:
            return Baidu.be_output("")

        df = pd.DataFrame(baidu_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
99	agent/component/baidufanyi.py	Normal file
@ -0,0 +1,99 @@

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import random
from abc import ABC
import requests
from agent.component.base import ComponentBase, ComponentParamBase
from hashlib import md5


class BaiduFanyiParam(ComponentParamBase):
    """
    Define the BaiduFanyi component parameters.
    """

    def __init__(self):
        super().__init__()
        self.appid = "xxx"
        self.secret_key = "xxx"
        self.trans_type = 'translate'
        self.parameters = []
        self.source_lang = 'auto'
        self.target_lang = 'auto'
        self.domain = 'finance'

    def check(self):
        self.check_empty(self.appid, "BaiduFanyi APPID")
        self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
        self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
        self.check_valid_value(self.source_lang, "Source language",
                               ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
                                'hu', 'cht', 'vie'])
        self.check_valid_value(self.target_lang, "Target language",
                               ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
                                'hu', 'cht', 'vie'])
        self.check_valid_value(self.domain, "Translate field",
                               ['it', 'finance', 'machinery', 'senimed', 'novel', 'academic', 'aerospace', 'wiki',
                                'news', 'law', 'contract'])


class BaiduFanyi(ComponentBase, ABC):
    component_name = "BaiduFanyi"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return BaiduFanyi.be_output("")

        try:
            source_lang = self._param.source_lang
            target_lang = self._param.target_lang
            appid = self._param.appid
            # The salt must be a string so it can be concatenated into the sign and the URL.
            salt = str(random.randint(32768, 65536))
            secret_key = self._param.secret_key

            if self._param.trans_type == 'translate':
                sign = md5((appid + ans + salt + secret_key).encode('utf-8')).hexdigest()
                url = 'http://api.fanyi.baidu.com/api/trans/vip/translate?' + 'q=' + ans + '&from=' + source_lang + '&to=' + target_lang + '&appid=' + appid + '&salt=' + salt + '&sign=' + sign
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                response = requests.post(url=url, headers=headers).json()

                if response.get('error_code'):
                    return BaiduFanyi.be_output("**Error**:" + response['error_msg'])

                return BaiduFanyi.be_output(response['trans_result'][0]['dst'])
            elif self._param.trans_type == 'fieldtranslate':
                domain = self._param.domain
                sign = md5((appid + ans + salt + domain + secret_key).encode('utf-8')).hexdigest()
                url = 'http://api.fanyi.baidu.com/api/trans/vip/fieldtranslate?' + 'q=' + ans + '&from=' + source_lang + '&to=' + target_lang + '&appid=' + appid + '&salt=' + salt + '&domain=' + domain + '&sign=' + sign
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                response = requests.post(url=url, headers=headers).json()

                if response.get('error_code'):
                    return BaiduFanyi.be_output("**Error**:" + response['error_msg'])

                return BaiduFanyi.be_output(response['trans_result'][0]['dst'])

        except Exception as e:
            return BaiduFanyi.be_output("**Error**:" + str(e))
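The MD5 signature in `_run` follows the scheme used by the Baidu Translate API: `sign = MD5(appid + q + salt + secret_key)`, sent along with the query and a random salt. A focused sketch of just the signing step, with placeholder credentials:

```python
import random
from hashlib import md5

appid = "your-appid"        # placeholder credential
secret_key = "your-secret"  # placeholder credential
q = "hello world"

salt = str(random.randint(32768, 65536))
# Signature scheme used by the component: MD5 over appid + query + salt + key.
sign = md5((appid + q + salt + secret_key).encode("utf-8")).hexdigest()

params = {"q": q, "from": "auto", "to": "zh", "appid": appid, "salt": salt, "sign": sign}
print(params)
```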
@ -23,8 +23,8 @@ from typing import List, Dict, Tuple, Union
|
|||||||
|
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
|
||||||
from graph import settings
|
from agent import settings
|
||||||
from graph.settings import flow_logger, DEBUG
|
from agent.settings import flow_logger, DEBUG
|
||||||
|
|
||||||
_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
|
_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
|
||||||
_DEPRECATED_PARAMS = "_deprecated_params"
|
_DEPRECATED_PARAMS = "_deprecated_params"
|
||||||
@ -35,7 +35,7 @@ _IS_RAW_CONF = "_is_raw_conf"
|
|||||||
class ComponentParamBase(ABC):
|
class ComponentParamBase(ABC):
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.output_var_name = "output"
|
self.output_var_name = "output"
|
||||||
self.message_history_window_size = 4
|
self.message_history_window_size = 22
|
||||||
|
|
||||||
def set_name(self, name: str):
|
def set_name(self, name: str):
|
||||||
self._name = name
|
self._name = name
|
||||||
@@ -445,6 +445,12 @@ class ComponentBase(ABC):
        if DEBUG: print(self.component_name, reversed_cpnts[::-1])
        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch"]: continue
+            if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
+                o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
+                if o is not None:
+                    upstream_outs.append(o)
+                continue
+            if u not in self._canvas.get_component(self._id)["upstream"]: continue
            if self.component_name.lower().find("switch") < 0 \
                    and self.get_component_name(u) in ["relevant", "categorize"]:
                continue

@@ -454,13 +460,19 @@ class ComponentBase(ABC):
                    upstream_outs.append(pd.DataFrame([{"content": c}]))
                    break
                break
-            if self.component_name.lower().find("answer") >= 0:
-                if self.get_component_name(u) in ["relevant"]: continue
-                else: upstream_outs.append(self._canvas.get_component(u)["obj"].output(allow_partial=False)[1])
+            if self.component_name.lower().find("answer") >= 0 and self.get_component_name(u) in ["relevant"]:
+                continue
+            o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
+            if o is not None:
+                upstream_outs.append(o)
            break

-        return pd.concat(upstream_outs, ignore_index=False)
+        if upstream_outs:
+            df = pd.concat(upstream_outs, ignore_index=True)
+            if "content" in df:
+                df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
+            return df
+        return pd.DataFrame()

    def get_stream_input(self):
        reversed_cpnts = []
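The reworked get_input now concatenates every usable upstream frame and de-duplicates on the content column. A toy illustration of that pandas pattern, detached from the canvas objects:

import pandas as pd

a = pd.DataFrame([{"content": "chunk A"}, {"content": "chunk B"}])
b = pd.DataFrame([{"content": "chunk B"}, {"content": "chunk C"}])

df = pd.concat([a, b], ignore_index=True)
df = df.drop_duplicates(subset=["content"]).reset_index(drop=True)
print(df)  # three rows: chunk A, chunk B, chunk C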
@@ -13,11 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import json
from functools import partial

import pandas as pd
-from graph.component.base import ComponentBase, ComponentParamBase
+from agent.component.base import ComponentBase, ComponentParamBase


class BeginParam(ComponentParamBase):
agent/component/bing.py (new file, 85 lines)
@@ -0,0 +1,85 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import requests
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class BingParam(ComponentParamBase):
    """
    Define the Bing component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "Webpages"
        self.api_key = "YOUR_ACCESS_KEY"
        self.country = "CN"
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Bing Web Search or Bing News", ["Webpages", "News"])
        self.check_empty(self.api_key, "Bing subscription key")
        self.check_valid_value(self.country, "Bing Country",
                               ['AR', 'AU', 'AT', 'BE', 'BR', 'CA', 'CL', 'DK', 'FI', 'FR', 'DE', 'HK', 'IN', 'ID',
                                'IT', 'JP', 'KR', 'MY', 'MX', 'NL', 'NZ', 'NO', 'CN', 'PL', 'PT', 'PH', 'RU', 'SA',
                                'ZA', 'ES', 'SE', 'CH', 'TW', 'TR', 'GB', 'US'])
        self.check_valid_value(self.language, "Bing Languages",
                               ['ar', 'eu', 'bn', 'bg', 'ca', 'ns', 'nt', 'hr', 'cs', 'da', 'nl', 'en', 'gb', 'et',
                                'fi', 'fr', 'gl', 'de', 'gu', 'he', 'hi', 'hu', 'is', 'it', 'jp', 'kn', 'ko', 'lv',
                                'lt', 'ms', 'ml', 'mr', 'nb', 'pl', 'br', 'pt', 'pa', 'ro', 'ru', 'sr', 'sk', 'sl',
                                'es', 'sv', 'ta', 'te', 'th', 'tr', 'uk', 'vi'])


class Bing(ComponentBase, ABC):
    component_name = "Bing"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Bing.be_output("")

        try:
            headers = {"Ocp-Apim-Subscription-Key": self._param.api_key, 'Accept-Language': self._param.language}
            params = {"q": ans, "textDecorations": True, "textFormat": "HTML", "cc": self._param.country,
                      "answerCount": 1, "promote": self._param.channel}
            if self._param.channel == "Webpages":
                response = requests.get("https://api.bing.microsoft.com/v7.0/search", headers=headers, params=params)
                response.raise_for_status()
                search_results = response.json()
                bing_res = [{"content": '<a href="' + i["url"] + '">' + i["name"] + '</a> ' + i["snippet"]} for i in
                            search_results["webPages"]["value"]]
            elif self._param.channel == "News":
                response = requests.get("https://api.bing.microsoft.com/v7.0/news/search", headers=headers,
                                        params=params)
                response.raise_for_status()
                search_results = response.json()
                bing_res = [{"content": '<a href="' + i["url"] + '">' + i["name"] + '</a> ' + i["description"]} for i
                            in search_results['news']['value']]
        except Exception as e:
            return Bing.be_output("**ERROR**: " + str(e))

        if not bing_res:
            return Bing.be_output("")

        df = pd.DataFrame(bing_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
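The component wraps the Bing Web Search v7 REST API. A minimal stand-alone query, assuming a valid subscription key ("YOUR_KEY" is a placeholder):

import requests

headers = {"Ocp-Apim-Subscription-Key": "YOUR_KEY"}  # placeholder key
r = requests.get("https://api.bing.microsoft.com/v7.0/search",
                 headers=headers, params={"q": "ragflow", "count": 3})
r.raise_for_status()
for page in r.json()["webPages"]["value"]:
    print(page["name"], page["url"])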
@@ -14,13 +14,10 @@
# limitations under the License.
#
from abc import ABC
-
-import pandas as pd
-
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
-from graph.component import GenerateParam, Generate
+from agent.component import GenerateParam, Generate
-from graph.settings import DEBUG
+from agent.settings import DEBUG


class CategorizeParam(GenerateParam):

@@ -85,6 +82,6 @@ class Categorize(Generate, ABC):
            if ans.lower().find(c.lower()) >= 0:
                return Categorize.be_output(self._param.category_description[c]["to"])

-        return Categorize.be_output(self._param.category_description.items()[-1][1]["to"])
+        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
@@ -21,7 +21,7 @@ from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
-from graph.component.base import ComponentBase, ComponentParamBase
+from agent.component.base import ComponentBase, ComponentParamBase


class CiteParam(ComponentParamBase):
agent/component/deepl.py (new file, 62 lines)
@@ -0,0 +1,62 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase
import deepl


class DeepLParam(ComponentParamBase):
    """
    Define the DeepL component parameters.
    """

    def __init__(self):
        super().__init__()
        self.auth_key = "xxx"
        self.parameters = []
        self.source_lang = 'ZH'
        self.target_lang = 'EN-GB'

    def check(self):
        self.check_valid_value(self.source_lang, "Source language",
                               ['AR', 'BG', 'CS', 'DA', 'DE', 'EL', 'EN', 'ES', 'ET', 'FI', 'FR', 'HU', 'ID', 'IT',
                                'JA', 'KO', 'LT', 'LV', 'NB', 'NL', 'PL', 'PT', 'RO', 'RU', 'SK', 'SL', 'SV', 'TR',
                                'UK', 'ZH'])
        self.check_valid_value(self.target_lang, "Target language",
                               ['AR', 'BG', 'CS', 'DA', 'DE', 'EL', 'EN-GB', 'EN-US', 'ES', 'ET', 'FI', 'FR', 'HU',
                                'ID', 'IT', 'JA', 'KO', 'LT', 'LV', 'NB', 'NL', 'PL', 'PT-BR', 'PT-PT', 'RO', 'RU',
                                'SK', 'SL', 'SV', 'TR', 'UK', 'ZH'])


class DeepL(ComponentBase, ABC):
    component_name = "DeepL"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return DeepL.be_output("")

        try:
            translator = deepl.Translator(self._param.auth_key)
            result = translator.translate_text(ans, source_lang=self._param.source_lang,
                                               target_lang=self._param.target_lang)

            return DeepL.be_output(result.text)
        except Exception as e:
            return DeepL.be_output("**Error**:" + str(e))
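The component delegates entirely to the deepl package; a minimal sketch of the same call, with a placeholder auth key:

import deepl

translator = deepl.Translator("YOUR_DEEPL_AUTH_KEY")  # placeholder key
result = translator.translate_text("你好，世界", source_lang="ZH", target_lang="EN-GB")
print(result.text)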
agent/component/duckduckgo.py (new file, 66 lines)
@@ -0,0 +1,66 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from duckduckgo_search import DDGS
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class DuckDuckGoParam(ComponentParamBase):
    """
    Define the DuckDuckGo component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "text"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Web Search or News", ["text", "news"])


class DuckDuckGo(ComponentBase, ABC):
    component_name = "DuckDuckGo"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return DuckDuckGo.be_output("")

        try:
            if self._param.channel == "text":
                with DDGS() as ddgs:
                    # {'title': '', 'href': '', 'body': ''}
                    duck_res = [{"content": '<a href="' + i["href"] + '">' + i["title"] + '</a> ' + i["body"]} for i
                                in ddgs.text(ans, max_results=self._param.top_n)]
            elif self._param.channel == "news":
                with DDGS() as ddgs:
                    # {'date': '', 'title': '', 'body': '', 'url': '', 'image': '', 'source': ''}
                    duck_res = [{"content": '<a href="' + i["url"] + '">' + i["title"] + '</a> ' + i["body"]} for i
                                in ddgs.news(ans, max_results=self._param.top_n)]
        except Exception as e:
            return DuckDuckGo.be_output("**ERROR**: " + str(e))

        if not duck_res:
            return DuckDuckGo.be_output("")

        df = pd.DataFrame(duck_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
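Unlike the Bing component, DuckDuckGo needs no API key; the duckduckgo_search package queries the engine directly. A minimal sketch of the same call the component makes:

from duckduckgo_search import DDGS

with DDGS() as ddgs:
    for hit in ddgs.text("retrieval augmented generation", max_results=3):
        print(hit["title"], hit["href"])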
agent/component/exesql.py (new file, 99 lines)
@@ -0,0 +1,99 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import re
import pandas as pd
from peewee import MySQLDatabase, PostgresqlDatabase
from agent.component.base import ComponentBase, ComponentParamBase


class ExeSQLParam(ComponentParamBase):
    """
    Define the ExeSQL component parameters.
    """

    def __init__(self):
        super().__init__()
        self.db_type = "mysql"
        self.database = ""
        self.username = ""
        self.host = ""
        self.port = 3306
        self.password = ""
        self.loop = 3
        self.top_n = 30

    def check(self):
        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb'])
        self.check_empty(self.database, "Database name")
        self.check_empty(self.username, "database username")
        self.check_empty(self.host, "IP Address")
        self.check_positive_integer(self.port, "IP Port")
        self.check_empty(self.password, "Database password")
        self.check_positive_integer(self.top_n, "Number of records")


class ExeSQL(ComponentBase, ABC):
    component_name = "ExeSQL"

    def _run(self, history, **kwargs):
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        if self._loop >= self._param.loop:
            self._loop = 0
            raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
        self._loop += 1

        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
        ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
        ans = re.sub(r';[^;]*$', r';', ans)
        if not ans:
            raise Exception("SQL statement not found!")

        if self._param.db_type in ["mysql", "mariadb"]:
            db = MySQLDatabase(self._param.database, user=self._param.username, host=self._param.host,
                               port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'postgresql':
            db = PostgresqlDatabase(self._param.database, user=self._param.username, host=self._param.host,
                                    port=self._param.port, password=self._param.password)

        try:
            db.connect()
        except Exception as e:
            raise Exception("Database Connection Failed! \n" + str(e))
        sql_res = []
        for single_sql in re.split(r';', ans.replace(r"\n", " ")):
            if not single_sql:
                continue
            try:
                query = db.execute_sql(single_sql)
                if query.rowcount == 0:
                    sql_res.append({"content": "\nTotal: " + str(query.rowcount) + "\n No record in the database!"})
                    continue
                single_res = pd.DataFrame([i for i in query.fetchmany(size=self._param.top_n)])
                single_res.columns = [i[0] for i in query.description]
                sql_res.append({"content": "\nTotal: " + str(query.rowcount) + "\n" + single_res.to_markdown()})
            except Exception as e:
                sql_res.append({"content": "**Error**:" + str(e) + "\nError SQL Statement:" + single_sql})
        db.close()

        if not sql_res:
            return ExeSQL.be_output("")

        return pd.DataFrame(sql_res)
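The component runs each statement through peewee's execute_sql, which returns a raw DB-API cursor. A reduced sketch of that flow; the connection values are placeholders and assume a reachable MySQL server:

from peewee import MySQLDatabase

db = MySQLDatabase("demo_db", user="demo", host="127.0.0.1",
                   port=3306, password="secret")  # placeholder credentials
db.connect()
cursor = db.execute_sql("SELECT 1;")  # returns a DB-API cursor
print(cursor.fetchall())
db.close()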
@@ -15,13 +15,11 @@
#
import re
from functools import partial

import pandas as pd

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
-from graph.component.base import ComponentBase, ComponentParamBase
+from agent.component.base import ComponentBase, ComponentParamBase


class GenerateParam(ComponentParamBase):
@@ -63,62 +61,82 @@ class GenerateParam(ComponentParamBase):
class Generate(ComponentBase):
    component_name = "Generate"

+    def get_dependent_components(self):
+        cpnts = [para["component_id"] for para in self._param.parameters]
+        return cpnts
+
+    def set_cite(self, retrieval_res, answer):
+        retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
+        if "empty_response" in retrieval_res.columns:
+            retrieval_res["empty_response"].fillna("", inplace=True)
+        answer, idx = retrievaler.insert_citations(answer, [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
+                                                   [ck["vector"] for _, ck in retrieval_res.iterrows()],
+                                                   LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
+                                                             self._canvas.get_embedding_model()), tkweight=0.7,
+                                                   vtweight=0.3)
+        doc_ids = set([])
+        recall_docs = []
+        for i in idx:
+            did = retrieval_res.loc[int(i), "doc_id"]
+            if did in doc_ids: continue
+            doc_ids.add(did)
+            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})
+
+        del retrieval_res["vector"]
+        del retrieval_res["content_ltks"]
+
+        reference = {
+            "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
+            "doc_aggs": recall_docs
+        }
+
+        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
+            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
+        res = {"content": answer, "reference": reference}
+
+        return res
+
    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = self.get_input()
-        input = "\n- ".join(retrieval_res["content"])
+        input = (" - " + "\n - ".join(retrieval_res["content"])) if "content" in retrieval_res else ""
        for para in self._param.parameters:
            cpn = self._canvas.get_component(para["component_id"])["obj"]
            _, out = cpn.output(allow_partial=False)
            if "content" not in out.columns:
                kwargs[para["key"]] = "Nothing"
            else:
-                kwargs[para["key"]] = "\n - ".join(out["content"])
+                kwargs[para["key"]] = " - " + "\n - ".join(out["content"])

        kwargs["input"] = input
        for n, v in kwargs.items():
-            # prompt = re.sub(r"\{%s\}"%n, re.escape(str(v)), prompt)
-            prompt = re.sub(r"\{%s\}" % n, str(v), prompt)
+            prompt = re.sub(r"\{%s\}" % n, re.escape(str(v)), prompt)

-        if kwargs.get("stream"):
+        downstreams = self._canvas.get_component(self._id)["downstream"]
+        if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
+            "obj"].component_name.lower() == "answer":
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

-        if "empty_response" in retrieval_res.columns:
-            return Generate.be_output(input)
+        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
+            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
+                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            return Generate.be_output(res)

        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
                            self._param.gen_conf())

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
-            ans, idx = retrievaler.insert_citations(ans,
-                                                    [ck["content_ltks"]
-                                                     for _, ck in retrieval_res.iterrows()],
-                                                    [ck["vector"]
-                                                     for _, ck in retrieval_res.iterrows()],
-                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
-                                                              self._canvas.get_embedding_model()),
-                                                    tkweight=0.7,
-                                                    vtweight=0.3)
-            del retrieval_res["vector"]
-            retrieval_res = retrieval_res.to_dict("records")
-            df = []
-            for i in idx:
-                df.append(retrieval_res[int(i)])
-                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), ans)
-                assert r, f"{i} => {ans}"
-                df[-1]["content"] = r.group(1)
-                ans = re.sub(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), "", ans)
-            if ans: df.append({"content": ans})
+            df = self.set_cite(retrieval_res, ans)
            return pd.DataFrame(df)

        return Generate.be_output(ans)

    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
-        if "empty_response" in retrieval_res.columns and "\n- ".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["content"]), "reference": []}
+        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
+            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
+                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
            yield res
            self.set_output(res)
            return

@@ -131,34 +149,7 @@ class Generate(ComponentBase):
        yield res

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
-            answer, idx = retrievaler.insert_citations(answer,
-                                                       [ck["content_ltks"]
-                                                        for _, ck in retrieval_res.iterrows()],
-                                                       [ck["vector"]
-                                                        for _, ck in retrieval_res.iterrows()],
-                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
-                                                                 self._canvas.get_embedding_model()),
-                                                       tkweight=0.7,
-                                                       vtweight=0.3)
-            doc_ids = set([])
-            recall_docs = []
-            for i in idx:
-                did = retrieval_res.loc[int(i), "doc_id"]
-                if did in doc_ids: continue
-                doc_ids.add(did)
-                recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})
-
-            del retrieval_res["vector"]
-            del retrieval_res["content_ltks"]
-
-            reference = {
-                "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
-                "doc_aggs": recall_docs
-            }
-
-            if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
-                answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
-            res = {"content": answer, "reference": reference}
+            res = self.set_cite(retrieval_res, answer)
        yield res

        self.set_output(res)
||||||
61
agent/component/github.py
Normal file
61
agent/component/github.py
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
from abc import ABC
|
||||||
|
import pandas as pd
|
||||||
|
import requests
|
||||||
|
from agent.settings import DEBUG
|
||||||
|
from agent.component.base import ComponentBase, ComponentParamBase
|
||||||
|
|
||||||
|
|
||||||
|
class GitHubParam(ComponentParamBase):
|
||||||
|
"""
|
||||||
|
Define the GitHub component parameters.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__()
|
||||||
|
self.top_n = 10
|
||||||
|
|
||||||
|
def check(self):
|
||||||
|
self.check_positive_integer(self.top_n, "Top N")
|
||||||
|
|
||||||
|
|
||||||
|
class GitHub(ComponentBase, ABC):
|
||||||
|
component_name = "GitHub"
|
||||||
|
|
||||||
|
def _run(self, history, **kwargs):
|
||||||
|
ans = self.get_input()
|
||||||
|
ans = " - ".join(ans["content"]) if "content" in ans else ""
|
||||||
|
if not ans:
|
||||||
|
return GitHub.be_output("")
|
||||||
|
|
||||||
|
try:
|
||||||
|
url = 'https://api.github.com/search/repositories?q=' + ans + '&sort=stars&order=desc&per_page=' + str(
|
||||||
|
self._param.top_n)
|
||||||
|
headers = {"Content-Type": "application/vnd.github+json", "X-GitHub-Api-Version": '2022-11-28'}
|
||||||
|
response = requests.get(url=url, headers=headers).json()
|
||||||
|
|
||||||
|
github_res = [{"content": '<a href="' + i["html_url"] + '">' + i["name"] + '</a>' + str(
|
||||||
|
i["description"]) + '\n stars:' + str(i['watchers'])} for i in response['items']]
|
||||||
|
except Exception as e:
|
||||||
|
return GitHub.be_output("**ERROR**: " + str(e))
|
||||||
|
|
||||||
|
if not github_res:
|
||||||
|
return GitHub.be_output("")
|
||||||
|
|
||||||
|
df = pd.DataFrame(github_res)
|
||||||
|
if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
|
||||||
|
return df
|
||||||
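The component calls GitHub's public repository-search endpoint, which works without authentication (at a low rate limit). A minimal sketch of the same query:

import requests

r = requests.get("https://api.github.com/search/repositories",
                 params={"q": "ragflow", "sort": "stars", "order": "desc", "per_page": 3},
                 headers={"Accept": "application/vnd.github+json"})
for repo in r.json()["items"]:
    print(repo["full_name"], repo["stargazers_count"])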
agent/component/google.py (new file, 96 lines)
@@ -0,0 +1,96 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from serpapi import GoogleSearch
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class GoogleParam(ComponentParamBase):
    """
    Define the Google component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.api_key = "xxx"
        self.country = "cn"
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_empty(self.api_key, "SerpApi API key")
        self.check_valid_value(self.country, "Google Country",
                               ['af', 'al', 'dz', 'as', 'ad', 'ao', 'ai', 'aq', 'ag', 'ar', 'am', 'aw', 'au', 'at',
                                'az', 'bs', 'bh', 'bd', 'bb', 'by', 'be', 'bz', 'bj', 'bm', 'bt', 'bo', 'ba', 'bw',
                                'bv', 'br', 'io', 'bn', 'bg', 'bf', 'bi', 'kh', 'cm', 'ca', 'cv', 'ky', 'cf', 'td',
                                'cl', 'cn', 'cx', 'cc', 'co', 'km', 'cg', 'cd', 'ck', 'cr', 'ci', 'hr', 'cu', 'cy',
                                'cz', 'dk', 'dj', 'dm', 'do', 'ec', 'eg', 'sv', 'gq', 'er', 'ee', 'et', 'fk', 'fo',
                                'fj', 'fi', 'fr', 'gf', 'pf', 'tf', 'ga', 'gm', 'ge', 'de', 'gh', 'gi', 'gr', 'gl',
                                'gd', 'gp', 'gu', 'gt', 'gn', 'gw', 'gy', 'ht', 'hm', 'va', 'hn', 'hk', 'hu', 'is',
                                'in', 'id', 'ir', 'iq', 'ie', 'il', 'it', 'jm', 'jp', 'jo', 'kz', 'ke', 'ki', 'kp',
                                'kr', 'kw', 'kg', 'la', 'lv', 'lb', 'ls', 'lr', 'ly', 'li', 'lt', 'lu', 'mo', 'mk',
                                'mg', 'mw', 'my', 'mv', 'ml', 'mt', 'mh', 'mq', 'mr', 'mu', 'yt', 'mx', 'fm', 'md',
                                'mc', 'mn', 'ms', 'ma', 'mz', 'mm', 'na', 'nr', 'np', 'nl', 'an', 'nc', 'nz', 'ni',
                                'ne', 'ng', 'nu', 'nf', 'mp', 'no', 'om', 'pk', 'pw', 'ps', 'pa', 'pg', 'py', 'pe',
                                'ph', 'pn', 'pl', 'pt', 'pr', 'qa', 're', 'ro', 'ru', 'rw', 'sh', 'kn', 'lc', 'pm',
                                'vc', 'ws', 'sm', 'st', 'sa', 'sn', 'rs', 'sc', 'sl', 'sg', 'sk', 'si', 'sb', 'so',
                                'za', 'gs', 'es', 'lk', 'sd', 'sr', 'sj', 'sz', 'se', 'ch', 'sy', 'tw', 'tj', 'tz',
                                'th', 'tl', 'tg', 'tk', 'to', 'tt', 'tn', 'tr', 'tm', 'tc', 'tv', 'ug', 'ua', 'ae',
                                'uk', 'gb', 'us', 'um', 'uy', 'uz', 'vu', 've', 'vn', 'vg', 'vi', 'wf', 'eh', 'ye',
                                'zm', 'zw'])
        self.check_valid_value(self.language, "Google languages",
                               ['af', 'ak', 'sq', 'ws', 'am', 'ar', 'hy', 'az', 'eu', 'be', 'bem', 'bn', 'bh',
                                'xx-bork', 'bs', 'br', 'bg', 'bt', 'km', 'ca', 'chr', 'ny', 'zh-cn', 'zh-tw', 'co',
                                'hr', 'cs', 'da', 'nl', 'xx-elmer', 'en', 'eo', 'et', 'ee', 'fo', 'tl', 'fi', 'fr',
                                'fy', 'gaa', 'gl', 'ka', 'de', 'el', 'kl', 'gn', 'gu', 'xx-hacker', 'ht', 'ha', 'haw',
                                'iw', 'hi', 'hu', 'is', 'ig', 'id', 'ia', 'ga', 'it', 'ja', 'jw', 'kn', 'kk', 'rw',
                                'rn', 'xx-klingon', 'kg', 'ko', 'kri', 'ku', 'ckb', 'ky', 'lo', 'la', 'lv', 'ln', 'lt',
                                'loz', 'lg', 'ach', 'mk', 'mg', 'ms', 'ml', 'mt', 'mv', 'mi', 'mr', 'mfe', 'mo', 'mn',
                                'sr-me', 'my', 'ne', 'pcm', 'nso', 'no', 'nn', 'oc', 'or', 'om', 'ps', 'fa',
                                'xx-pirate', 'pl', 'pt', 'pt-br', 'pt-pt', 'pa', 'qu', 'ro', 'rm', 'nyn', 'ru', 'gd',
                                'sr', 'sh', 'st', 'tn', 'crs', 'sn', 'sd', 'si', 'sk', 'sl', 'so', 'es', 'es-419', 'su',
                                'sw', 'sv', 'tg', 'ta', 'tt', 'te', 'th', 'ti', 'to', 'lua', 'tum', 'tr', 'tk', 'tw',
                                'ug', 'uk', 'ur', 'uz', 'vu', 'vi', 'cy', 'wo', 'xh', 'yi', 'yo', 'zu']
                               )


class Google(ComponentBase, ABC):
    component_name = "Google"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Google.be_output("")

        try:
            client = GoogleSearch(
                {"engine": "google", "q": ans, "api_key": self._param.api_key, "gl": self._param.country,
                 "hl": self._param.language, "num": self._param.top_n})
            google_res = [{"content": '<a href="' + i["link"] + '">' + i["title"] + '</a> ' + i["snippet"]} for i in
                          client.get_dict()["organic_results"]]
        except Exception as e:
            return Google.be_output("**ERROR**: " + str(e))

        if not google_res:
            return Google.be_output("")

        df = pd.DataFrame(google_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
agent/component/googlescholar.py (new file, 70 lines)
@@ -0,0 +1,70 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase
from scholarly import scholarly


class GoogleScholarParam(ComponentParamBase):
    """
    Define the GoogleScholar component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 6
        self.sort_by = 'relevance'
        self.year_low = None
        self.year_high = None
        self.patents = True

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "GoogleScholar Sort_by", ['date', 'relevance'])
        self.check_boolean(self.patents, "Whether or not to include patents, defaults to True")


class GoogleScholar(ComponentBase, ABC):
    component_name = "GoogleScholar"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return GoogleScholar.be_output("")

        scholar_client = scholarly.search_pubs(ans, patents=self._param.patents, year_low=self._param.year_low,
                                               year_high=self._param.year_high, sort_by=self._param.sort_by)
        scholar_res = []
        for i in range(self._param.top_n):
            try:
                pub = next(scholar_client)
                scholar_res.append({"content": 'Title: ' + pub['bib']['title'] + '\n_Url: <a href="' + pub[
                    'pub_url'] + '"></a> ' + "\n author: " + ",".join(pub['bib']['author']) + '\n Abstract: ' + pub[
                    'bib'].get('abstract', 'no abstract')})

            except Exception as e:  # Exception also covers StopIteration when results run out
                print("**ERROR** " + str(e))
                break

        if not scholar_res:
            return GoogleScholar.be_output("")

        df = pd.DataFrame(scholar_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
agent/component/jin10.py (new file, 130 lines)
@@ -0,0 +1,130 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
import pandas as pd
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class Jin10Param(ComponentParamBase):
    """
    Define the Jin10 component parameters.
    """

    def __init__(self):
        super().__init__()
        self.type = "flash"
        self.secret_key = "xxx"
        self.flash_type = '1'
        self.calendar_type = 'cj'
        self.calendar_datatype = 'data'
        self.symbols_type = 'GOODS'
        self.symbols_datatype = 'symbols'
        self.contain = ""
        self.filter = ""

    def check(self):
        self.check_valid_value(self.type, "Type", ['flash', 'calendar', 'symbols', 'news'])
        self.check_valid_value(self.flash_type, "Flash Type", ['1', '2', '3', '4', '5'])
        self.check_valid_value(self.calendar_type, "Calendar Type", ['cj', 'qh', 'hk', 'us'])
        self.check_valid_value(self.calendar_datatype, "Calendar DataType", ['data', 'event', 'holiday'])
        self.check_valid_value(self.symbols_type, "Symbols Type", ['GOODS', 'FOREX', 'FUTURE', 'CRYPTO'])
        self.check_valid_value(self.symbols_datatype, 'Symbols DataType', ['symbols', 'quotes'])


class Jin10(ComponentBase, ABC):
    component_name = "Jin10"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Jin10.be_output("")

        jin10_res = []
        headers = {'secret-key': self._param.secret_key}
        try:
            if self._param.type == "flash":
                params = {
                    'category': self._param.flash_type,
                    'contain': self._param.contain,
                    'filter': self._param.filter
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/flash?category=' + self._param.flash_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                for i in response['data']:
                    jin10_res.append({"content": i['data']['content']})
            if self._param.type == "calendar":
                params = {
                    'category': self._param.calendar_type
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/calendar/' + self._param.calendar_datatype + '?category=' + self._param.calendar_type,
                    headers=headers, data=json.dumps(params))

                response = response.json()
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
            if self._param.type == "symbols":
                params = {
                    'type': self._param.symbols_type
                }
                if self._param.symbols_datatype == "quotes":
                    params['codes'] = 'BTCUSD'
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                if self._param.symbols_datatype == "symbols":
                    for i in response['data']:
                        i['Commodity Code'] = i['c']
                        i['Stock Exchange'] = i['e']
                        i['Commodity Name'] = i['n']
                        i['Commodity Type'] = i['t']
                        del i['c'], i['e'], i['n'], i['t']
                if self._param.symbols_datatype == "quotes":
                    for i in response['data']:
                        i['Selling Price'] = i['a']
                        i['Buying Price'] = i['b']
                        i['Commodity Code'] = i['c']
                        i['Stock Exchange'] = i['e']
                        i['Highest Price'] = i['h']
                        i['Yesterday’s Closing Price'] = i['hc']
                        i['Lowest Price'] = i['l']
                        i['Opening Price'] = i['o']
                        i['Latest Price'] = i['p']
                        i['Market Quote Time'] = i['t']
                        del i['a'], i['b'], i['c'], i['e'], i['h'], i['hc'], i['l'], i['o'], i['p'], i['t']
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
            if self._param.type == "news":
                params = {
                    'contain': self._param.contain,
                    'filter': self._param.filter
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/news',
                    headers=headers, data=json.dumps(params))
                response = response.json()
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
        except Exception as e:
            return Jin10.be_output("**ERROR**: " + str(e))

        if not jin10_res:
            return Jin10.be_output("")

        return pd.DataFrame(jin10_res)
@@ -17,8 +17,8 @@ import re
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
-from graph.component import GenerateParam, Generate
+from agent.component import GenerateParam, Generate
-from graph.settings import DEBUG
+from agent.settings import DEBUG


class KeywordExtractParam(GenerateParam):
@@ -16,10 +16,7 @@
import random
from abc import ABC
from functools import partial
-
-import pandas as pd
-
-from graph.component.base import ComponentBase, ComponentParamBase
+from agent.component.base import ComponentBase, ComponentParamBase


class MessageParam(ComponentParamBase):

@@ -46,7 +43,11 @@ class Message(ComponentBase, ABC):
        return Message.be_output(random.choice(self._param.messages))

    def stream_output(self):
+        res = None
        if self._param.messages:
-            yield {"content": random.choice(self._param.messages)}
+            res = {"content": random.choice(self._param.messages)}
+            yield res
+
+        self.set_output(res)
agent/component/pubmed.py (new file, 69 lines)
@@ -0,0 +1,69 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from Bio import Entrez
import re
import pandas as pd
import xml.etree.ElementTree as ET
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class PubMedParam(ComponentParamBase):
    """
    Define the PubMed component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 5
        self.email = "A.N.Other@example.com"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class PubMed(ComponentBase, ABC):
    component_name = "PubMed"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return PubMed.be_output("")

        try:
            Entrez.email = self._param.email
            pubmedids = Entrez.read(Entrez.esearch(db='pubmed', retmax=self._param.top_n, term=ans))['IdList']
            pubmedcnt = ET.fromstring(re.sub(r'<(/?)b>|<(/?)i>', '', Entrez.efetch(db='pubmed', id=",".join(pubmedids),
                                                                                   retmode="xml").read().decode(
                "utf-8")))
            pubmed_res = [{"content": 'Title:' + child.find("MedlineCitation").find("Article").find(
                "ArticleTitle").text + '\nUrl:<a href=" https://pubmed.ncbi.nlm.nih.gov/' + child.find(
                "MedlineCitation").find("PMID").text + '">' + '</a>\n' + 'Abstract:' + (
                child.find("MedlineCitation").find("Article").find("Abstract").find(
                    "AbstractText").text if child.find("MedlineCitation").find(
                    "Article").find("Abstract") else "No abstract available")} for child in
                          pubmedcnt.findall("PubmedArticle")]
        except Exception as e:
            return PubMed.be_output("**ERROR**: " + str(e))

        if not pubmed_res:
            return PubMed.be_output("")

        df = pd.DataFrame(pubmed_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
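The component chains Biopython's Entrez.esearch (IDs for a query) with Entrez.efetch (article XML for those IDs). A reduced sketch; the e-mail is a placeholder contact address NCBI asks every client to set:

from Bio import Entrez

Entrez.email = "you@example.com"  # placeholder contact address
ids = Entrez.read(Entrez.esearch(db="pubmed", retmax=3, term="retrieval augmented generation"))["IdList"]
xml = Entrez.efetch(db="pubmed", id=",".join(ids), retmode="xml").read()
print(ids, len(xml))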
agent/component/qweather.py (new file, 111 lines)
@@ -0,0 +1,111 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class QWeatherParam(ComponentParamBase):
    """
    Define the QWeather component parameters.
    """

    def __init__(self):
        super().__init__()
        self.web_apikey = "xxx"
        self.lang = "zh"
        self.type = "weather"
        self.user_type = 'free'
        self.error_code = {
            "204": "The request was successful, but the region you are querying does not have the data you need at this time.",
            "400": "Request error, may contain incorrect request parameters or missing mandatory request parameters.",
            "401": "Authentication fails, possibly using the wrong KEY, wrong digital signature, wrong type of KEY (e.g. using the SDK's KEY to access the Web API).",
            "402": "Exceeded the number of accesses or the balance is not enough to support continued access to the service, you can recharge, upgrade the accesses or wait for the accesses to be reset.",
            "403": "No access, may be the binding PackageName, BundleID, domain IP address is inconsistent, or the data that requires additional payment.",
            "404": "The queried data or region does not exist.",
            "429": "Exceeded the limited QPM (number of accesses per minute), please refer to the QPM description",
            "500": "No response or timeout, interface service abnormality please contact us"
        }
        # Weather
        self.time_period = 'now'

    def check(self):
        self.check_empty(self.web_apikey, "QWeather API key")
        self.check_valid_value(self.type, "Type", ["weather", "indices", "airquality"])
        self.check_valid_value(self.user_type, "Free subscription or paid subscription", ["free", "paid"])
        self.check_valid_value(self.lang, "Use language",
                               ['zh', 'zh-hant', 'en', 'de', 'es', 'fr', 'it', 'ja', 'ko', 'ru', 'hi', 'th', 'ar', 'pt',
                                'bn', 'ms', 'nl', 'el', 'la', 'sv', 'id', 'pl', 'tr', 'cs', 'et', 'vi', 'fil', 'fi',
                                'he', 'is', 'nb'])
        self.check_valid_value(self.time_period, "Time period", ['now', '3d', '7d', '10d', '15d', '30d'])


class QWeather(ComponentBase, ABC):
    component_name = "QWeather"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return QWeather.be_output("")

        try:
            response = requests.get(
                url="https://geoapi.qweather.com/v2/city/lookup?location=" + ans + "&key=" + self._param.web_apikey).json()
            if response["code"] == "200":
                location_id = response["location"][0]["id"]
            else:
                return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])

            base_url = "https://api.qweather.com/v7/" if self._param.user_type == 'paid' else "https://devapi.qweather.com/v7/"

            if self._param.type == "weather":
                url = base_url + "weather/" + self._param.time_period + "?location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                response = requests.get(url=url).json()
                if response["code"] == "200":
                    if self._param.time_period == "now":
                        return QWeather.be_output(str(response["now"]))
                    else:
                        qweather_res = [{"content": str(i) + "\n"} for i in response["daily"]]
                        if not qweather_res:
                            return QWeather.be_output("")

                        df = pd.DataFrame(qweather_res)
                        return df
                else:
                    return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])

            elif self._param.type == "indices":
                url = base_url + "indices/1d?type=0&location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                response = requests.get(url=url).json()
                if response["code"] == "200":
                    indices_res = response["daily"][0]["date"] + "\n" + "\n".join(
                        [i["name"] + ": " + i["category"] + ", " + i["text"] for i in response["daily"]])
                    return QWeather.be_output(indices_res)

                else:
                    return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])

            elif self._param.type == "airquality":
                url = base_url + "air/now?location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                response = requests.get(url=url).json()
                if response["code"] == "200":
                    return QWeather.be_output(str(response["now"]))
                else:
                    return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])
        except Exception as e:
            return QWeather.be_output("**Error**" + str(e))
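The component makes two hops: a GeoAPI lookup to resolve the city name to a location id, then the weather endpoint proper. A reduced sketch against the free-tier hosts used above ("YOUR_QWEATHER_KEY" is a placeholder):

import requests

key = "YOUR_QWEATHER_KEY"  # placeholder
geo = requests.get("https://geoapi.qweather.com/v2/city/lookup",
                   params={"location": "beijing", "key": key}).json()
if geo.get("code") == "200":
    location_id = geo["location"][0]["id"]
    now = requests.get("https://devapi.qweather.com/v7/weather/now",
                       params={"location": location_id, "key": key}).json()
    print(now.get("now"))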
@@ -16,7 +16,7 @@
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
-from graph.component import GenerateParam, Generate
+from agent.component import GenerateParam, Generate
from rag.utils import num_tokens_from_string, encoder
@@ -21,7 +21,7 @@ from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
-from graph.component.base import ComponentBase, ComponentParamBase
+from agent.component.base import ComponentBase, ComponentParamBase


class RetrievalParam(ComponentParamBase):
@@ -54,8 +54,8 @@ class Retrieval(ComponentBase, ABC):
         for role, cnt in history[::-1][:self._param.message_history_window_size]:
             if role != "user":continue
             query.append(cnt)
-        query = "\n".join(query)
+        # query = "\n".join(query)
+        query = query[0]
         kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
         if not kbs:
             raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
@@ -75,8 +75,9 @@ class Retrieval(ComponentBase, ABC):
                                         aggs=False, rerank_mdl=rerank_mdl)

         if not kbinfos["chunks"]:
-            df = Retrieval.be_output(self._param.empty_response)
-            df["empty_response"] = True
+            df = Retrieval.be_output("")
+            if self._param.empty_response and self._param.empty_response.strip():
+                df["empty_response"] = self._param.empty_response
             return df

         df = pd.DataFrame(kbinfos["chunks"])
@@ -16,7 +16,7 @@
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
-from graph.component import GenerateParam, Generate
+from agent.component import GenerateParam, Generate


 class RewriteQuestionParam(GenerateParam):
@@ -54,7 +54,7 @@ class RewriteQuestion(Generate, ABC):
             setattr(self, "_loop", 0)
         if self._loop >= self._param.loop:
             self._loop = 0
-            raise Exception("Maximum loop time exceeds. Can't find relevant information.")
+            raise Exception("Sorry! Nothing relevant found.")
         self._loop += 1
         q = "Question: "
         for r, c in self._canvas.history[::-1]:
125 agent/component/switch.py Normal file
@@ -0,0 +1,125 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class SwitchParam(ComponentParamBase):
    """
    Define the Switch component parameters.
    """

    def __init__(self):
        super().__init__()
        """
        {
            "logical_operator" : "and | or"
            "items" : [
                        {"cpn_id": "categorize:0", "operator": "contains", "value": ""},
                        {"cpn_id": "categorize:0", "operator": "contains", "value": ""},...],
            "to": ""
        }
        """
        self.conditions = []
        self.end_cpn_id = "answer:0"
        self.operators = ['contains', 'not contains', 'start with', 'end with', 'empty', 'not empty', '=', '≠', '>',
                          '<', '≥', '≤']

    def check(self):
        self.check_empty(self.conditions, "[Switch] conditions")
        for cond in self.conditions:
            if not cond["to"]: raise ValueError(f"[Switch] 'To' can not be empty!")


class Switch(ComponentBase, ABC):
    component_name = "Switch"

    def _run(self, history, **kwargs):
        for cond in self._param.conditions:

            if len(cond["items"]) == 1:
                out = self._canvas.get_component(cond["items"][0]["cpn_id"])["obj"].output()[1]
                cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
                if self.process_operator(cpn_input, cond["items"][0]["operator"], cond["items"][0]["value"]):
                    return Switch.be_output(cond["to"])
                continue

            if cond["logical_operator"] == "and":
                res = True
                for item in cond["items"]:
                    out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
                    cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
                    if not self.process_operator(cpn_input, item["operator"], item["value"]):
                        res = False
                        break
                if res:
                    return Switch.be_output(cond["to"])
                continue

            res = False
            for item in cond["items"]:
                out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
                cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
                if self.process_operator(cpn_input, item["operator"], item["value"]):
                    res = True
                    break
            if res:
                return Switch.be_output(cond["to"])

        return Switch.be_output(self._param.end_cpn_id)

    def process_operator(self, input: str, operator: str, value: str) -> bool:
        if not isinstance(input, str) or not isinstance(value, str):
            raise ValueError('Invalid input or value type: string')

        if operator == "contains":
            return True if value.lower() in input.lower() else False
        elif operator == "not contains":
            return True if value.lower() not in input.lower() else False
        elif operator == "start with":
            return True if input.lower().startswith(value.lower()) else False
        elif operator == "end with":
            return True if input.lower().endswith(value.lower()) else False
        elif operator == "empty":
            return True if not input else False
        elif operator == "not empty":
            return True if input else False
        elif operator == "=":
            return True if input == value else False
        elif operator == "≠":
            return True if input != value else False
        elif operator == ">":
            try:
                return True if float(input) > float(value) else False
            except Exception as e:
                return True if input > value else False
        elif operator == "<":
            try:
                return True if float(input) < float(value) else False
            except Exception as e:
                return True if input < value else False
        elif operator == "≥":
            try:
                return True if float(input) >= float(value) else False
            except Exception as e:
                return True if input >= value else False
        elif operator == "≤":
            try:
                return True if float(input) <= float(value) else False
            except Exception as e:
                return True if input <= value else False

        raise ValueError('Not supported operator' + operator)
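The operator table above is easiest to see in isolation. A minimal sketch with a trimmed, free-standing adaptation of process_operator (three of the twelve operators), showing why the numeric branches fall back to string comparison when float() fails:

    def process_operator(input: str, operator: str, value: str) -> bool:
        # Trimmed adaptation of Switch.process_operator above, not the full method.
        if operator == "contains":
            return value.lower() in input.lower()
        if operator == "empty":
            return not input
        if operator == ">":
            try:
                return float(input) > float(value)   # numeric compare when both parse
            except ValueError:
                return input > value                 # lexicographic fallback otherwise
        raise ValueError("Not supported operator " + operator)

    assert process_operator("Yes, absolutely.", "contains", "yes")
    assert process_operator("12", ">", "9")   # numeric: 12 > 9, though "12" < "9" as strings
    assert not process_operator("", "contains", "x")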
72 agent/component/tushare.py Normal file
@@ -0,0 +1,72 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
import pandas as pd
import time
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class TuShareParam(ComponentParamBase):
    """
    Define the TuShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.token = "xxx"
        self.src = "eastmoney"
        self.start_date = "2024-01-01 09:00:00"
        self.end_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.keyword = ""

    def check(self):
        self.check_valid_value(self.src, "Quick News Source",
                               ["sina", "wallstreetcn", "10jqka", "eastmoney", "yuncaijing", "fenghuang", "jinrongjie"])


class TuShare(ComponentBase, ABC):
    component_name = "TuShare"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return TuShare.be_output("")

        try:
            tus_res = []
            params = {
                "api_name": "news",
                "token": self._param.token,
                "params": {"src": self._param.src, "start_date": self._param.start_date,
                           "end_date": self._param.end_date}
            }
            response = requests.post(url="http://api.tushare.pro", data=json.dumps(params).encode('utf-8'))
            response = response.json()
            if response['code'] != 0:
                return TuShare.be_output(response['msg'])
            df = pd.DataFrame(response['data']['items'])
            df.columns = response['data']['fields']
            tus_res.append({"content": (df[df['content'].str.contains(self._param.keyword, case=False)]).to_markdown()})
        except Exception as e:
            return TuShare.be_output("**ERROR**: " + str(e))

        if not tus_res:
            return TuShare.be_output("")

        return pd.DataFrame(tus_res)
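The component above talks to TuShare's HTTP endpoint directly rather than going through the tushare SDK. A minimal sketch of the same request shape with a placeholder token; the payload keys mirror the component code:

    import json
    import requests

    payload = {
        "api_name": "news",
        "token": "YOUR_TUSHARE_TOKEN",  # placeholder
        "params": {"src": "eastmoney",
                   "start_date": "2024-01-01 09:00:00",
                   "end_date": "2024-06-01 09:00:00"},
    }
    resp = requests.post("http://api.tushare.pro",
                         data=json.dumps(payload).encode("utf-8")).json()
    if resp["code"] == 0:
        # 'fields' names the columns and 'items' holds the rows,
        # exactly what the component feeds into pd.DataFrame above.
        print(resp["data"]["fields"], len(resp["data"]["items"]))
    else:
        print(resp["msg"])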
74 agent/component/wencai.py Normal file
@@ -0,0 +1,74 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import pywencai
from agent.component.base import ComponentBase, ComponentParamBase


class WenCaiParam(ComponentParamBase):
    """
    Define the WenCai component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.query_type = "stock"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.query_type, "Query type",
                               ['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
                                'futures', 'lccp',
                                'foreign_exchange'])


class WenCai(ComponentBase, ABC):
    component_name = "WenCai"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return WenCai.be_output("")

        try:
            wencai_res = []
            res = pywencai.get(query=ans, query_type=self._param.query_type, perpage=self._param.top_n)
            if isinstance(res, pd.DataFrame):
                wencai_res.append({"content": res.to_markdown()})
            if isinstance(res, dict):
                for item in res.items():
                    if isinstance(item[1], list):
                        wencai_res.append({"content": item[0] + "\n" + pd.DataFrame(item[1]).to_markdown()})
                        continue
                    if isinstance(item[1], str):
                        wencai_res.append({"content": item[0] + "\n" + item[1]})
                        continue
                    if isinstance(item[1], dict):
                        if "meta" in item[1].keys():
                            continue
                        wencai_res.append({"content": pd.DataFrame.from_dict(item[1], orient='index').to_markdown()})
                        continue
                    wencai_res.append({"content": item[0] + "\n" + str(item[1])})
        except Exception as e:
            return WenCai.be_output("**ERROR**: " + str(e))

        if not wencai_res:
            return WenCai.be_output("")

        return pd.DataFrame(wencai_res)
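pywencai.get can return either a DataFrame or a dict of mixed values, which is why the component branches on isinstance for every item. A minimal sketch of the same call; the query string is an arbitrary example:

    import pandas as pd
    import pywencai

    # Ask iwencai for the top 5 results of a stock screen.
    res = pywencai.get(query="top 5 banks by market cap", query_type="stock", perpage=5)
    if isinstance(res, pd.DataFrame):
        print(res.to_markdown())
    elif isinstance(res, dict):
        for key, val in res.items():
            # Lists become tables, strings pass through, dicts get pivoted,
            # mirroring the branches in WenCai._run above.
            print(key, type(val))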
69 agent/component/wikipedia.py Normal file
@@ -0,0 +1,69 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial
import wikipedia
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class WikipediaParam(ComponentParamBase):
    """
    Define the Wikipedia component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.language, "Wikipedia languages",
                               ['af', 'pl', 'ar', 'ast', 'az', 'bg', 'nan', 'bn', 'be', 'ca', 'cs', 'cy', 'da', 'de',
                                'et', 'el', 'en', 'es', 'eo', 'eu', 'fa', 'fr', 'gl', 'ko', 'hy', 'hi', 'hr', 'id',
                                'it', 'he', 'ka', 'lld', 'la', 'lv', 'lt', 'hu', 'mk', 'arz', 'ms', 'min', 'my', 'nl',
                                'ja', 'nb', 'nn', 'ce', 'uz', 'pt', 'kk', 'ro', 'ru', 'ceb', 'sk', 'sl', 'sr', 'sh',
                                'fi', 'sv', 'ta', 'tt', 'th', 'tg', 'azb', 'tr', 'uk', 'ur', 'vi', 'war', 'zh', 'yue'])


class Wikipedia(ComponentBase, ABC):
    component_name = "Wikipedia"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Wikipedia.be_output("")

        try:
            wiki_res = []
            wikipedia.set_lang(self._param.language)
            wiki_engine = wikipedia
            for wiki_key in wiki_engine.search(ans, results=self._param.top_n):
                page = wiki_engine.page(title=wiki_key, auto_suggest=False)
                wiki_res.append({"content": '<a href="' + page.url + '">' + page.title + '</a> ' + page.summary})
        except Exception as e:
            return Wikipedia.be_output("**ERROR**: " + str(e))

        if not wiki_res:
            return Wikipedia.be_output("")

        df = pd.DataFrame(wiki_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
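The wikipedia package drives the component above. A minimal standalone sketch of the same search-then-page loop; auto_suggest=False matches the component and avoids the library's fuzzy title rewriting:

    import wikipedia

    wikipedia.set_lang("en")
    for title in wikipedia.search("retrieval augmented generation", results=3):
        try:
            page = wikipedia.page(title=title, auto_suggest=False)
            print(page.title, page.url)
            print(page.summary[:200])
        except (wikipedia.exceptions.PageError, wikipedia.exceptions.DisambiguationError):
            continue  # the component lets these surface as its **ERROR** output instead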
83 agent/component/yahoofinance.py Normal file
@@ -0,0 +1,83 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
import yfinance as yf


class YahooFinanceParam(ComponentParamBase):
    """
    Define the YahooFinance component parameters.
    """

    def __init__(self):
        super().__init__()
        self.info = True
        self.history = False
        self.count = False
        self.financials = False
        self.income_stmt = False
        self.balance_sheet = False
        self.cash_flow_statement = False
        self.news = True

    def check(self):
        self.check_boolean(self.info, "get all stock info")
        self.check_boolean(self.history, "get historical market data")
        self.check_boolean(self.count, "show share count")
        self.check_boolean(self.financials, "show financials")
        self.check_boolean(self.income_stmt, "income statement")
        self.check_boolean(self.balance_sheet, "balance sheet")
        self.check_boolean(self.cash_flow_statement, "cash flow statement")
        self.check_boolean(self.news, "show news")


class YahooFinance(ComponentBase, ABC):
    component_name = "YahooFinance"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return YahooFinance.be_output("")

        yohoo_res = []
        try:
            msft = yf.Ticker(ans)
            if self._param.info:
                yohoo_res.append({"content": "info:\n" + pd.Series(msft.info).to_markdown() + "\n"})
            if self._param.history:
                yohoo_res.append({"content": "history:\n" + msft.history().to_markdown() + "\n"})
            if self._param.financials:
                yohoo_res.append({"content": "calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n"})
            if self._param.balance_sheet:
                yohoo_res.append({"content": "balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n"})
                yohoo_res.append(
                    {"content": "quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n"})
            if self._param.cash_flow_statement:
                yohoo_res.append({"content": "cash flow statement:\n" + msft.cashflow.to_markdown() + "\n"})
                yohoo_res.append(
                    {"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
            if self._param.news:
                yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
        except Exception as e:
            print("**ERROR** " + str(e))

        if not yohoo_res:
            return YahooFinance.be_output("")

        return pd.DataFrame(yohoo_res)
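Each flag in YahooFinanceParam maps to one yfinance Ticker attribute read in _run. A minimal sketch of the three most common reads; the "MSFT" symbol and the period are illustrative:

    import pandas as pd
    import yfinance as yf

    ticker = yf.Ticker("MSFT")
    print(pd.Series(ticker.info).to_markdown())        # same read as the 'info' flag
    print(ticker.history(period="1mo").to_markdown())  # 'history' flag (the component uses the default period)
    print(ticker.balance_sheet.to_markdown())          # 'balance_sheet' flag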
687 agent/templates/DB Assistant.json Normal file
File diff suppressed because one or more lines are too long
492 agent/templates/medical_consultation.json Normal file
File diff suppressed because one or more lines are too long
445 agent/templates/text2sql.json Normal file
File diff suppressed because one or more lines are too long
547 agent/templates/websearch_assistant.json Normal file
File diff suppressed because one or more lines are too long
@@ -16,9 +16,8 @@
 import argparse
 import os
 from functools import partial
-import readline
-from graph.canvas import Canvas
-from graph.settings import DEBUG
+from agent.canvas import Canvas
+from agent.settings import DEBUG


 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
129 agent/test/dsl_examples/baidu_generate_and_switch.json Normal file
@@ -0,0 +1,129 @@
{
    "components": {
        "begin": {
            "obj":{
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["baidu:0"],
            "upstream": ["begin", "message:0","message:1"]
        },
        "baidu:0": {
            "obj": {
                "component_name": "Baidu",
                "params": {}
            },
            "downstream": ["generate:0"],
            "upstream": ["answer:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the user's question based on what Baidu searched. First, please output the user's question and the content searched by Baidu, and then answer yes, no, or i don't know.Here is the user's question:{user_input}The above is the user's question.Here is what Baidu searched for:{baidu}The above is the content searched by Baidu.",
                    "temperature": 0.2
                },
                "parameters": [
                    {
                        "component_id": "answer:0",
                        "id": "69415446-49bf-4d4b-8ec9-ac86066f7709",
                        "key": "user_input"
                    },
                    {
                        "component_id": "baidu:0",
                        "id": "83363c2a-00a8-402f-a45c-ddc4097d7d8b",
                        "key": "baidu"
                    }
                ]
            },
            "downstream": ["switch:0"],
            "upstream": ["baidu:0"]
        },
        "switch:0": {
            "obj": {
                "component_name": "Switch",
                "params": {
                    "conditions": [
                        {
                            "logical_operator" : "or",
                            "items" : [
                                {"cpn_id": "generate:0", "operator": "contains", "value": "yes"},
                                {"cpn_id": "generate:0", "operator": "contains", "value": "yeah"}
                            ],
                            "to": "message:0"
                        },
                        {
                            "logical_operator" : "and",
                            "items" : [
                                {"cpn_id": "generate:0", "operator": "contains", "value": "no"},
                                {"cpn_id": "generate:0", "operator": "not contains", "value": "yes"},
                                {"cpn_id": "generate:0", "operator": "not contains", "value": "know"}
                            ],
                            "to": "message:1"
                        },
                        {
                            "logical_operator" : "",
                            "items" : [
                                {"cpn_id": "generate:0", "operator": "contains", "value": "know"}
                            ],
                            "to": "message:2"
                        }
                    ],
                    "end_cpn_id": "answer:0"

                }
            },
            "downstream": ["message:0","message:1"],
            "upstream": ["generate:0"]
        },
        "message:0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": ["YES YES YES YES YES YES YES YES YES YES YES YES"]
                }
            },

            "upstream": ["switch:0"],
            "downstream": ["answer:0"]
        },
        "message:1": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": ["NO NO NO NO NO NO NO NO NO NO NO NO NO NO"]
                }
            },

            "upstream": ["switch:0"],
            "downstream": ["answer:0"]
        },
        "message:2": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": ["I DON'T KNOW---------------------------"]
                }
            },

            "upstream": ["switch:0"],
            "downstream": ["answer:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
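The DSL above is a plain adjacency structure: each component lists its downstream and upstream neighbours. A minimal sketch (the helper name is hypothetical) that checks every downstream edge has a mirrored upstream entry, an easy mistake to make when hand-editing these files:

    import json

    def check_dsl_links(dsl: dict) -> list:
        """Return downstream edges whose target lacks the matching upstream entry."""
        comps = dsl["components"]
        missing = []
        for name, comp in comps.items():
            for down in comp.get("downstream", []):
                if name not in comps.get(down, {}).get("upstream", []):
                    missing.append((name, down))
        return missing

    with open("agent/test/dsl_examples/baidu_generate_and_switch.json") as f:
        print(check_dsl_links(json.load(f)))  # [] when every edge is mirrored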
43 agent/test/dsl_examples/exesql.json Normal file
@@ -0,0 +1,43 @@
{
    "components": {
        "begin": {
            "obj":{
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["exesql:0"],
            "upstream": ["begin", "exesql:0"]
        },
        "exesql:0": {
            "obj": {
                "component_name": "ExeSQL",
                "params": {
                    "database": "rag_flow",
                    "username": "root",
                    "host": "mysql",
                    "port": 3306,
                    "password": "infini_rag_flow",
                    "top_n": 3
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["answer:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
62 agent/test/dsl_examples/keyword_wikipedia_and_generate.json Normal file
@@ -0,0 +1,62 @@
{
    "components": {
        "begin": {
            "obj":{
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["keyword:0"],
            "upstream": ["begin"]
        },
        "keyword:0": {
            "obj": {
                "component_name": "KeywordExtract",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "- Role: You're a question analyzer.\n - Requirements:\n - Summarize user's question, and give top %s important keyword/phrase.\n - Use comma as a delimiter to separate keywords/phrases.\n - Answer format: (in language of user's question)\n - keyword: ",
                    "temperature": 0.2,
                    "top_n": 1
                }
            },
            "downstream": ["wikipedia:0"],
            "upstream": ["answer:0"]
        },
        "wikipedia:0": {
            "obj":{
                "component_name": "Wikipedia",
                "params": {
                    "top_n": 10
                }
            },
            "downstream": ["generate:0"],
            "upstream": ["keyword:0"]
        },
        "generate:1": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content from Wikipedia. When the answer from Wikipedia is incomplete, you need to output the URL link of the corresponding content as well. When all the content searched from Wikipedia is irrelevant to the question, your answer must include the sentence, \"The answer you are looking for is not found in the Wikipedia!\". Answers need to consider chat history.\n The content of Wikipedia is as follows:\n {input}\n The above is the content of Wikipedia.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["wikipedia:0"]
        }
    },
    "history": [],
    "path": [],
    "messages": [],
    "reference": {},
    "answer": []
}
@@ -1,124 +1,126 @@
 #
 # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 import logging
 import os
 import sys
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 from flask import Blueprint, Flask
 from werkzeug.wrappers.request import Request
 from flask_cors import CORS

 from api.db import StatusEnum
 from api.db.db_models import close_connection
 from api.db.services import UserService
-from api.utils import CustomJSONEncoder
+from api.utils import CustomJSONEncoder, commands

 from flask_session import Session
 from flask_login import LoginManager
 from api.settings import SECRET_KEY, stat_logger
 from api.settings import API_VERSION, access_logger
 from api.utils.api_utils import server_error_response
 from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

 __all__ = ['app']


 logger = logging.getLogger('flask.app')
 for h in access_logger.handlers:
     logger.addHandler(h)

 Request.json = property(lambda self: self.get_json(force=True, silent=True))

 app = Flask(__name__)
 CORS(app, supports_credentials=True,max_age=2592000)
 app.url_map.strict_slashes = False
 app.json_encoder = CustomJSONEncoder
 app.errorhandler(Exception)(server_error_response)


 ## convince for dev and debug
 #app.config["LOGIN_DISABLED"] = True
 app.config["SESSION_PERMANENT"] = False
 app.config["SESSION_TYPE"] = "filesystem"
 app.config['MAX_CONTENT_LENGTH'] = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))

 Session(app)
 login_manager = LoginManager()
 login_manager.init_app(app)

+commands.register_commands(app)
+

 def search_pages_path(pages_dir):
     app_path_list = [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
-    api_path_list = [path for path in pages_dir.glob('*_api.py') if not path.name.startswith('.')]
+    api_path_list = [path for path in pages_dir.glob('*sdk/*.py') if not path.name.startswith('.')]
     app_path_list.extend(api_path_list)
     return app_path_list


 def register_page(page_path):
     path = f'{page_path}'
-    page_name = page_path.stem.rstrip('_api') if "_api" in path else page_path.stem.rstrip('_app')
+    page_name = page_path.stem.rstrip('_app')
     module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name,))
     spec = spec_from_file_location(module_name, page_path)
     page = module_from_spec(spec)
     page.app = app
     page.manager = Blueprint(page_name, module_name)
     sys.modules[module_name] = page
     spec.loader.exec_module(page)
     page_name = getattr(page, 'page_name', page_name)
-    url_prefix = f'/api/{API_VERSION}/{page_name}' if "_api" in path else f'/{API_VERSION}/{page_name}'
+    url_prefix = f'/api/{API_VERSION}/{page_name}' if "/sdk/" in path else f'/{API_VERSION}/{page_name}'
     app.register_blueprint(page.manager, url_prefix=url_prefix)
     return url_prefix


 pages_dir = [
     Path(__file__).parent,
-    Path(__file__).parent.parent / 'api' / 'apps', # FIXME: ragflow/api/api/apps, can be remove?
+    Path(__file__).parent.parent / 'api' / 'apps',
+    Path(__file__).parent.parent / 'api' / 'apps' / 'sdk',
 ]

 client_urls_prefix = [
     register_page(path)
     for dir in pages_dir
     for path in search_pages_path(dir)
 ]


 @login_manager.request_loader
 def load_user(web_request):
     jwt = Serializer(secret_key=SECRET_KEY)
     authorization = web_request.headers.get("Authorization")
     if authorization:
         try:
             access_token = str(jwt.loads(authorization))
             user = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
             if user:
                 return user[0]
             else:
                 return None
         except Exception as e:
             stat_logger.exception(e)
             return None
     else:
         return None


 @app.teardown_request
 def _db_close(exc):
     close_connection()
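The route-discovery change above hinges on two Path.glob patterns. A small sketch of what each one matches; the directory layout is illustrative:

    from pathlib import Path

    apps = Path("api/apps")
    # '*_app.py' matches direct children, e.g. api/apps/canvas_app.py
    print(sorted(p.name for p in apps.glob("*_app.py")))
    # '*sdk/*.py' descends into any child directory whose name ends in 'sdk',
    # e.g. api/apps/sdk/doc.py -- this is how the new SDK routes get picked up
    print(sorted(str(p) for p in apps.glob("*sdk/*.py")))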
1369 api/apps/api_app.py
File diff suppressed because it is too large Load Diff
@@ -15,15 +15,14 @@
 #
 import json
 from functools import partial

 from flask import request, Response
 from flask_login import login_required, current_user

-from api.db.db_models import UserCanvas
 from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
+from api.settings import RetCode
 from api.utils import get_uuid
-from api.utils.api_utils import get_json_result, server_error_response, validate_request
-from graph.canvas import Canvas
+from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
+from agent.canvas import Canvas
+from peewee import MySQLDatabase, PostgresqlDatabase


 @manager.route('/templates', methods=['GET'])
@@ -45,6 +44,10 @@ def canvas_list():
 @login_required
 def rm():
     for i in request.json["canvas_ids"]:
+        if not UserCanvasService.query(user_id=current_user.id,id=i):
+            return get_json_result(
+                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
+                retcode=RetCode.OPERATING_ERROR)
         UserCanvasService.delete_by_id(i)
     return get_json_result(data=True)
@@ -63,10 +66,13 @@ def save():
             return server_error_response(ValueError("Duplicated title."))
         req["id"] = get_uuid()
         if not UserCanvasService.save(**req):
-            return server_error_response("Fail to save canvas.")
+            return get_data_error_result(retmsg="Fail to save canvas.")
     else:
+        if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+            return get_json_result(
+                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
+                retcode=RetCode.OPERATING_ERROR)
         UserCanvasService.update_by_id(req["id"], req)

     return get_json_result(data=req)
@@ -75,7 +81,7 @@ def save():
 def get(canvas_id):
     e, c = UserCanvasService.get_by_id(canvas_id)
     if not e:
-        return server_error_response("canvas not found.")
+        return get_data_error_result(retmsg="canvas not found.")
     return get_json_result(data=c.to_dict())
@@ -87,26 +93,31 @@ def run():
     stream = req.get("stream", True)
     e, cvs = UserCanvasService.get_by_id(req["id"])
     if not e:
-        return server_error_response("canvas not found.")
+        return get_data_error_result(retmsg="canvas not found.")
+    if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+        return get_json_result(
+            data=False, retmsg=f'Only owner of canvas authorized for this operation.',
+            retcode=RetCode.OPERATING_ERROR)
+
     if not isinstance(cvs.dsl, str):
         cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

     final_ans = {"reference": [], "content": ""}
+    message_id = req.get("message_id", get_uuid())
     try:
         canvas = Canvas(cvs.dsl, current_user.id)
         if "message" in req:
-            canvas.messages.append({"role": "user", "content": req["message"]})
+            canvas.messages.append({"role": "user", "content": req["message"], "id": message_id})
             canvas.add_user_input(req["message"])
         answer = canvas.run(stream=stream)
         print(canvas)
     except Exception as e:
         return server_error_response(e)

-    assert answer, "Nothing. Is it over?"
+    assert answer is not None, "Nothing. Is it over?"

     if stream:
-        assert isinstance(answer, partial)
+        assert isinstance(answer, partial), "Nothing. Is it over?"

         def sse():
             nonlocal answer, cvs
@@ -117,7 +128,7 @@ def run():
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                     yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"

-                canvas.messages.append({"role": "assistant", "content": final_ans["content"]})
+                canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
@@ -135,12 +146,13 @@ def run():
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp

-    canvas.messages.append({"role": "assistant", "content": final_ans["content"]})
+    final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+    canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
     if final_ans.get("reference"):
         canvas.reference.append(final_ans["reference"])
     cvs.dsl = json.loads(str(canvas))
     UserCanvasService.update_by_id(req["id"], cvs.to_dict())
-    return get_json_result(data=req["dsl"])
+    return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})


 @manager.route('/reset', methods=['POST'])
@@ -151,7 +163,11 @@ def reset():
     try:
         e, user_canvas = UserCanvasService.get_by_id(req["id"])
         if not e:
-            return server_error_response("canvas not found.")
+            return get_data_error_result(retmsg="canvas not found.")
+        if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+            return get_json_result(
+                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
+                retcode=RetCode.OPERATING_ERROR)

         canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
         canvas.reset()
@@ -160,3 +176,22 @@ def reset():
         return get_json_result(data=req["dsl"])
     except Exception as e:
         return server_error_response(e)
+
+
+@manager.route('/test_db_connect', methods=['POST'])
+@validate_request("db_type", "database", "username", "host", "port", "password")
+@login_required
+def test_db_connect():
+    req = request.json
+    try:
+        if req["db_type"] in ["mysql", "mariadb"]:
+            db = MySQLDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
+                               password=req["password"])
+        elif req["db_type"] == 'postgresql':
+            db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
+                                    password=req["password"])
+        db.connect()
+        db.close()
+        return get_json_result(data="Database Connection Successful!")
+    except Exception as e:
+        return server_error_response(e)
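The new /test_db_connect endpoint probes a database with peewee's connection classes. A minimal sketch of the same probe outside Flask; host and credentials are placeholders:

    from peewee import MySQLDatabase, OperationalError

    db = MySQLDatabase("rag_flow", user="root", host="127.0.0.1",
                       port=3306, password="secret")  # placeholder credentials
    try:
        db.connect()   # raises OperationalError on a bad host or password
        db.close()
        print("Database Connection Successful!")
    except OperationalError as e:
        print("connection failed:", e)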
@ -1,287 +1,331 @@
|
|||||||
#
|
#
|
||||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
# You may obtain a copy of the License at
|
# You may obtain a copy of the License at
|
||||||
#
|
#
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
#
|
#
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
import datetime
|
import datetime
|
||||||
|
import json
|
||||||
from flask import request
|
import traceback
|
||||||
from flask_login import login_required, current_user
|
|
||||||
from elasticsearch_dsl import Q
|
from flask import request
|
||||||
|
from flask_login import login_required, current_user
|
||||||
from rag.app.qa import rmPrefix, beAdoc
|
from elasticsearch_dsl import Q
|
||||||
from rag.nlp import search, rag_tokenizer, keyword_extraction
|
|
||||||
from rag.utils.es_conn import ELASTICSEARCH
|
from rag.app.qa import rmPrefix, beAdoc
|
||||||
from rag.utils import rmSpace
|
from rag.nlp import search, rag_tokenizer, keyword_extraction
|
||||||
from api.db import LLMType, ParserType
|
from rag.utils.es_conn import ELASTICSEARCH
|
||||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
from rag.utils import rmSpace
|
||||||
from api.db.services.llm_service import TenantLLMService
|
from api.db import LLMType, ParserType
|
||||||
from api.db.services.user_service import UserTenantService
|
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
from api.db.services.llm_service import TenantLLMService
|
||||||
from api.db.services.document_service import DocumentService
|
from api.db.services.user_service import UserTenantService
|
||||||
from api.settings import RetCode, retrievaler
|
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||||
from api.utils.api_utils import get_json_result
|
from api.db.services.document_service import DocumentService
|
||||||
import hashlib
|
from api.settings import RetCode, retrievaler, kg_retrievaler
|
||||||
import re
|
from api.utils.api_utils import get_json_result
|
||||||
|
import hashlib
|
||||||
|
import re
|
||||||
@manager.route('/list', methods=['POST'])
|
|
||||||
@login_required
|
|
||||||
@validate_request("doc_id")
|
@manager.route('/list', methods=['POST'])
|
||||||
def list_chunk():
|
@login_required
|
||||||
req = request.json
|
@validate_request("doc_id")
|
||||||
doc_id = req["doc_id"]
|
def list_chunk():
|
||||||
page = int(req.get("page", 1))
|
req = request.json
|
||||||
size = int(req.get("size", 30))
|
doc_id = req["doc_id"]
|
||||||
question = req.get("keywords", "")
|
page = int(req.get("page", 1))
|
||||||
try:
|
size = int(req.get("size", 30))
|
||||||
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
question = req.get("keywords", "")
|
||||||
if not tenant_id:
|
try:
|
||||||
return get_data_error_result(retmsg="Tenant not found!")
|
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
||||||
e, doc = DocumentService.get_by_id(doc_id)
|
if not tenant_id:
|
||||||
if not e:
|
return get_data_error_result(retmsg="Tenant not found!")
|
||||||
return get_data_error_result(retmsg="Document not found!")
|
e, doc = DocumentService.get_by_id(doc_id)
|
||||||
query = {
|
if not e:
|
||||||
"doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
|
return get_data_error_result(retmsg="Document not found!")
|
||||||
}
|
query = {
|
||||||
if "available_int" in req:
|
"doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
|
||||||
query["available_int"] = int(req["available_int"])
|
}
|
||||||
sres = retrievaler.search(query, search.index_name(tenant_id))
|
if "available_int" in req:
|
||||||
res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
|
query["available_int"] = int(req["available_int"])
|
||||||
for id in sres.ids:
|
sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
|
||||||
d = {
|
res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
|
||||||
"chunk_id": id,
|
for id in sres.ids:
|
||||||
"content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[id].get(
|
d = {
|
||||||
"content_with_weight", ""),
|
"chunk_id": id,
|
||||||
"doc_id": sres.field[id]["doc_id"],
|
"content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[
|
||||||
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
id].get(
|
||||||
"important_kwd": sres.field[id].get("important_kwd", []),
|
"content_with_weight", ""),
|
||||||
"img_id": sres.field[id].get("img_id", ""),
|
"doc_id": sres.field[id]["doc_id"],
|
||||||
"available_int": sres.field[id].get("available_int", 1),
|
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
||||||
"positions": sres.field[id].get("position_int", "").split("\t")
|
"important_kwd": sres.field[id].get("important_kwd", []),
|
||||||
}
|
"img_id": sres.field[id].get("img_id", ""),
|
||||||
if len(d["positions"]) % 5 == 0:
|
"available_int": sres.field[id].get("available_int", 1),
|
||||||
poss = []
|
"positions": sres.field[id].get("position_int", "").split("\t")
|
||||||
for i in range(0, len(d["positions"]), 5):
|
}
|
||||||
poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
|
if len(d["positions"]) % 5 == 0:
|
||||||
float(d["positions"][i + 3]), float(d["positions"][i + 4])])
|
poss = []
|
||||||
d["positions"] = poss
|
for i in range(0, len(d["positions"]), 5):
|
||||||
res["chunks"].append(d)
|
poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
|
||||||
return get_json_result(data=res)
|
float(d["positions"][i + 3]), float(d["positions"][i + 4])])
|
||||||
except Exception as e:
|
d["positions"] = poss
|
||||||
if str(e).find("not_found") > 0:
|
res["chunks"].append(d)
|
||||||
return get_json_result(data=False, retmsg=f'No chunk found!',
|
return get_json_result(data=res)
|
||||||
retcode=RetCode.DATA_ERROR)
|
except Exception as e:
|
||||||
return server_error_response(e)
|
if str(e).find("not_found") > 0:
|
||||||
|
return get_json_result(data=False, retmsg=f'No chunk found!',
|
||||||
|
retcode=RetCode.DATA_ERROR)
|
||||||
@manager.route('/get', methods=['GET'])
|
return server_error_response(e)
|
||||||
@login_required
|
|
||||||
def get():
|
|
||||||
chunk_id = request.args["chunk_id"]
|
@manager.route('/get', methods=['GET'])
|
||||||
try:
|
@login_required
|
||||||
tenants = UserTenantService.query(user_id=current_user.id)
|
def get():
|
||||||
if not tenants:
|
chunk_id = request.args["chunk_id"]
|
||||||
return get_data_error_result(retmsg="Tenant not found!")
|
try:
|
||||||
res = ELASTICSEARCH.get(
|
tenants = UserTenantService.query(user_id=current_user.id)
|
||||||
chunk_id, search.index_name(
|
if not tenants:
|
||||||
tenants[0].tenant_id))
|
return get_data_error_result(retmsg="Tenant not found!")
|
||||||
if not res.get("found"):
|
res = ELASTICSEARCH.get(
|
||||||
return server_error_response("Chunk not found")
|
chunk_id, search.index_name(
|
||||||
id = res["_id"]
|
tenants[0].tenant_id))
|
||||||
res = res["_source"]
|
if not res.get("found"):
|
||||||
res["chunk_id"] = id
|
return server_error_response("Chunk not found")
|
||||||
k = []
|
id = res["_id"]
|
||||||
for n in res.keys():
|
res = res["_source"]
|
||||||
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
res["chunk_id"] = id
|
||||||
k.append(n)
|
k = []
|
||||||
for n in k:
|
for n in res.keys():
|
||||||
del res[n]
|
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
||||||
|
k.append(n)
|
||||||
return get_json_result(data=res)
|
for n in k:
|
||||||
except Exception as e:
|
del res[n]
|
||||||
if str(e).find("NotFoundError") >= 0:
|
|
||||||
return get_json_result(data=False, retmsg=f'Chunk not found!',
|
return get_json_result(data=res)
|
||||||
retcode=RetCode.DATA_ERROR)
|
except Exception as e:
|
||||||
return server_error_response(e)
|
if str(e).find("NotFoundError") >= 0:
|
||||||
|
return get_json_result(data=False, retmsg=f'Chunk not found!',
|
||||||
|
retcode=RetCode.DATA_ERROR)
|
||||||
@manager.route('/set', methods=['POST'])
|
return server_error_response(e)
|
||||||
@login_required
|
|
||||||
@validate_request("doc_id", "chunk_id", "content_with_weight",
|
|
||||||
"important_kwd")
|
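The regex filter in get() keeps internal index fields (embedding vectors and tokenized variants) out of the API response. The same idea in isolation, with assumed sample field names:

import re

doc = {"content_with_weight": "text", "q_1024_vec": [0.1], "content_ltks": "t", "chunk_id": "abc"}
internal = [k for k in doc if re.search(r"(_vec$|_sm_|_tks|_ltks)", k)]
for k in internal:
    del doc[k]
print(doc)  # {'content_with_weight': 'text', 'chunk_id': 'abc'}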
@manager.route('/set', methods=['POST'])
@login_required
@validate_request("doc_id", "chunk_id", "content_with_weight",
                  "important_kwd")
def set():
    req = request.json
    d = {
        "id": req["chunk_id"],
        "content_with_weight": req["content_with_weight"]}
    d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req["important_kwd"]
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
    if "available_int" in req:
        d["available_int"] = req["available_int"]

    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")

        if doc.parser_id == ParserType.QA:
            arr = [
                t for t in re.split(
                    r"[\n\t]",
                    req["content_with_weight"]) if len(t) > 1]
            if len(arr) != 2:
                return get_data_error_result(
                    retmsg="Q&A must be separated by TAB/ENTER key.")
            q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
            d = beAdoc(d, arr[0], arr[1], not any(
                [rag_tokenizer.is_chinese(t) for t in q + a]))

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
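For non-Q&A chunks, set() stores a vector that blends the document-name embedding (weight 0.1) with the content embedding (weight 0.9). A toy illustration of that formula with made-up two-dimensional vectors:

import numpy as np

title_vec, content_vec = np.array([1.0, 0.0]), np.array([0.0, 1.0])  # invented values
blended = 0.1 * title_vec + 0.9 * content_vec   # same blend as in set() above
print(blended.tolist())  # [0.1, 0.9]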
@manager.route('/switch', methods=['POST'])
@login_required
@validate_request("chunk_ids", "available_int", "doc_id")
def switch():
    req = request.json
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        if not ELASTICSEARCH.upsert([{"id": i, "available_int": int(req["available_int"])} for i in req["chunk_ids"]],
                                    search.index_name(tenant_id)):
            return get_data_error_result(retmsg="Index updating failure")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("chunk_ids", "doc_id")
def rm():
    req = request.json
    try:
        if not ELASTICSEARCH.deleteByQuery(
                Q("ids", values=req["chunk_ids"]), search.index_name(current_user.id)):
            return get_data_error_result(retmsg="Index updating failure")
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        deleted_chunk_ids = req["chunk_ids"]
        chunk_number = len(deleted_chunk_ids)
        DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
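The delete in rm() is scoped with an elasticsearch_dsl ids query, so only the listed chunks are removed from the tenant's index. A sketch of the query object it builds (chunk ids invented):

from elasticsearch_dsl import Q

q = Q("ids", values=["chunk-a", "chunk-b"])
print(q.to_dict())  # {'ids': {'values': ['chunk-a', 'chunk-b']}}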
@manager.route('/create', methods=['POST'])
@login_required
@validate_request("doc_id", "content_with_weight")
def create():
    req = request.json
    md5 = hashlib.md5()
    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
    chunck_id = md5.hexdigest()
    d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
         "content_with_weight": req["content_with_weight"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_kwd", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        d["kb_id"] = [doc.kb_id]
        d["docnm_kwd"] = doc.name
        d["doc_id"] = doc.id

        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v = 0.1 * v[0] + 0.9 * v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))

        DocumentService.increment_chunk_num(
            doc.id, doc.kb_id, c, 1, 0)
        return get_json_result(data={"chunk_id": chunck_id})
    except Exception as e:
        return server_error_response(e)
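Hashing the content together with doc_id gives create() a deterministic chunk id, so posting the same content to the same document maps to the same id. A minimal sketch with invented values:

import hashlib

content, doc_id = "some chunk text", "doc-123"
md5 = hashlib.md5()
md5.update((content + doc_id).encode("utf-8"))
print(md5.hexdigest())  # stable for the same (content, doc_id) pair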
@manager.route('/retrieval_test', methods=['POST'])
@login_required
@validate_request("kb_id", "question")
def retrieval_test():
    req = request.json
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req["question"]
    kb_id = req["kb_id"]
    if isinstance(kb_id, str): kb_id = [kb_id]
    doc_ids = req.get("doc_ids", [])
    similarity_threshold = float(req.get("similarity_threshold", 0.0))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))

    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        for kid in kb_id:
            for tenant in tenants:
                if KnowledgebaseService.query(
                        tenant_id=tenant.tenant_id, id=kid):
                    break
            else:
                return get_json_result(
                    data=False, retmsg='Only owner of knowledgebase authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)

        e, kb = KnowledgebaseService.get_by_id(kb_id[0])
        if not e:
            return get_data_error_result(retmsg="Knowledgebase not found!")

        embd_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = TenantLLMService.model_instance(
                kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])

        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)

        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
                               similarity_threshold, vector_similarity_weight, top,
                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
        for c in ranks["chunks"]:
            if "vector" in c:
                del c["vector"]

        return get_json_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg='No chunk found! Check the chunk status please!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)
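A hypothetical request body for /retrieval_test, assembled from the fields the handler reads above; every value is illustrative and the optional fields may be omitted:

payload = {
    "kb_id": ["kb-1"],                  # one or more knowledgebase ids
    "question": "What is RAGFlow?",
    "page": 1,
    "size": 30,
    "similarity_threshold": 0.2,
    "vector_similarity_weight": 0.3,
    "top_k": 1024,
    "rerank_id": "",                    # optional reranker model
    "keyword": False,                   # optionally expand the query with extracted keywords
    "highlight": True,
}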
@manager.route('/knowledge_graph', methods=['GET'])
@login_required
def knowledge_graph():
    doc_id = request.args["doc_id"]
    req = {
        "doc_ids": [doc_id],
        "knowledge_graph_kwd": ["graph", "mind_map"]
    }
    tenant_id = DocumentService.get_tenant_id(doc_id)
    sres = retrievaler.search(req, search.index_name(tenant_id))
    obj = {"graph": {}, "mind_map": {}}
    for id in sres.ids[:2]:
        ty = sres.field[id]["knowledge_graph_kwd"]
        try:
            obj[ty] = json.loads(sres.field[id]["content_with_weight"])
        except Exception as e:
            print(traceback.format_exc(), flush=True)

    return get_json_result(data=obj)
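Judging only from the handler above, the /knowledge_graph response carries at most one parsed object per tag; the placeholder below is an assumption about the envelope, not taken from this commit:

obj = {
    "graph": {},      # JSON decoded from the chunk tagged knowledge_graph_kwd == "graph"
    "mind_map": {},   # JSON decoded from the chunk tagged "mind_map"
}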
@@ -1,175 +1,376 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
import traceback
from copy import deepcopy

from api.db.services.user_service import UserTenantService
from flask import request, Response
from flask_login import login_required, current_user

from api.db import LLMType
from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api.settings import RetCode, retrievaler
from api.utils import get_uuid
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor
@manager.route('/set', methods=['POST'])
@login_required
def set_conversation():
    req = request.json
    conv_id = req.get("conversation_id")
    if conv_id:
        del req["conversation_id"]
        try:
            if not ConversationService.update_by_id(conv_id, req):
                return get_data_error_result(retmsg="Conversation not found!")
            e, conv = ConversationService.get_by_id(conv_id)
            if not e:
                return get_data_error_result(
                    retmsg="Fail to update a conversation!")
            conv = conv.to_dict()
            return get_json_result(data=conv)
        except Exception as e:
            return server_error_response(e)

    try:
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(retmsg="Dialog not found")
        conv = {
            "id": get_uuid(),
            "dialog_id": req["dialog_id"],
            "name": req.get("name", "New conversation"),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(retmsg="Fail to new a conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)
@manager.route('/get', methods=['GET'])
@login_required
def get():
    conv_id = request.args["conversation_id"]
    try:
        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        tenants = UserTenantService.query(user_id=current_user.id)
        for tenant in tenants:
            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                break
        else:
            return get_json_result(
                data=False, retmsg='Only owner of conversation authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)
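The ownership check above leans on Python's for/else: the else branch runs only when the loop completes without break, i.e. when no tenant owns the dialog. A standalone sketch with invented ids:

tenant_ids = ["t1", "t2"]
owner = "t2"
for t in tenant_ids:
    if t == owner:
        break          # found an owning tenant; the else branch is skipped
else:
    print("no tenant owns this dialog")   # runs only if the loop never breaks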
@manager.route('/rm', methods=['POST'])
@login_required
def rm():
    conv_ids = request.json["conversation_ids"]
    try:
        for cid in conv_ids:
            exist, conv = ConversationService.get_by_id(cid)
            if not exist:
                return get_data_error_result(retmsg="Conversation not found!")
            tenants = UserTenantService.query(user_id=current_user.id)
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                    break
            else:
                return get_json_result(
                    data=False, retmsg='Only owner of conversation authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)
            ConversationService.delete_by_id(cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@manager.route('/list', methods=['GET'])
@login_required
def list_convsersation():
    dialog_id = request.args["dialog_id"]
    try:
        if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
            return get_json_result(
                data=False, retmsg='Only owner of dialog authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        convs = ConversationService.query(
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)
        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)
@manager.route('/completion', methods=['POST'])
@login_required
@validate_request("conversation_id", "messages")
def completion():
    req = request.json
    # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
    #     {"role": "user", "content": "上海有吗?"}  # i.e. "Is it available in Shanghai?"
    # ]}
    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    try:
        e, conv = ConversationService.get_by_id(req["conversation_id"])
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        conv.message = deepcopy(req["messages"])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            nonlocal conv, message_id
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                                "id": message_id, "prompt": ans.get("prompt", "")}
            ans["id"] = message_id

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp

        else:
            answer = None
            for ans in chat(dia, msg, **req):
                answer = ans
                fillin_conv(ans)
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)
    except Exception as e:
        return server_error_response(e)
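A hypothetical client for the streaming branch above. The URL, body values and the requests dependency are assumptions; only the "data:" framing and the retcode/data envelope come from the handler:

import json
import requests  # assumed third-party dependency for this sketch

url = "http://localhost:9380/v1/conversation/completion"   # assumed address
body = {"conversation_id": "...", "messages": [{"role": "user", "content": "hi"}]}
with requests.post(url, json=body, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        frame = json.loads(line[len("data:"):])
        if frame["data"] is True:        # final end-of-stream frame
            break
        print(frame["data"]["answer"])   # the answer text, regrown on each frame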
@manager.route('/tts', methods=['POST'])
@login_required
def tts():
    req = request.json
    text = req["text"]

    tenants = TenantService.get_by_user_id(current_user.id)
    if not tenants:
        return get_data_error_result(retmsg="Tenant not found!")

    tts_id = tenants[0]["tts_id"]
    if not tts_id:
        return get_data_error_result(retmsg="No default TTS model is set")

    tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

    def stream_audio():
        try:
            for chunk in tts_mdl.tts(text):
                yield chunk
        except Exception as e:
            yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e)}},
                                        ensure_ascii=False)).encode('utf-8')

    resp = Response(stream_audio(), mimetype="audio/mpeg")
    resp.headers.add_header("Cache-Control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")

    return resp
@manager.route('/delete_msg', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")

    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] != msg.get("id", ""):
            continue
        assert conv["message"][i + 1]["id"] == req["message_id"]
        conv["message"].pop(i)
        conv["message"].pop(i)
        conv["reference"].pop(max(0, i // 2 - 1))
        break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)
@manager.route('/thumbup', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")
    up_down = req.get("set")
    feedback = req.get("feedback", "")
    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
            if up_down:
                msg["thumbup"] = True
                if "feedback" in msg: del msg["feedback"]
            else:
                msg["thumbup"] = False
                if feedback: msg["feedback"] = feedback
            break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)
@manager.route('/ask', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def ask_about():
    req = request.json
    uid = current_user.id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp
@manager.route('/mindmap', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def mindmap():
    req = request.json
    kb_ids = req["kb_ids"]
    e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
    if not e:
        return get_data_error_result(retmsg="Knowledgebase not found!")

    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    ranks = retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                  0.3, 0.3, aggs=False)
    mindmap = MindMapExtractor(chat_mdl)
    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
    if "error" in mind_map:
        return server_error_response(Exception(mind_map["error"]))
    return get_json_result(data=mind_map)
@manager.route('/related_questions', methods=['POST'])
@login_required
@validate_request("question")
def related_questions():
    req = request.json
    question = req["question"]
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
 - Based on the keywords provided by the user, generate 5-10 related search terms.
 - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
 - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
 - Keep the term length between 2-4 words, concise and clear.
 - DO NOT translate, use the language of the original keywords.

### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup

Reason:
 - When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
 - Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
 - At the same time, related terms can also help search engines better understand user needs and return more accurate search results.

"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
    return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
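The return line above keeps only numbered lines from the model's reply and strips the "N. " prefix. The same post-processing in isolation, with an invented reply:

import re

ans = "1. Current status of Chinese football\nsome chatter\n2. Reform of Chinese football"
terms = [re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)]
print(terms)  # ['Current status of Chinese football', 'Reform of Chinese football']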
@@ -16,15 +16,16 @@ import os
import pathlib
import re
import warnings
from functools import partial
from io import BytesIO

from elasticsearch_dsl import Q
from flask import request, send_file
from flask_login import login_required, current_user
from httpx import HTTPError

from api.contants import NAME_LENGTH_LIMIT
from api.db import FileType, ParserType, FileSource, TaskStatus
from api.db import StatusEnum
from api.db.db_models import File
from api.db.services import duplicate_name

@@ -38,10 +39,14 @@ from api.utils import get_uuid
from api.utils.api_utils import construct_json_result, construct_error_response
from api.utils.api_utils import construct_result, validate_request
from api.utils.file_utils import filename_type, thumbnail
from rag.app import book, laws, manual, naive, one, paper, presentation, qa, resume, table, picture, audio, email
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL

MAXIMUM_OF_UPLOADING_FILES = 256


# ------------------------------ create a dataset ---------------------------------------

@manager.route("/", methods=["POST"])
@@ -116,6 +121,7 @@ def create_dataset():
    except Exception as e:
        return construct_error_response(e)


# -----------------------------list datasets-------------------------------------------------------

@manager.route("/", methods=["GET"])

@@ -135,6 +141,7 @@ def list_datasets():
    except HTTPError as http_err:
        return construct_json_result(http_err)


# ---------------------------------delete a dataset ----------------------------

@manager.route("/<dataset_id>", methods=["DELETE"])

@@ -162,13 +169,15 @@ def remove_dataset(dataset_id):

        # delete the dataset
        if not KnowledgebaseService.delete_by_id(dataset_id):
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message="There was an error during the dataset removal process. "
                                                 "Please check the status of the RAGFlow server and try the removal again.")
        # success
        return construct_json_result(code=RetCode.SUCCESS, message=f"Remove dataset: {dataset_id} successfully")
    except Exception as e:
        return construct_error_response(e)


# ------------------------------ get details of a dataset ----------------------------------------

@manager.route("/<dataset_id>", methods=["GET"])

@@ -182,6 +191,7 @@ def get_dataset(dataset_id):
    except Exception as e:
        return construct_json_result(e)


# ------------------------------ update a dataset --------------------------------------------

@manager.route("/<dataset_id>", methods=["PUT"])

@@ -209,8 +219,9 @@ def update_dataset(dataset_id):
        if name.lower() != dataset.name.lower() \
                and len(KnowledgebaseService.query(name=name, tenant_id=current_user.id,
                                                   status=StatusEnum.VALID.value)) > 1:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"The name: {name.lower()} is already used by other "
                                                 f"datasets. Please choose a different name.")

        dataset_updating_data = {}
        chunk_num = req.get("chunk_num")
@@ -222,17 +233,22 @@ def update_dataset(dataset_id):
            if chunk_num == 0:
                dataset_updating_data["embd_id"] = req["embedding_model_id"]
            else:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message="You have already parsed the document in this "
                                                     "dataset, so you cannot change the embedding "
                                                     "model.")
        # only if chunk_num is 0, the user can update the chunk_method
        if "chunk_method" in req:
            type_value = req["chunk_method"]
            if is_illegal_value_for_enum(type_value, ParserType):
                return construct_json_result(message=f"Illegal value {type_value} for 'chunk_method' field.",
                                             code=RetCode.DATA_ERROR)
            if chunk_num != 0:
                construct_json_result(code=RetCode.DATA_ERROR, message="You have already parsed the document "
                                                                       "in this dataset, so you cannot "
                                                                       "change the chunk method.")
            dataset_updating_data["parser_id"] = req["template_type"]

        # convert the photo parameter to avatar
        if req.get("photo"):
            dataset_updating_data["avatar"] = req["photo"]

@@ -265,6 +281,7 @@ def update_dataset(dataset_id):
    except Exception as e:
        return construct_error_response(e)


# --------------------------------content management ----------------------------------------------

# ----------------------------upload files-----------------------------------------------------
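A hypothetical PUT body for the dataset update handled by the hunks above, limited to fields they actually read; all values are illustrative:

body = {
    "name": "my_dataset",
    "embedding_model_id": "BAAI/bge-large-zh-v1.5",  # accepted only while chunk_num == 0
    "chunk_method": "naive",                          # must be a legal ParserType value
    "photo": "<base64 avatar>",                       # stored as the dataset avatar
}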
@@ -335,15 +352,16 @@ def upload_documents(dataset_id):

            # upload to the minio
            location = filename
            while STORAGE_IMPL.obj_exist(dataset_id, location):
                location += "_"

            blob = file.read()

            # the content is empty, raising a warning
            if blob == b'':
                warnings.warn(f"[WARNING]: The content of the file {filename} is empty.")

            STORAGE_IMPL.put(dataset_id, location, blob)

            doc = {
                "id": get_uuid(),

@@ -359,6 +377,8 @@ def upload_documents(dataset_id):
            }
            if doc["type"] == FileType.VISUAL:
                doc["parser_id"] = ParserType.PICTURE.value
            if doc["type"] == FileType.AURAL:
                doc["parser_id"] = ParserType.AUDIO.value
            if re.search(r"\.(ppt|pptx|pages)$", filename):
                doc["parser_id"] = ParserType.PRESENTATION.value
            DocumentService.insert(doc)

@@ -421,7 +441,7 @@ def delete_document(document_id, dataset_id):  # string
            File2DocumentService.delete_by_document_id(document_id)

            # delete it from minio
            STORAGE_IMPL.rm(dataset_id, location)
        except Exception as e:
            errors += str(e)
        if errors:
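Sketch of the storage-name collision loop in the upload hunk above: underscores are appended until the object key is free in the dataset's bucket. The set below merely stands in for STORAGE_IMPL.obj_exist, and the keys are invented:

existing = {"report.pdf", "report.pdf_"}   # hypothetical occupied keys

location = "report.pdf"
while location in existing:                # stands in for STORAGE_IMPL.obj_exist(...)
    location += "_"
print(location)  # report.pdf__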
@@ -453,6 +473,7 @@ def list_documents(dataset_id):
    except Exception as e:
        return construct_error_response(e)


# ----------------------------update: enable rename-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["PUT"])
@login_required

@@ -555,6 +576,7 @@ def update_document(dataset_id, document_id):
def is_illegal_value_for_enum(value, enum_class):
    return value not in enum_class.__members__.values()


# ----------------------------download a file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["GET"])
@login_required

@@ -563,7 +585,8 @@ def download_document(dataset_id, document_id):
        # Check whether there is this dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")

        # Check whether there is this document
        exist, document = DocumentService.get_by_id(document_id)

@@ -573,7 +596,7 @@ def download_document(dataset_id, document_id):

        # The process of downloading
        doc_id, doc_location = File2DocumentService.get_minio_address(doc_id=document_id)  # minio address
        file_stream = STORAGE_IMPL.get(doc_id, doc_location)
        if not file_stream:
            return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)
@@ -591,11 +614,254 @@ def download_document(dataset_id, document_id):
    except Exception as e:
        return construct_error_response(e)


# ----------------------------start parsing a document-----------------------------------------------------
# helper method for parsing
# callback method
def doc_parse_callback(doc_id, prog=None, msg=""):
    cancel = DocumentService.do_cancel(doc_id)
    if cancel:
        raise Exception("The parsing process has been cancelled!")


"""
def doc_parse(binary, doc_name, parser_name, tenant_id, doc_id):
    match parser_name:
        case "book":
            book.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "laws":
            laws.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "manual":
            manual.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "naive":
            # It's the mode by default, which is general in the front-end
            naive.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "one":
            one.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "paper":
            paper.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "picture":
            picture.chunk(doc_name, binary=binary, tenant_id=tenant_id, lang="Chinese",
                          callback=partial(doc_parse_callback, doc_id))
        case "presentation":
            presentation.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "qa":
            qa.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "resume":
            resume.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "table":
            table.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "audio":
            audio.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "email":
            email.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case _:
            return False

    return True
"""


@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["POST"])
@login_required
def parse_document(dataset_id, document_id):
    try:
        # valid dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")

        return parsing_document_internal(document_id)

    except Exception as e:
        return construct_error_response(e)


# ----------------------------start parsing documents-----------------------------------------------------
@manager.route("/<dataset_id>/documents/status", methods=["POST"])
@login_required
def parse_documents(dataset_id):
    doc_ids = request.json["doc_ids"]
    try:
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")
        # two conditions
        if not doc_ids:
            # documents inside the dataset
            docs, total = DocumentService.list_documents_in_dataset(dataset_id, 0, -1, "create_time",
                                                                    True, "")
            doc_ids = [doc["id"] for doc in docs]

        message = ""
        # for loop
        for id in doc_ids:
            res = parsing_document_internal(id)
            res_body = res.json
            if res_body["code"] == RetCode.SUCCESS:
                message += res_body["message"]
            else:
                return res
        return construct_json_result(data=True, code=RetCode.SUCCESS, message=message)

    except Exception as e:
        return construct_error_response(e)


# helper method for parsing the document
def parsing_document_internal(id):
    message = ""
    try:
        # Check whether there is this document
        exist, document = DocumentService.get_by_id(id)
        if not exist:
            return construct_json_result(message=f"This document '{id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        tenant_id = DocumentService.get_tenant_id(id)
        if not tenant_id:
            return construct_json_result(message="Tenant not found!", code=RetCode.AUTHENTICATION_ERROR)

        info = {"run": "1", "progress": 0}
        info["progress_msg"] = ""
        info["chunk_num"] = 0
        info["token_num"] = 0

        DocumentService.update_by_id(id, info)

        ELASTICSEARCH.deleteByQuery(Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

        _, doc_attributes = DocumentService.get_by_id(id)
        doc_attributes = doc_attributes.to_dict()
        doc_id = doc_attributes["id"]

        bucket, doc_name = File2DocumentService.get_minio_address(doc_id=doc_id)
        binary = STORAGE_IMPL.get(bucket, doc_name)
        parser_name = doc_attributes["parser_id"]
        if binary:
            res = doc_parse(binary, doc_name, parser_name, tenant_id, doc_id)
            if res is False:
                message += f"The parser id: {parser_name} of the document {doc_id} is not supported; "
        else:
            message += f"Empty data in the document: {doc_name}; "
        # failed in parsing
        if doc_attributes["status"] == TaskStatus.FAIL.value:
            message += f"Failed in parsing the document: {doc_id}; "
        return construct_json_result(code=RetCode.SUCCESS, message=message)
    except Exception as e:
        return construct_error_response(e)
|
# ----------------------------stop parsing a doc-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["DELETE"])
@login_required
def stop_parsing_document(dataset_id, document_id):
    try:
        # validate the dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")

        return stop_parsing_document_internal(document_id)

    except Exception as e:
        return construct_error_response(e)

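# Usage sketch (illustrative): cancelling a running parse job with the DELETE
# verb on the same status resource. URL and session auth are assumptions.
import requests

dataset_id, document_id = "<dataset-id>", "<document-id>"  # hypothetical ids
session = requests.Session()  # assumed to be authenticated
resp = session.delete(
    f"http://localhost:9380/v1/dataset/{dataset_id}/documents/{document_id}/status")
print(resp.json())  # empty message on success; only RUNNING documents are touched
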
# ----------------------------stop parsing docs-----------------------------------------------------
@manager.route("/<dataset_id>/documents/status", methods=["DELETE"])
@login_required
def stop_parsing_documents(dataset_id):
    doc_ids = request.json["doc_ids"]
    try:
        # validate the dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")
        if not doc_ids:
            # fall back to every document inside the dataset
            docs, total = DocumentService.list_documents_in_dataset(dataset_id, 0, -1, "create_time",
                                                                    True, "")
            doc_ids = [doc["id"] for doc in docs]

        message = ""
        # stop each document in turn, aborting at the first failure
        for id in doc_ids:
            res = stop_parsing_document_internal(id)
            res_body = res.json
            if res_body["code"] == RetCode.SUCCESS:
                message += res_body["message"]
            else:
                return res
        return construct_json_result(data=True, code=RetCode.SUCCESS, message=message)

    except Exception as e:
        return construct_error_response(e)

# helper method
def stop_parsing_document_internal(document_id):
    try:
        # validate the document
        exist, doc = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document '{document_id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)
        doc_attributes = doc.to_dict()

        # only a document that is currently parsing needs to be stopped
        if doc_attributes["status"] == TaskStatus.RUNNING.value:
            tenant_id = DocumentService.get_tenant_id(document_id)
            if not tenant_id:
                return construct_json_result(message="Tenant not found!", code=RetCode.AUTHENTICATION_ERROR)

            # did the update succeed?
            if not DocumentService.update_by_id(document_id, {"status": "2"}):  # cancel
                return construct_json_result(
                    code=RetCode.OPERATING_ERROR,
                    message="There was an error while stopping parsing of the document. "
                            "Please check the status of the RAGFlow server and try the update again."
                )

        _, doc_attributes = DocumentService.get_by_id(document_id)
        doc_attributes = doc_attributes.to_dict()

        # stopping the parse failed
        if doc_attributes["status"] == TaskStatus.RUNNING.value:
            return construct_json_result(message=f"Failed to stop parsing the document: {document_id}; ", code=RetCode.SUCCESS)
        return construct_json_result(code=RetCode.SUCCESS, message="")
    except Exception as e:
        return construct_error_response(e)

# ----------------------------show the status of the file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["GET"])
@login_required
def show_parsing_status(dataset_id, document_id):
    try:
        # validate the dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset: '{dataset_id}' cannot be found!")
        # validate the document
        exist, _ = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This document: '{document_id}' is not a valid document.")

        _, doc = DocumentService.get_by_id(document_id)  # get the doc object
        doc_attributes = doc.to_dict()

        return construct_json_result(
            data={"progress": doc_attributes["progress"], "status": TaskStatus(doc_attributes["status"]).name},
            code=RetCode.SUCCESS
        )
    except Exception as e:
        return construct_error_response(e)

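# Usage sketch (illustrative): polling the GET status endpoint until parsing
# finishes. URL, auth and the polling interval are assumptions; the status
# names come from the TaskStatus enum used above.
import time
import requests

dataset_id, document_id = "<dataset-id>", "<document-id>"  # hypothetical ids
session = requests.Session()  # assumed to be authenticated
while True:
    body = session.get(
        f"http://localhost:9380/v1/dataset/{dataset_id}/documents/{document_id}/status").json()
    print(body["data"])  # e.g. {"progress": 0.5, "status": "RUNNING"}
    if body["data"]["status"] not in ("RUNNING", "UNSTART"):
        break
    time.sleep(2)
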
# ----------------------------list the chunks of the file-----------------------------------------------------

@@ -610,6 +876,3 @@ def download_document(dataset_id, document_id):
# ----------------------------get a specific chunk-----------------------------------------------------

# ----------------------------retrieval test-----------------------------------------------------

@@ -1,172 +1,183 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from flask_login import login_required, current_user
from api.db.services.dialog_service import DialogService
from api.db import StatusEnum
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import RetCode
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.utils.api_utils import get_json_result

@manager.route('/set', methods=['POST'])
@login_required
def set_dialog():
    req = request.json
    dialog_id = req.get("dialog_id")
    name = req.get("name", "New Dialog")
    description = req.get("description", "A helpful Dialog")
    icon = req.get("icon", "")
    top_n = req.get("top_n", 6)
    top_k = req.get("top_k", 1024)
    rerank_id = req.get("rerank_id", "")
    if not rerank_id: req["rerank_id"] = ""
    similarity_threshold = req.get("similarity_threshold", 0.1)
    vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
    if vector_similarity_weight is None: vector_similarity_weight = 0.3
    llm_setting = req.get("llm_setting", {})
    default_prompt = {
        "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
以下是知识库:
{knowledge}
以上是知识库。""",
        "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
        "parameters": [
            {"key": "knowledge", "optional": False}
        ],
        "empty_response": "Sorry! 知识库中未找到相关内容!"
    }
    prompt_config = req.get("prompt_config", default_prompt)

    if not prompt_config["system"]:
        prompt_config["system"] = default_prompt["system"]
    # if len(prompt_config["parameters"]) < 1:
    #     prompt_config["parameters"] = default_prompt["parameters"]
    # for p in prompt_config["parameters"]:
    #     if p["key"] == "knowledge": break
    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])

    for p in prompt_config["parameters"]:
        if p["optional"]:
            continue
        if prompt_config["system"].find("{%s}" % p["key"]) < 0:
            return get_data_error_result(
                retmsg="Parameter '{}' is not used".format(p["key"]))

    try:
        e, tenant = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(retmsg="Tenant not found!")
        llm_id = req.get("llm_id", tenant.llm_id)
        if not dialog_id:
            if not req.get("kb_ids"):
                return get_data_error_result(
                    retmsg="Fail! Please select knowledgebase!")
            dia = {
                "id": get_uuid(),
                "tenant_id": current_user.id,
                "name": name,
                "kb_ids": req["kb_ids"],
                "description": description,
                "llm_id": llm_id,
                "llm_setting": llm_setting,
                "prompt_config": prompt_config,
                "top_n": top_n,
                "top_k": top_k,
                "rerank_id": rerank_id,
                "similarity_threshold": similarity_threshold,
                "vector_similarity_weight": vector_similarity_weight,
                "icon": icon
            }
            if not DialogService.save(**dia):
                return get_data_error_result(retmsg="Fail to new a dialog!")
            e, dia = DialogService.get_by_id(dia["id"])
            if not e:
                return get_data_error_result(retmsg="Fail to new a dialog!")
            return get_json_result(data=dia.to_json())
        else:
            del req["dialog_id"]
            if "kb_names" in req:
                del req["kb_names"]
            if not DialogService.update_by_id(dialog_id, req):
                return get_data_error_result(retmsg="Dialog not found!")
            e, dia = DialogService.get_by_id(dialog_id)
            if not e:
                return get_data_error_result(retmsg="Fail to update a dialog!")
            dia = dia.to_dict()
            dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
            return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)

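# Usage sketch (illustrative): creating a dialog through /set. Field values and
# the URL prefix (here assumed to be /v1/dialog) are assumptions; omitting
# "dialog_id" creates a new dialog, while providing one updates it.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.post(
    "http://localhost:9380/v1/dialog/set",
    json={
        "name": "Support bot",
        "kb_ids": ["<knowledgebase-id>"],  # hypothetical id
        "top_n": 6,
        "similarity_threshold": 0.1,
    })
print(resp.json()["data"]["id"])  # id of the newly created dialog
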
@manager.route('/get', methods=['GET'])
@login_required
def get():
    dialog_id = request.args["dialog_id"]
    try:
        e, dia = DialogService.get_by_id(dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        dia = dia.to_dict()
        dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
        return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)


def get_kb_names(kb_ids):
    ids, nms = [], []
    for kid in kb_ids:
        e, kb = KnowledgebaseService.get_by_id(kid)
        if not e or kb.status != StatusEnum.VALID.value:
            continue
        ids.append(kid)
        nms.append(kb.name)
    return ids, nms


@manager.route('/list', methods=['GET'])
@login_required
def list_dialogs():
    try:
        diags = DialogService.query(
            tenant_id=current_user.id,
            status=StatusEnum.VALID.value,
            reverse=True,
            order_by=DialogService.model.create_time)
        diags = [d.to_dict() for d in diags]
        for d in diags:
            d["kb_ids"], d["kb_names"] = get_kb_names(d["kb_ids"])
        return get_json_result(data=diags)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("dialog_ids")
def rm():
    req = request.json
    dialog_list = []
    tenants = UserTenantService.query(user_id=current_user.id)
    try:
        for id in req["dialog_ids"]:
            # only a tenant that owns the dialog may remove it
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=id):
                    break
            else:
                return get_json_result(
                    data=False, retmsg='Only owner of dialog authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)
            dialog_list.append({"id": id, "status": StatusEnum.INVALID.value})
        DialogService.update_many_by_id(dialog_list)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

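# Usage sketch (illustrative): removing dialogs. The endpoint soft-deletes by
# flipping the status to INVALID and rejects ids the caller's tenants do not
# own. URL prefix and auth are assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.post(
    "http://localhost:9380/v1/dialog/rm",
    json={"dialog_ids": ["<dialog-id>"]})  # hypothetical id
print(resp.json())  # {"data": true, ...} on success
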
@@ -1,482 +1,484 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
import datetime
import hashlib
import json
import os
import pathlib
import re
import traceback
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from io import BytesIO

import flask
from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

from api.db.db_models import Task, File
from api.db.services.dialog_service import DialogService, ConversationService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.llm_service import LLMBundle
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import TenantService, UserTenantService
from graphrag.mind_map_extractor import MindMapExtractor
from rag.app import naive
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType, TaskStatus, ParserType, FileSource, LLMType
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.settings import RetCode, stat_logger
from api.utils.api_utils import get_json_result
from rag.utils.storage_factory import STORAGE_IMPL
from api.utils.file_utils import filename_type, thumbnail, get_project_base_directory
from api.utils.web_utils import html2pdf, is_valid_url

@manager.route('/upload', methods=['POST'])
@login_required
@validate_request("kb_id")
def upload():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)

    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    err, _ = FileService.upload_document(kb, file_objs, current_user.id)
    if err:
        return get_json_result(
            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    return get_json_result(data=True)

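# Usage sketch (illustrative): uploading files as multipart/form-data. The URL
# prefix (here assumed to be /v1/document) and the authenticated session are
# assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
with open("manual.pdf", "rb") as f:  # hypothetical local file
    resp = session.post(
        "http://localhost:9380/v1/document/upload",
        data={"kb_id": "<knowledgebase-id>"},  # hypothetical id
        files={"file": ("manual.pdf", f)})
print(resp.json())  # {"data": true, ...} when every file is accepted
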
@manager.route('/web_crawl', methods=['POST'])
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    name = request.form.get("name")
    url = request.form.get("url")
    if not is_valid_url(url):
        return get_json_result(
            data=False, retmsg='The URL format is invalid', retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    blob = html2pdf(url)
    if not blob: return server_error_response(ValueError("Download failure."))

    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    kb_root_folder = FileService.get_kb_folder(current_user.id)
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    try:
        filename = duplicate_name(
            DocumentService.query,
            name=name + ".pdf",
            kb_id=kb.id)
        filetype = filename_type(filename)
        if filetype == FileType.OTHER.value:
            raise RuntimeError("This type of file has not been supported yet!")

        location = filename
        while STORAGE_IMPL.obj_exist(kb_id, location):
            location += "_"
        STORAGE_IMPL.put(kb_id, location, blob)
        doc = {
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": current_user.id,
            "type": filetype,
            "name": filename,
            "location": location,
            "size": len(blob),
            "thumbnail": thumbnail(filename, blob)
        }
        if doc["type"] == FileType.VISUAL:
            doc["parser_id"] = ParserType.PICTURE.value
        if doc["type"] == FileType.AURAL:
            doc["parser_id"] = ParserType.AUDIO.value
        if re.search(r"\.(ppt|pptx|pages)$", filename):
            doc["parser_id"] = ParserType.PRESENTATION.value
        DocumentService.insert(doc)
        FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
    except Exception as e:
        return server_error_response(e)
    return get_json_result(data=True)

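# Usage sketch (illustrative): crawling a web page into a knowledgebase. The
# endpoint renders the URL to PDF server-side and stores it as "<name>.pdf".
# URL prefix and auth are assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.post(
    "http://localhost:9380/v1/document/web_crawl",
    data={"kb_id": "<knowledgebase-id>",  # hypothetical id
          "name": "ragflow-readme",
          "url": "https://github.com/infiniflow/ragflow"})
print(resp.json())
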
@manager.route('/create', methods=['POST'])
@login_required
@validate_request("name", "kb_id")
def create():
    req = request.json
    kb_id = req["kb_id"]
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        e, kb = KnowledgebaseService.get_by_id(kb_id)
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if DocumentService.query(name=req["name"], kb_id=kb_id):
            return get_data_error_result(
                retmsg="Duplicated document name in the same knowledgebase.")

        doc = DocumentService.insert({
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": current_user.id,
            "type": FileType.VIRTUAL,
            "name": req["name"],
            "location": "",
            "size": 0
        })
        return get_json_result(data=doc.to_json())
    except Exception as e:
        return server_error_response(e)

@validate_request("name", "kb_id")
|
@manager.route('/list', methods=['GET'])
|
||||||
def create():
|
@login_required
|
||||||
req = request.json
|
def list_docs():
|
||||||
kb_id = req["kb_id"]
|
kb_id = request.args.get("kb_id")
|
||||||
if not kb_id:
|
if not kb_id:
|
||||||
return get_json_result(
|
return get_json_result(
|
||||||
data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
|
data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
|
||||||
|
tenants = UserTenantService.query(user_id=current_user.id)
|
||||||
try:
|
for tenant in tenants:
|
||||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
if KnowledgebaseService.query(
|
||||||
if not e:
|
tenant_id=tenant.tenant_id, id=kb_id):
|
||||||
return get_data_error_result(
|
break
|
||||||
retmsg="Can't find this knowledgebase!")
|
else:
|
||||||
|
return get_json_result(
|
||||||
if DocumentService.query(name=req["name"], kb_id=kb_id):
|
data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
|
||||||
return get_data_error_result(
|
retcode=RetCode.OPERATING_ERROR)
|
||||||
retmsg="Duplicated document name in the same knowledgebase.")
|
keywords = request.args.get("keywords", "")
|
||||||
|
|
||||||
doc = DocumentService.insert({
|
page_number = int(request.args.get("page", 1))
|
||||||
"id": get_uuid(),
|
items_per_page = int(request.args.get("page_size", 15))
|
||||||
"kb_id": kb.id,
|
orderby = request.args.get("orderby", "create_time")
|
||||||
"parser_id": kb.parser_id,
|
desc = request.args.get("desc", True)
|
||||||
"parser_config": kb.parser_config,
|
try:
|
||||||
"created_by": current_user.id,
|
docs, tol = DocumentService.get_by_kb_id(
|
||||||
"type": FileType.VIRTUAL,
|
kb_id, page_number, items_per_page, orderby, desc, keywords)
|
||||||
"name": req["name"],
|
return get_json_result(data={"total": tol, "docs": docs})
|
||||||
"location": "",
|
except Exception as e:
|
||||||
"size": 0
|
return server_error_response(e)
|
||||||
})
|
|
||||||
return get_json_result(data=doc.to_json())
|
|
||||||
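# Usage sketch (illustrative): paging through the documents of a knowledgebase.
# Only members of a tenant that owns the knowledgebase pass the ownership check
# above. URL prefix and auth are assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.get(
    "http://localhost:9380/v1/document/list",
    params={"kb_id": "<knowledgebase-id>",  # hypothetical id
            "page": 1, "page_size": 15, "keywords": ""})
print(resp.json()["data"]["total"])
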
@manager.route('/infos', methods=['POST'])
def docinfos():
    req = request.json
    doc_ids = req["doc_ids"]
    docs = DocumentService.get_by_ids(doc_ids)
    return get_json_result(data=list(docs.dicts()))


@manager.route('/thumbnails', methods=['GET'])
# @login_required
def thumbnails():
    doc_ids = request.args.get("doc_ids").split(",")
    if not doc_ids:
        return get_json_result(
            data=False, retmsg='Lack of "Document ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        docs = DocumentService.get_thumbnails(doc_ids)
        return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
    except Exception as e:
        return server_error_response(e)

@manager.route('/change_status', methods=['POST'])
@login_required
@validate_request("doc_id", "status")
def change_status():
    req = request.json
    if str(req["status"]) not in ["0", "1"]:
        return get_json_result(
            data=False,
            retmsg='"Status" must be either 0 or 1!',
            retcode=RetCode.ARGUMENT_ERROR)

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if not DocumentService.update_by_id(
                req["doc_id"], {"status": str(req["status"])}):
            return get_data_error_result(
                retmsg="Database error (Document update)!")

        if str(req["status"]) == "0":
            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
                                              scripts="ctx._source.available_int=0;",
                                              idxnm=search.index_name(
                                                  kb.tenant_id)
                                              )
        else:
            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
                                              scripts="ctx._source.available_int=1;",
                                              idxnm=search.index_name(
                                                  kb.tenant_id)
                                              )
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

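# Usage sketch (illustrative): disabling a document for retrieval. Status "0"
# also flips available_int to 0 on its indexed chunks, so they stop matching
# searches. URL prefix and auth are assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.post(
    "http://localhost:9380/v1/document/change_status",
    json={"doc_id": "<document-id>", "status": "0"})  # hypothetical id
print(resp.json())
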
if str(req["status"]) == "0":
|
@manager.route('/rm', methods=['POST'])
|
||||||
ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
|
@login_required
|
||||||
scripts="ctx._source.available_int=0;",
|
@validate_request("doc_id")
|
||||||
idxnm=search.index_name(
|
def rm():
|
||||||
kb.tenant_id)
|
req = request.json
|
||||||
)
|
doc_ids = req["doc_id"]
|
||||||
else:
|
if isinstance(doc_ids, str): doc_ids = [doc_ids]
|
||||||
ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
|
root_folder = FileService.get_root_folder(current_user.id)
|
||||||
scripts="ctx._source.available_int=1;",
|
pf_id = root_folder["id"]
|
||||||
idxnm=search.index_name(
|
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||||
kb.tenant_id)
|
errors = ""
|
||||||
)
|
for doc_id in doc_ids:
|
||||||
return get_json_result(data=True)
|
try:
|
||||||
except Exception as e:
|
e, doc = DocumentService.get_by_id(doc_id)
|
||||||
return server_error_response(e)
|
if not e:
|
||||||
|
return get_data_error_result(retmsg="Document not found!")
|
||||||
|
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||||
@manager.route('/rm', methods=['POST'])
|
if not tenant_id:
|
||||||
@login_required
|
return get_data_error_result(retmsg="Tenant not found!")
|
||||||
@validate_request("doc_id")
|
|
||||||
def rm():
|
b, n = File2DocumentService.get_minio_address(doc_id=doc_id)
|
||||||
req = request.json
|
|
||||||
doc_ids = req["doc_id"]
|
if not DocumentService.remove_document(doc, tenant_id):
|
||||||
if isinstance(doc_ids, str): doc_ids = [doc_ids]
|
return get_data_error_result(
|
||||||
root_folder = FileService.get_root_folder(current_user.id)
|
retmsg="Database error (Document removal)!")
|
||||||
pf_id = root_folder["id"]
|
|
||||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
f2d = File2DocumentService.get_by_document_id(doc_id)
|
||||||
errors = ""
|
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||||
for doc_id in doc_ids:
|
File2DocumentService.delete_by_document_id(doc_id)
|
||||||
try:
|
|
||||||
e, doc = DocumentService.get_by_id(doc_id)
|
STORAGE_IMPL.rm(b, n)
|
||||||
if not e:
|
except Exception as e:
|
||||||
return get_data_error_result(retmsg="Document not found!")
|
errors += str(e)
|
||||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
|
||||||
if not tenant_id:
|
if errors:
|
||||||
return get_data_error_result(retmsg="Tenant not found!")
|
return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
|
||||||
|
|
||||||
b, n = File2DocumentService.get_minio_address(doc_id=doc_id)
|
return get_json_result(data=True)
|
||||||
|
|
||||||
if not DocumentService.remove_document(doc, tenant_id):
|
|
||||||
@manager.route('/run', methods=['POST'])
@login_required
@validate_request("doc_ids", "run")
def run():
    req = request.json
    try:
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value:
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
            DocumentService.update_by_id(id, info)
            # if str(req["run"]) == TaskStatus.CANCEL.value:
            tenant_id = DocumentService.get_tenant_id(id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

            if str(req["run"]) == TaskStatus.RUNNING.value:
                TaskService.filter_delete([Task.doc_id == id])
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
                bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
                queue_tasks(doc, bucket, name)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

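# Usage sketch (illustrative): re-running the parsing pipeline for documents.
# "run" takes a TaskStatus value; RUNNING ("1") clears old chunks and queues
# new parsing tasks. URL prefix and auth are assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.post(
    "http://localhost:9380/v1/document/run",
    json={"doc_ids": ["<document-id>"], "run": "1"})  # hypothetical id
print(resp.json())
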
return get_data_error_result(retmsg="Tenant not found!")
|
@manager.route('/rename', methods=['POST'])
|
||||||
ELASTICSEARCH.deleteByQuery(
|
@login_required
|
||||||
Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
|
@validate_request("doc_id", "name")
|
||||||
|
def rename():
|
||||||
if str(req["run"]) == TaskStatus.RUNNING.value:
|
req = request.json
|
||||||
TaskService.filter_delete([Task.doc_id == id])
|
try:
|
||||||
e, doc = DocumentService.get_by_id(id)
|
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||||
doc = doc.to_dict()
|
if not e:
|
||||||
doc["tenant_id"] = tenant_id
|
return get_data_error_result(retmsg="Document not found!")
|
||||||
bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
|
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
|
||||||
queue_tasks(doc, bucket, name)
|
doc.name.lower()).suffix:
|
||||||
|
return get_json_result(
|
||||||
return get_json_result(data=True)
|
data=False,
|
||||||
except Exception as e:
|
retmsg="The extension of file can't be changed",
|
||||||
return server_error_response(e)
|
retcode=RetCode.ARGUMENT_ERROR)
|
||||||
|
for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
|
||||||
|
if d.name == req["name"]:
|
||||||
@manager.route('/rename', methods=['POST'])
|
return get_data_error_result(
|
||||||
@login_required
|
retmsg="Duplicated document name in the same knowledgebase.")
|
||||||
@validate_request("doc_id", "name")
|
|
||||||
def rename():
|
if not DocumentService.update_by_id(
|
||||||
req = request.json
|
req["doc_id"], {"name": req["name"]}):
|
||||||
try:
|
return get_data_error_result(
|
||||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
retmsg="Database error (Document rename)!")
|
||||||
if not e:
|
|
||||||
return get_data_error_result(retmsg="Document not found!")
|
informs = File2DocumentService.get_by_document_id(req["doc_id"])
|
||||||
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
|
if informs:
|
||||||
doc.name.lower()).suffix:
|
e, file = FileService.get_by_id(informs[0].file_id)
|
||||||
return get_json_result(
|
FileService.update_by_id(file.id, {"name": req["name"]})
|
||||||
data=False,
|
|
||||||
retmsg="The extension of file can't be changed",
|
return get_json_result(data=True)
|
||||||
retcode=RetCode.ARGUMENT_ERROR)
|
except Exception as e:
|
||||||
for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
|
return server_error_response(e)
|
||||||
if d.name == req["name"]:
|
|
||||||
return get_data_error_result(
|
|
||||||
retmsg="Duplicated document name in the same knowledgebase.")
|
@manager.route('/get/<doc_id>', methods=['GET'])
|
||||||
|
# @login_required
|
||||||
if not DocumentService.update_by_id(
|
def get(doc_id):
|
||||||
req["doc_id"], {"name": req["name"]}):
|
try:
|
||||||
return get_data_error_result(
|
e, doc = DocumentService.get_by_id(doc_id)
|
||||||
retmsg="Database error (Document rename)!")
|
if not e:
|
||||||
|
return get_data_error_result(retmsg="Document not found!")
|
||||||
informs = File2DocumentService.get_by_document_id(req["doc_id"])
|
|
||||||
if informs:
|
b, n = File2DocumentService.get_minio_address(doc_id=doc_id)
|
||||||
e, file = FileService.get_by_id(informs[0].file_id)
|
response = flask.make_response(STORAGE_IMPL.get(b, n))
|
||||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
|
||||||
|
ext = re.search(r"\.([^.]+)$", doc.name)
|
||||||
return get_json_result(data=True)
|
if ext:
|
||||||
except Exception as e:
|
if doc.type == FileType.VISUAL.value:
|
||||||
return server_error_response(e)
|
response.headers.set('Content-Type', 'image/%s' % ext.group(1))
|
||||||
|
else:
|
||||||
|
response.headers.set(
|
||||||
@manager.route('/get/<doc_id>', methods=['GET'])
|
'Content-Type',
|
||||||
# @login_required
|
'application/%s' %
|
||||||
def get(doc_id):
|
ext.group(1))
|
||||||
try:
|
return response
|
||||||
e, doc = DocumentService.get_by_id(doc_id)
|
except Exception as e:
|
||||||
if not e:
|
return server_error_response(e)
|
||||||
return get_data_error_result(retmsg="Document not found!")
|
|
||||||
|
|
||||||
@manager.route('/change_parser', methods=['POST'])
@login_required
@validate_request("doc_id", "parser_id")
def change_parser():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if doc.parser_id.lower() == req["parser_id"].lower():
            if "parser_config" in req:
                if req["parser_config"] == doc.parser_config:
                    return get_json_result(data=True)
            else:
                return get_json_result(data=True)

        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_data_error_result(retmsg="Not supported yet!")

        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if "parser_config" in req:
            DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(req["doc_id"])
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

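# Usage sketch (illustrative): switching a document to another chunking
# template. Changing the parser resets progress and drops previously indexed
# chunks. URL prefix, auth and the parser name are assumptions.
import requests

session = requests.Session()  # assumed to be authenticated
resp = session.post(
    "http://localhost:9380/v1/document/change_parser",
    json={"doc_id": "<document-id>", "parser_id": "naive"})  # hypothetical id
print(resp.json())
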
@manager.route('/image/<image_id>', methods=['GET'])
# @login_required
def get_image(image_id):
    try:
        bkt, nm = image_id.split("-")
        response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
        response.headers.set('Content-Type', 'image/JPEG')
        return response
    except Exception as e:
        return server_error_response(e)


@manager.route('/upload_and_parse', methods=['POST'])
@login_required
@validate_request("conversation_id")
def upload_and_parse():
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)

    doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)

    return get_json_result(data=doc_ids)

|||||||
@ -34,7 +34,7 @@ from api.utils.api_utils import get_json_result
|
|||||||
from api.utils.file_utils import filename_type
|
from api.utils.file_utils import filename_type
|
||||||
from rag.nlp import search
|
from rag.nlp import search
|
||||||
from rag.utils.es_conn import ELASTICSEARCH
|
from rag.utils.es_conn import ELASTICSEARCH
|
||||||
from rag.utils.minio_conn import MINIO
|
from rag.utils.storage_factory import STORAGE_IMPL
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/upload', methods=['POST'])
|
@manager.route('/upload', methods=['POST'])
|
||||||
@ -98,7 +98,7 @@ def upload():
|
|||||||
# file type
|
# file type
|
||||||
filetype = filename_type(file_obj_names[file_len - 1])
|
filetype = filename_type(file_obj_names[file_len - 1])
|
||||||
location = file_obj_names[file_len - 1]
|
location = file_obj_names[file_len - 1]
|
||||||
while MINIO.obj_exist(last_folder.id, location):
|
while STORAGE_IMPL.obj_exist(last_folder.id, location):
|
||||||
location += "_"
|
location += "_"
|
||||||
blob = file_obj.read()
|
blob = file_obj.read()
|
||||||
filename = duplicate_name(
|
filename = duplicate_name(
|
||||||
@ -116,7 +116,7 @@ def upload():
|
|||||||
"size": len(blob),
|
"size": len(blob),
|
||||||
}
|
}
|
||||||
file = FileService.insert(file)
|
file = FileService.insert(file)
|
||||||
MINIO.put(last_folder.id, location, blob)
|
STORAGE_IMPL.put(last_folder.id, location, blob)
|
||||||
file_res.append(file.to_json())
|
file_res.append(file.to_json())
|
||||||
return get_json_result(data=file_res)
|
return get_json_result(data=file_res)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@ -260,7 +260,7 @@ def rm():
|
|||||||
e, file = FileService.get_by_id(inner_file_id)
|
e, file = FileService.get_by_id(inner_file_id)
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="File not found!")
|
return get_data_error_result(retmsg="File not found!")
|
||||||
MINIO.rm(file.parent_id, file.location)
|
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||||
FileService.delete_folder_by_pf_id(current_user.id, file_id)
|
FileService.delete_folder_by_pf_id(current_user.id, file_id)
|
||||||
else:
|
else:
|
||||||
if not FileService.delete(file):
|
if not FileService.delete(file):
|
||||||
@ -296,7 +296,8 @@ def rename():
|
|||||||
e, file = FileService.get_by_id(req["file_id"])
|
e, file = FileService.get_by_id(req["file_id"])
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="File not found!")
|
return get_data_error_result(retmsg="File not found!")
|
||||||
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
|
if file.type != FileType.FOLDER.value \
|
||||||
|
and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
|
||||||
file.name.lower()).suffix:
|
file.name.lower()).suffix:
|
||||||
return get_json_result(
|
return get_json_result(
|
||||||
data=False,
|
data=False,
|
||||||
@ -332,7 +333,7 @@ def get(file_id):
|
|||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="Document not found!")
|
return get_data_error_result(retmsg="Document not found!")
|
||||||
b, n = File2DocumentService.get_minio_address(file_id=file_id)
|
b, n = File2DocumentService.get_minio_address(file_id=file_id)
|
||||||
response = flask.make_response(MINIO.get(b, n))
|
response = flask.make_response(STORAGE_IMPL.get(b, n))
|
||||||
ext = re.search(r"\.([^.]+)$", file.name)
|
ext = re.search(r"\.([^.]+)$", file.name)
|
||||||
if ext:
|
if ext:
|
||||||
if file.type == FileType.VISUAL.value:
|
if file.type == FileType.VISUAL.value:
|
||||||
|
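The mechanical MINIO-to-STORAGE_IMPL substitution above works because the storage factory exposes the same object-store verbs the call sites already used (get, put, obj_exist, rm). Below is a hypothetical sketch of that factory idea, not ragflow's actual implementation: the backend class, directory layout, and environment variable are all illustrative assumptions.

    # Hedged sketch of a storage factory: call sites depend only on the four
    # verbs below, so swapping backends becomes a configuration change.
    import os


    class LocalFSStorage:
        """Toy backend mapping (bucket, name) onto a local directory tree."""

        def __init__(self, root="/tmp/storage"):
            self.root = root

        def _path(self, bucket, name):
            return os.path.join(self.root, bucket, name)

        def put(self, bucket, name, blob):
            os.makedirs(os.path.join(self.root, bucket), exist_ok=True)
            with open(self._path(bucket, name), "wb") as f:
                f.write(blob)

        def get(self, bucket, name):
            with open(self._path(bucket, name), "rb") as f:
                return f.read()

        def obj_exist(self, bucket, name):
            return os.path.exists(self._path(bucket, name))

        def rm(self, bucket, name):
            os.remove(self._path(bucket, name))


    # A real factory would also register MinIO, S3, Azure, etc. here.
    BACKENDS = {"LOCAL": LocalFSStorage}
    STORAGE_IMPL = BACKENDS[os.environ.get("STORAGE_IMPL", "LOCAL")]()

    STORAGE_IMPL.put("kb1", "chunk.bin", b"hello")
    assert STORAGE_IMPL.obj_exist("kb1", "chunk.bin")
    print(STORAGE_IMPL.get("kb1", "chunk.bin"))
    STORAGE_IMPL.rm("kb1", "chunk.bin")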
@@ -1,153 +1,162 @@
 #
 # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 from elasticsearch_dsl import Q
 from flask import request
 from flask_login import login_required, current_user

 from api.db.services import duplicate_name
 from api.db.services.document_service import DocumentService
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.user_service import TenantService, UserTenantService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid, get_format_time
 from api.db import StatusEnum, UserTenantRole, FileSource
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.db_models import Knowledgebase, File
 from api.settings import stat_logger, RetCode
 from api.utils.api_utils import get_json_result
 from rag.nlp import search
 from rag.utils.es_conn import ELASTICSEARCH


 @manager.route('/create', methods=['post'])
 @login_required
 @validate_request("name")
 def create():
     req = request.json
     req["name"] = req["name"].strip()
     req["name"] = duplicate_name(
         KnowledgebaseService.query,
         name=req["name"],
         tenant_id=current_user.id,
         status=StatusEnum.VALID.value)
     try:
         req["id"] = get_uuid()
         req["tenant_id"] = current_user.id
         req["created_by"] = current_user.id
         e, t = TenantService.get_by_id(current_user.id)
         if not e:
             return get_data_error_result(retmsg="Tenant not found.")
         req["embd_id"] = t.embd_id
         if not KnowledgebaseService.save(**req):
             return get_data_error_result()
         return get_json_result(data={"kb_id": req["id"]})
     except Exception as e:
         return server_error_response(e)


 @manager.route('/update', methods=['post'])
 @login_required
 @validate_request("kb_id", "name", "description", "permission", "parser_id")
 def update():
     req = request.json
     req["name"] = req["name"].strip()
     try:
         if not KnowledgebaseService.query(
                 created_by=current_user.id, id=req["kb_id"]):
             return get_json_result(
                 data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)

         e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
         if not e:
             return get_data_error_result(
                 retmsg="Can't find this knowledgebase!")

         if req["name"].lower() != kb.name.lower() \
                 and len(KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
             return get_data_error_result(
                 retmsg="Duplicated knowledgebase name.")

         del req["kb_id"]
         if not KnowledgebaseService.update_by_id(kb.id, req):
             return get_data_error_result()

         e, kb = KnowledgebaseService.get_by_id(kb.id)
         if not e:
             return get_data_error_result(
                 retmsg="Database error (Knowledgebase rename)!")

         return get_json_result(data=kb.to_json())
     except Exception as e:
         return server_error_response(e)


 @manager.route('/detail', methods=['GET'])
 @login_required
 def detail():
     kb_id = request.args["kb_id"]
     try:
+        tenants = UserTenantService.query(user_id=current_user.id)
+        for tenant in tenants:
+            if KnowledgebaseService.query(
+                    tenant_id=tenant.tenant_id, id=kb_id):
+                break
+        else:
+            return get_json_result(
+                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
+                retcode=RetCode.OPERATING_ERROR)
         kb = KnowledgebaseService.get_detail(kb_id)
         if not kb:
             return get_data_error_result(
                 retmsg="Can't find this knowledgebase!")
         return get_json_result(data=kb)
     except Exception as e:
         return server_error_response(e)


 @manager.route('/list', methods=['GET'])
 @login_required
 def list_kbs():
     page_number = request.args.get("page", 1)
     items_per_page = request.args.get("page_size", 150)
     orderby = request.args.get("orderby", "create_time")
     desc = request.args.get("desc", True)
     try:
         tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
         kbs = KnowledgebaseService.get_by_tenant_ids(
             [m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc)
         return get_json_result(data=kbs)
     except Exception as e:
         return server_error_response(e)


 @manager.route('/rm', methods=['post'])
 @login_required
 @validate_request("kb_id")
 def rm():
     req = request.json
     try:
         kbs = KnowledgebaseService.query(
             created_by=current_user.id, id=req["kb_id"])
         if not kbs:
             return get_json_result(
                 data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)

         for doc in DocumentService.query(kb_id=req["kb_id"]):
             if not DocumentService.remove_document(doc, kbs[0].tenant_id):
                 return get_data_error_result(
                     retmsg="Database error (Document removal)!")
             f2d = File2DocumentService.get_by_document_id(doc.id)
             FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
             File2DocumentService.delete_by_document_id(doc.id)

         if not KnowledgebaseService.delete_by_id(req["kb_id"]):
             return get_data_error_result(
                 retmsg="Database error (Knowledgebase removal)!")
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
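The authorization block added to detail() leans on Python's for/else: the else arm runs only when the loop completes without hitting break. A small self-contained illustration of the pattern follows; the function and data are hypothetical stand-ins for the service calls above.

    def is_authorized(kb_id, tenant_ids, owned):
        # `owned` maps tenant_id -> set of kb_ids; a stand-in for
        # KnowledgebaseService.query(tenant_id=..., id=...).
        for tenant_id in tenant_ids:
            if kb_id in owned.get(tenant_id, set()):
                break  # some tenant the user belongs to can see this kb
        else:
            # reached only if no break fired, i.e. no tenant matched
            return False
        return True


    assert is_authorized("kb1", ["t1", "t2"], {"t2": {"kb1"}})
    assert not is_authorized("kb9", ["t1"], {"t2": {"kb1"}})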
@@ -1,242 +1,329 @@
 #
 # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 from flask import request
 from flask_login import login_required, current_user
 from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.db import StatusEnum, LLMType
 from api.db.db_models import TenantLLM
 from api.utils.api_utils import get_json_result
-from rag.llm import EmbeddingModel, ChatModel, RerankModel
+from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
+import requests


 @manager.route('/factories', methods=['GET'])
 @login_required
 def factories():
     try:
         fac = LLMFactoriesService.get_all()
-        return get_json_result(data=[f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed", "BAAI"]])
+        fac = [f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed", "BAAI"]]
+        llms = LLMService.get_all()
+        mdl_types = {}
+        for m in llms:
+            if m.status != StatusEnum.VALID.value:
+                continue
+            if m.fid not in mdl_types:
+                mdl_types[m.fid] = set([])
+            mdl_types[m.fid].add(m.model_type)
+        for f in fac:
+            f["model_types"] = list(mdl_types.get(f["name"], [LLMType.CHAT, LLMType.EMBEDDING, LLMType.RERANK,
+                                                              LLMType.IMAGE2TEXT, LLMType.SPEECH2TEXT, LLMType.TTS]))
+        return get_json_result(data=fac)
     except Exception as e:
         return server_error_response(e)


 @manager.route('/set_api_key', methods=['POST'])
 @login_required
 @validate_request("llm_factory", "api_key")
 def set_api_key():
     req = request.json
     # test if api key works
     chat_passed, embd_passed, rerank_passed = False, False, False
     factory = req["llm_factory"]
     msg = ""
-    for llm in LLMService.query(fid=factory):
+    for llm in LLMService.query(fid=factory)[:3]:
         if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
             mdl = EmbeddingModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
                 arr, tc = mdl.encode(["Test if the api key is available"])
-                if len(arr[0]) == 0 or tc == 0:
+                if len(arr[0]) == 0:
                     raise Exception("Fail")
                 embd_passed = True
             except Exception as e:
                 msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
         elif not chat_passed and llm.model_type == LLMType.CHAT.value:
             mdl = ChatModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
-                m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
-                    "temperature": 0.9})
-                if not tc:
+                m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
+                                 {"temperature": 0.9,'max_tokens':50})
+                if m.find("**ERROR**") >=0:
                     raise Exception(m)
             except Exception as e:
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
             chat_passed = True
         elif not rerank_passed and llm.model_type == LLMType.RERANK:
             mdl = RerankModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
                 arr, tc = mdl.similarity("What's the weather?", ["Is it sunny today?"])
                 if len(arr) == 0 or tc == 0:
                     raise Exception("Fail")
             except Exception as e:
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
             rerank_passed = True

     if msg:
         return get_data_error_result(retmsg=msg)

     llm = {
         "api_key": req["api_key"],
         "api_base": req.get("base_url", "")
     }
     for n in ["model_type", "llm_name"]:
         if n in req:
             llm[n] = req[n]

     if not TenantLLMService.filter_update(
             [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm):
         for llm in LLMService.query(fid=factory):
             TenantLLMService.save(
                 tenant_id=current_user.id,
                 llm_factory=factory,
                 llm_name=llm.llm_name,
                 model_type=llm.model_type,
                 api_key=req["api_key"],
                 api_base=req.get("base_url", "")
             )

     return get_json_result(data=True)


 @manager.route('/add_llm', methods=['POST'])
 @login_required
-@validate_request("llm_factory", "llm_name", "model_type")
+@validate_request("llm_factory")
 def add_llm():
     req = request.json
     factory = req["llm_factory"]

     if factory == "VolcEngine":
         # For VolcEngine, due to its special authentication method
-        # Assemble volc_ak, volc_sk, endpoint_id into api_key
-        temp = list(eval(req["llm_name"]).items())[0]
-        llm_name = temp[0]
-        endpoint_id = temp[1]
-        api_key = '{' + f'"volc_ak": "{req.get("volc_ak", "")}", ' \
-                  f'"volc_sk": "{req.get("volc_sk", "")}", ' \
-                  f'"ep_id": "{endpoint_id}", ' + '}'
+        # Assemble ark_api_key endpoint_id into api_key
+        llm_name = req["llm_name"]
+        api_key = '{' + f'"ark_api_key": "{req.get("ark_api_key", "")}", ' \
+                  f'"ep_id": "{req.get("endpoint_id", "")}", ' + '}'
+    elif factory == "Tencent Hunyuan":
+        api_key = '{' + f'"hunyuan_sid": "{req.get("hunyuan_sid", "")}", ' \
+                  f'"hunyuan_sk": "{req.get("hunyuan_sk", "")}"' + '}'
+        req["api_key"] = api_key
+        return set_api_key()
+    elif factory == "Tencent Cloud":
+        api_key = '{' + f'"tencent_cloud_sid": "{req.get("tencent_cloud_sid", "")}", ' \
+                  f'"tencent_cloud_sk": "{req.get("tencent_cloud_sk", "")}"' + '}'
+        req["api_key"] = api_key
     elif factory == "Bedrock":
         # For Bedrock, due to its special authentication method
         # Assemble bedrock_ak, bedrock_sk, bedrock_region
         llm_name = req["llm_name"]
         api_key = '{' + f'"bedrock_ak": "{req.get("bedrock_ak", "")}", ' \
                   f'"bedrock_sk": "{req.get("bedrock_sk", "")}", ' \
                   f'"bedrock_region": "{req.get("bedrock_region", "")}", ' + '}'
+    elif factory == "LocalAI":
+        llm_name = req["llm_name"]+"___LocalAI"
+        api_key = "xxxxxxxxxxxxxxx"
+    elif factory == "OpenAI-API-Compatible":
+        llm_name = req["llm_name"]+"___OpenAI-API"
+        api_key = req.get("api_key","xxxxxxxxxxxxxxx")
+    elif factory =="XunFei Spark":
+        llm_name = req["llm_name"]
+        api_key = req.get("spark_api_password","xxxxxxxxxxxxxxx")
+    elif factory == "BaiduYiyan":
+        llm_name = req["llm_name"]
+        api_key = '{' + f'"yiyan_ak": "{req.get("yiyan_ak", "")}", ' \
+                  f'"yiyan_sk": "{req.get("yiyan_sk", "")}"' + '}'
+    elif factory == "Fish Audio":
+        llm_name = req["llm_name"]
+        api_key = '{' + f'"fish_audio_ak": "{req.get("fish_audio_ak", "")}", ' \
+                  f'"fish_audio_refid": "{req.get("fish_audio_refid", "59cb5986671546eaa6ca8ae6f29f6d22")}"' + '}'
+    elif factory == "Google Cloud":
+        llm_name = req["llm_name"]
+        api_key = (
+            "{" + f'"google_project_id": "{req.get("google_project_id", "")}", '
+            f'"google_region": "{req.get("google_region", "")}", '
+            f'"google_service_account_key": "{req.get("google_service_account_key", "")}"'
+            + "}"
+        )
     else:
         llm_name = req["llm_name"]
-        api_key = "xxxxxxxxxxxxxxx"
+        api_key = req.get("api_key","xxxxxxxxxxxxxxx")

     llm = {
         "tenant_id": current_user.id,
         "llm_factory": factory,
         "model_type": req["model_type"],
         "llm_name": llm_name,
         "api_base": req.get("api_base", ""),
         "api_key": api_key
     }

     msg = ""
     if llm["model_type"] == LLMType.EMBEDDING.value:
         mdl = EmbeddingModel[factory](
-            key=llm['api_key'] if factory in ["VolcEngine", "Bedrock"] else None,
+            key=llm['api_key'],
             model_name=llm["llm_name"],
             base_url=llm["api_base"])
         try:
             arr, tc = mdl.encode(["Test if the api key is available"])
             if len(arr[0]) == 0 or tc == 0:
                 raise Exception("Fail")
         except Exception as e:
             msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
     elif llm["model_type"] == LLMType.CHAT.value:
         mdl = ChatModel[factory](
-            key=llm['api_key'] if factory in ["VolcEngine", "Bedrock"] else None,
+            key=llm['api_key'],
             model_name=llm["llm_name"],
             base_url=llm["api_base"]
         )
         try:
             m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                 "temperature": 0.9})
             if not tc:
                 raise Exception(m)
         except Exception as e:
             msg += f"\nFail to access model({llm['llm_name']})." + str(
                 e)
+    elif llm["model_type"] == LLMType.RERANK:
+        mdl = RerankModel[factory](
+            key=llm["api_key"],
+            model_name=llm["llm_name"],
+            base_url=llm["api_base"]
+        )
+        try:
+            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!"])
+            if len(arr) == 0 or tc == 0:
+                raise Exception("Not known.")
+        except Exception as e:
+            msg += f"\nFail to access model({llm['llm_name']})." + str(
+                e)
+    elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
+        mdl = CvModel[factory](
+            key=llm["api_key"],
+            model_name=llm["llm_name"],
+            base_url=llm["api_base"]
+        )
+        try:
+            img_url = (
+                "https://upload.wikimedia.org/wikipedia/comm"
+                "ons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/256"
+                "0px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+            )
+            res = requests.get(img_url)
+            if res.status_code == 200:
+                m, tc = mdl.describe(res.content)
+                if not tc:
+                    raise Exception(m)
+            else:
+                pass
+        except Exception as e:
+            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
+    elif llm["model_type"] == LLMType.TTS:
+        mdl = TTSModel[factory](
+            key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
+        )
+        try:
+            for resp in mdl.tts("Hello~ Ragflower!"):
+                pass
+        except RuntimeError as e:
+            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
     else:
         # TODO: check other type of models
         pass

     if msg:
         return get_data_error_result(retmsg=msg)

     if not TenantLLMService.filter_update(
             [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
         TenantLLMService.save(**llm)

     return get_json_result(data=True)


 @manager.route('/delete_llm', methods=['POST'])
 @login_required
 @validate_request("llm_factory", "llm_name")
 def delete_llm():
     req = request.json
     TenantLLMService.filter_delete(
         [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
     return get_json_result(data=True)


 @manager.route('/my_llms', methods=['GET'])
 @login_required
 def my_llms():
     try:
         res = {}
         for o in TenantLLMService.get_my_llms(current_user.id):
             if o["llm_factory"] not in res:
                 res[o["llm_factory"]] = {
                     "tags": o["tags"],
                     "llm": []
                 }
             res[o["llm_factory"]]["llm"].append({
                 "type": o["model_type"],
                 "name": o["llm_name"],
                 "used_token": o["used_tokens"]
             })
         return get_json_result(data=res)
     except Exception as e:
         return server_error_response(e)


 @manager.route('/list', methods=['GET'])
 @login_required
 def list_app():
     model_type = request.args.get("model_type")
     try:
         objs = TenantLLMService.query(tenant_id=current_user.id)
         facts = set([o.to_dict()["llm_factory"] for o in objs if o.api_key])
         llms = LLMService.get_all()
         llms = [m.to_dict()
                 for m in llms if m.status == StatusEnum.VALID.value]
         for m in llms:
             m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in ["Youdao","FastEmbed", "BAAI"]

         llm_set = set([m["llm_name"] for m in llms])
         for o in objs:
             if not o.api_key:continue
             if o.llm_name in llm_set:continue
             llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

         res = {}
         for m in llms:
             if model_type and m["model_type"].find(model_type)<0:
                 continue
             if m["fid"] not in res:
                 res[m["fid"]] = []
             res[m["fid"]].append(m)

         return get_json_result(data=res)
     except Exception as e:
         return server_error_response(e)
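Note the shape of the revised set_api_key: it probes at most three models per factory (the new `[:3]` slice) and accumulates every failure into one message instead of aborting on the first. A sketch of that probe-and-accumulate pattern, independent of ragflow's model classes, follows; every name here is hypothetical.

    def probe_models(candidates):
        """Try each (model_type, probe) pair, at most three in total, and
        accumulate failures into one error string -- the same shape as
        set_api_key() above."""
        passed, msg = set(), ""
        for model_type, probe in candidates[:3]:
            if model_type in passed:
                continue  # one success per model type is enough
            try:
                probe()
                passed.add(model_type)
            except Exception as e:
                msg += f"\nFail to access {model_type} model: {e}"
        return msg


    errors = probe_models([("chat", lambda: None), ("embedding", lambda: 1 / 0)])
    print(errors or "all probes passed")  # reports only the embedding failure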
api/apps/sdk/assistant.py (new file, 304 lines)
@@ -0,0 +1,304 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request

from api.db import StatusEnum
from api.db.db_models import TenantLLM
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService
from api.db.services.user_service import TenantService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_data_error_result, token_required
from api.utils.api_utils import get_json_result


@manager.route('/save', methods=['POST'])
@token_required
def save(tenant_id):
    req = request.json
    # dataset
    if req.get("knowledgebases") == []:
        return get_data_error_result(retmsg="knowledgebases can not be empty list")
    kb_list = []
    if req.get("knowledgebases"):
        for kb in req.get("knowledgebases"):
            if not kb["id"]:
                return get_data_error_result(retmsg="knowledgebase needs id")
            if not KnowledgebaseService.query(id=kb["id"], tenant_id=tenant_id):
                return get_data_error_result(retmsg="you do not own the knowledgebase")
            # if not DocumentService.query(kb_id=kb["id"]):
            #     return get_data_error_result(retmsg="There is a invalid knowledgebase")
            kb_list.append(kb["id"])
    req["kb_ids"] = kb_list
    # llm
    llm = req.get("llm")
    if llm:
        if "model_name" in llm:
            req["llm_id"] = llm.pop("model_name")
        req["llm_setting"] = req.pop("llm")
    e, tenant = TenantService.get_by_id(tenant_id)
    if not e:
        return get_data_error_result(retmsg="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
                prompt[new_key] = prompt.pop(old_key)
        for key in key_list:
            if key in prompt:
                req[key] = prompt.pop(key)
        req["prompt_config"] = req.pop("prompt")
    # create
    if "id" not in req:
        # dataset
        if not kb_list:
            return get_data_error_result(retmsg="knowledgebases are required!")
        # init
        req["id"] = get_uuid()
        req["description"] = req.get("description", "A helpful Assistant")
        req["icon"] = req.get("avatar", "")
        req["top_n"] = req.get("top_n", 6)
        req["top_k"] = req.get("top_k", 1024)
        req["rerank_id"] = req.get("rerank_id", "")
        if req.get("llm_id"):
            if not TenantLLMService.query(llm_name=req["llm_id"]):
                return get_data_error_result(retmsg="the model_name does not exist.")
        else:
            req["llm_id"] = tenant.llm_id
        if not req.get("name"):
            return get_data_error_result(retmsg="name is required.")
        if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_data_error_result(retmsg="Duplicated assistant name in creating dataset.")
        # tenant_id
        if req.get("tenant_id"):
            return get_data_error_result(retmsg="tenant_id must not be provided.")
        req["tenant_id"] = tenant_id
        # prompt more parameter
        default_prompt = {
            "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括"知识库中未找到您要的答案!"这句话。回答需要考虑聊天历史。
      以下是知识库:
      {knowledge}
      以上是知识库。""",
            "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
            "parameters": [
                {"key": "knowledge", "optional": False}
            ],
            "empty_response": "Sorry! 知识库中未找到相关内容!"
        }
        key_list_2 = ["system", "prologue", "parameters", "empty_response"]
        if "prompt_config" not in req:
            req['prompt_config'] = {}
        for key in key_list_2:
            temp = req['prompt_config'].get(key)
            if not temp:
                req['prompt_config'][key] = default_prompt[key]
        for p in req['prompt_config']["parameters"]:
            if p["optional"]:
                continue
            if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
                return get_data_error_result(
                    retmsg="Parameter '{}' is not used".format(p["key"]))
        # save
        if not DialogService.save(**req):
            return get_data_error_result(retmsg="Fail to new an assistant!")
        # response
        e, res = DialogService.get_by_id(req["id"])
        if not e:
            return get_data_error_result(retmsg="Fail to new an assistant!")
        res = res.to_json()
        renamed_dict = {}
        for key, value in res["prompt_config"].items():
            new_key = key_mapping.get(key, key)
            renamed_dict[new_key] = value
        res["prompt"] = renamed_dict
        del res["prompt_config"]
        new_dict = {"similarity_threshold": res["similarity_threshold"],
                    "keywords_similarity_weight": res["vector_similarity_weight"],
                    "top_n": res["top_n"],
                    "rerank_model": res['rerank_id']}
        res["prompt"].update(new_dict)
        for key in key_list:
            del res[key]
        res["llm"] = res.pop("llm_setting")
        res["llm"]["model_name"] = res.pop("llm_id")
        del res["kb_ids"]
        res["knowledgebases"] = req["knowledgebases"]
        res["avatar"] = res.pop("icon")
        return get_json_result(data=res)
    else:
        # authorization
        if not DialogService.query(tenant_id=tenant_id, id=req["id"], status=StatusEnum.VALID.value):
            return get_json_result(data=False, retmsg='You do not own the assistant', retcode=RetCode.OPERATING_ERROR)
        # prompt
        if not req["id"]:
            return get_data_error_result(retmsg="id can not be empty")
        e, res = DialogService.get_by_id(req["id"])
        res = res.to_json()
        if "llm_id" in req:
            if not TenantLLMService.query(llm_name=req["llm_id"]):
                return get_data_error_result(retmsg="the model_name does not exist.")
        if "name" in req:
            if not req.get("name"):
                return get_data_error_result(retmsg="name is not empty.")
            if req["name"].lower() != res["name"].lower() \
                    and len(
                DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
                return get_data_error_result(retmsg="Duplicated assistant name in updating dataset.")
        if "prompt_config" in req:
            res["prompt_config"].update(req["prompt_config"])
            for p in res["prompt_config"]["parameters"]:
                if p["optional"]:
                    continue
                if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
                    return get_data_error_result(retmsg="Parameter '{}' is not used".format(p["key"]))
        if "llm_setting" in req:
            res["llm_setting"].update(req["llm_setting"])
        req["prompt_config"] = res["prompt_config"]
        req["llm_setting"] = res["llm_setting"]
        # avatar
        if "avatar" in req:
            req["icon"] = req.pop("avatar")
        assistant_id = req.pop("id")
        if "knowledgebases" in req:
            req.pop("knowledgebases")
        if not DialogService.update_by_id(assistant_id, req):
            return get_data_error_result(retmsg="Assistant not found!")
        return get_json_result(data=True)


@manager.route('/delete', methods=['DELETE'])
@token_required
def delete(tenant_id):
    req = request.args
    if "id" not in req:
        return get_data_error_result(retmsg="id is required")
    id = req['id']
    if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
        return get_json_result(data=False, retmsg='you do not own the assistant.', retcode=RetCode.OPERATING_ERROR)

    temp_dict = {"status": StatusEnum.INVALID.value}
    DialogService.update_by_id(req["id"], temp_dict)
    return get_json_result(data=True)


@manager.route('/get', methods=['GET'])
@token_required
def get(tenant_id):
    req = request.args
    if "id" in req:
        id = req["id"]
        ass = DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value)
        if not ass:
            return get_json_result(data=False, retmsg='You do not own the assistant.', retcode=RetCode.OPERATING_ERROR)
        if "name" in req:
            name = req["name"]
            if ass[0].name != name:
                return get_json_result(data=False, retmsg='name does not match id.', retcode=RetCode.OPERATING_ERROR)
        res = ass[0].to_json()
    else:
        if "name" in req:
            name = req["name"]
            ass = DialogService.query(name=name, tenant_id=tenant_id, status=StatusEnum.VALID.value)
            if not ass:
                return get_json_result(data=False, retmsg='You do not own the assistant.',
                                       retcode=RetCode.OPERATING_ERROR)
            res = ass[0].to_json()
        else:
            return get_data_error_result(retmsg="At least one of `id` or `name` must be provided.")
    renamed_dict = {}
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    for key, value in res["prompt_config"].items():
        new_key = key_mapping.get(key, key)
        renamed_dict[new_key] = value
    res["prompt"] = renamed_dict
    del res["prompt_config"]
    new_dict = {"similarity_threshold": res["similarity_threshold"],
                "keywords_similarity_weight": res["vector_similarity_weight"],
                "top_n": res["top_n"],
                "rerank_model": res['rerank_id']}
    res["prompt"].update(new_dict)
    for key in key_list:
        del res[key]
    res["llm"] = res.pop("llm_setting")
    res["llm"]["model_name"] = res.pop("llm_id")
    kb_list = []
    for kb_id in res["kb_ids"]:
        kb = KnowledgebaseService.query(id=kb_id)
        kb_list.append(kb[0].to_json())
    del res["kb_ids"]
    res["knowledgebases"] = kb_list
    res["avatar"] = res.pop("icon")
    return get_json_result(data=res)


@manager.route('/list', methods=['GET'])
@token_required
def list_assistants(tenant_id):
    assts = DialogService.query(
        tenant_id=tenant_id,
        status=StatusEnum.VALID.value,
        reverse=True,
        order_by=DialogService.model.create_time)
    assts = [d.to_dict() for d in assts]
    list_assts = []
    renamed_dict = {}
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    for res in assts:
        for key, value in res["prompt_config"].items():
            new_key = key_mapping.get(key, key)
            renamed_dict[new_key] = value
        res["prompt"] = renamed_dict
        del res["prompt_config"]
        new_dict = {"similarity_threshold": res["similarity_threshold"],
                    "keywords_similarity_weight": res["vector_similarity_weight"],
                    "top_n": res["top_n"],
                    "rerank_model": res['rerank_id']}
        res["prompt"].update(new_dict)
        for key in key_list:
            del res[key]
        res["llm"] = res.pop("llm_setting")
        res["llm"]["model_name"] = res.pop("llm_id")
        kb_list = []
        for kb_id in res["kb_ids"]:
            kb = KnowledgebaseService.query(id=kb_id)
            kb_list.append(kb[0].to_json())
        del res["kb_ids"]
        res["knowledgebases"] = kb_list
        res["avatar"] = res.pop("icon")
        list_assts.append(res)
    return get_json_result(data=list_assts)
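The SDK handlers above repeatedly translate internal column names into public field names by looping over a key_mapping dict. A small helper showing the same idea follows; the helper name is hypothetical, since the handlers inline this loop rather than factoring it out.

    def rename_keys(record, key_mapping):
        """Return a copy of `record` with keys renamed per `key_mapping`
        (internal name -> public name); unmapped keys pass through."""
        return {key_mapping.get(k, k): v for k, v in record.items()}


    key_mapping = {"parameters": "variables", "prologue": "opener",
                   "system": "prompt", "rerank_id": "rerank_model"}
    row = {"system": "You are...", "prologue": "Hi!", "top_n": 6}
    print(rename_keys(row, key_mapping))
    # {'prompt': 'You are...', 'opener': 'Hi!', 'top_n': 6}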
224
api/apps/sdk/dataset.py
Normal file
224
api/apps/sdk/dataset.py
Normal file
@ -0,0 +1,224 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from flask import request
|
||||||
|
|
||||||
|
from api.db import StatusEnum, FileSource
|
||||||
|
from api.db.db_models import File
|
||||||
|
from api.db.services.document_service import DocumentService
|
||||||
|
from api.db.services.file2document_service import File2DocumentService
|
||||||
|
from api.db.services.file_service import FileService
|
||||||
|
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||||
|
from api.db.services.user_service import TenantService
|
||||||
|
from api.settings import RetCode
|
||||||
|
from api.utils import get_uuid
|
||||||
|
from api.utils.api_utils import get_json_result, token_required, get_data_error_result
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/save', methods=['POST'])
|
||||||
|
@token_required
|
||||||
|
def save(tenant_id):
|
||||||
|
req = request.json
|
||||||
|
e, t = TenantService.get_by_id(tenant_id)
|
||||||
|
if "id" not in req:
|
||||||
|
if "tenant_id" in req or "embedding_model" in req:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Tenant_id or embedding_model must not be provided")
|
||||||
|
if "name" not in req:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Name is not empty!")
|
||||||
|
req['id'] = get_uuid()
|
||||||
|
req["name"] = req["name"].strip()
|
||||||
|
if req["name"] == "":
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Name is not empty string!")
|
||||||
|
if KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Duplicated knowledgebase name in creating dataset.")
|
||||||
|
req["tenant_id"] = req['created_by'] = tenant_id
|
||||||
|
req['embedding_model'] = t.embd_id
|
||||||
|
key_mapping = {
|
||||||
|
"chunk_num": "chunk_count",
|
||||||
|
"doc_num": "document_count",
|
||||||
|
"parser_id": "parse_method",
|
||||||
|
"embd_id": "embedding_model"
|
||||||
|
}
|
||||||
|
mapped_keys = {new_key: req[old_key] for new_key, old_key in key_mapping.items() if old_key in req}
|
||||||
|
req.update(mapped_keys)
|
||||||
|
if not KnowledgebaseService.save(**req):
|
||||||
|
return get_data_error_result(retmsg="Create dataset error.(Database error)")
|
||||||
|
renamed_data = {}
|
||||||
|
e, k = KnowledgebaseService.get_by_id(req["id"])
|
||||||
|
for key, value in k.to_dict().items():
|
||||||
|
new_key = key_mapping.get(key, key)
|
||||||
|
renamed_data[new_key] = value
|
||||||
|
return get_json_result(data=renamed_data)
|
||||||
|
else:
|
||||||
|
invalid_keys = {"embd_id", "chunk_num", "doc_num", "parser_id"}
|
||||||
|
if any(key in req for key in invalid_keys):
|
||||||
|
return get_data_error_result(retmsg="The input parameters are invalid.")
|
||||||
|
|
||||||
|
if "tenant_id" in req:
|
||||||
|
if req["tenant_id"] != tenant_id:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Can't change tenant_id.")
|
||||||
|
|
||||||
|
if "embedding_model" in req:
|
||||||
|
if req["embedding_model"] != t.embd_id:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Can't change embedding_model.")
|
||||||
|
req.pop("embedding_model")
|
||||||
|
|
||||||
|
if not KnowledgebaseService.query(
|
||||||
|
created_by=tenant_id, id=req["id"]):
|
||||||
|
return get_json_result(
|
||||||
|
data=False, retmsg='You do not own the dataset.',
|
||||||
|
retcode=RetCode.OPERATING_ERROR)
|
||||||
|
|
||||||
|
if not req["id"]:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="id can not be empty.")
|
||||||
|
e, kb = KnowledgebaseService.get_by_id(req["id"])
|
||||||
|
|
||||||
|
if "chunk_count" in req:
|
||||||
|
if req["chunk_count"] != kb.chunk_num:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Can't change chunk_count.")
|
||||||
|
req.pop("chunk_count")
|
||||||
|
|
||||||
|
if "document_count" in req:
|
||||||
|
if req['document_count'] != kb.doc_num:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Can't change document_count.")
|
||||||
|
req.pop("document_count")
|
||||||
|
|
||||||
|
if "parse_method" in req:
|
||||||
|
if kb.chunk_num != 0 and req['parse_method'] != kb.parser_id:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="If chunk count is not 0, parse method is not changable.")
|
||||||
|
req['parser_id'] = req.pop('parse_method')
|
||||||
|
if "name" in req:
|
||||||
|
req["name"] = req["name"].strip()
|
||||||
|
if req["name"].lower() != kb.name.lower() \
|
||||||
|
and len(KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id,
|
||||||
|
status=StatusEnum.VALID.value)) > 0:
|
||||||
|
return get_data_error_result(
|
||||||
|
retmsg="Duplicated knowledgebase name in updating dataset.")
|
||||||
|
|
||||||
|
del req["id"]
|
||||||
|
if not KnowledgebaseService.update_by_id(kb.id, req):
|
||||||
|
return get_data_error_result(retmsg="Update dataset error.(Database error)")
|
||||||
|
return get_json_result(data=True)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/delete', methods=['DELETE'])
@token_required
def delete(tenant_id):
    req = request.args
    if "id" not in req:
        return get_data_error_result(
            retmsg="id is required")
    kbs = KnowledgebaseService.query(
        created_by=tenant_id, id=req["id"])
    if not kbs:
        return get_json_result(
            data=False, retmsg='You do not own the dataset',
            retcode=RetCode.OPERATING_ERROR)

    for doc in DocumentService.query(kb_id=req["id"]):
        if not DocumentService.remove_document(doc, kbs[0].tenant_id):
            return get_data_error_result(
                retmsg="Remove document error.(Database error)")
        f2d = File2DocumentService.get_by_document_id(doc.id)
        FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
        File2DocumentService.delete_by_document_id(doc.id)

    if not KnowledgebaseService.delete_by_id(req["id"]):
        return get_data_error_result(
            retmsg="Delete dataset error.(Database error)")
    return get_json_result(data=True)

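# Usage sketch (illustrative only; <HOST>, <API_KEY>, <dataset_id> and the URL
# mount prefix are placeholder assumptions, not values defined in this file):
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.delete(BASE + "/delete",
#                       headers={"Authorization": "Bearer <API_KEY>"},
#                       params={"id": "<dataset_id>"})
#   print(r.json())  # {"retcode": 0, "data": true} on success
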
@manager.route('/list', methods=['GET'])
@token_required
def list_datasets(tenant_id):
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 1024))
    orderby = request.args.get("orderby", "create_time")
    desc = bool(request.args.get("desc", True))
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_by_tenant_ids(
        [m["tenant_id"] for m in tenants], tenant_id, page_number, items_per_page, orderby, desc)
    # Rename internal column names to the public SDK field names.
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "parse_method",
        "embd_id": "embedding_model"
    }
    renamed_list = []
    for kb in kbs:
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_json_result(data=renamed_list)

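# Usage sketch (placeholders only; <HOST>, <API_KEY> and the mount prefix are
# assumptions):
#   import requests
#   BASE = "http://<HOST>/<mount>"
#   r = requests.get(BASE + "/list",
#                    headers={"Authorization": "Bearer <API_KEY>"},
#                    params={"page": 1, "page_size": 32, "orderby": "create_time"})
#   for ds in r.json()["data"]:
#       print(ds["name"], ds["chunk_count"], ds["document_count"])
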
@manager.route('/detail', methods=['GET'])
@token_required
def detail(tenant_id):
    req = request.args
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "parse_method",
        "embd_id": "embedding_model"
    }
    renamed_data = {}
    if "id" in req:
        id = req["id"]
        kb = KnowledgebaseService.query(created_by=tenant_id, id=req["id"])
        if not kb:
            return get_json_result(
                data=False, retmsg='You do not own the dataset.',
                retcode=RetCode.OPERATING_ERROR)
        if "name" in req:
            name = req["name"]
            if kb[0].name != name:
                return get_json_result(
                    data=False, retmsg='You do not own the dataset.',
                    retcode=RetCode.OPERATING_ERROR)
        e, k = KnowledgebaseService.get_by_id(id)
        for key, value in k.to_dict().items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        return get_json_result(data=renamed_data)
    else:
        if "name" in req:
            name = req["name"]
            e, k = KnowledgebaseService.get_by_name(kb_name=name, tenant_id=tenant_id)
            if not e:
                return get_json_result(
                    data=False, retmsg='You do not own the dataset.',
                    retcode=RetCode.OPERATING_ERROR)
            for key, value in k.to_dict().items():
                new_key = key_mapping.get(key, key)
                renamed_data[new_key] = value
            return get_json_result(data=renamed_data)
        else:
            return get_data_error_result(
                retmsg="At least one of `id` or `name` must be provided.")

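# Usage sketch (placeholders only):
#   import requests
#   BASE = "http://<HOST>/<mount>"
#   r = requests.get(BASE + "/detail",
#                    headers={"Authorization": "Bearer <API_KEY>"},
#                    params={"name": "<dataset_name>"})
#   print(r.json()["data"]["parse_method"])
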
api/apps/sdk/doc.py  (new file, 529 lines)
@@ -0,0 +1,529 @@
import datetime
import hashlib
import json
import pathlib
import re
import traceback
from functools import partial
from io import BytesIO

from elasticsearch_dsl import Q
from flask import request, send_file
from flask_login import login_required, current_user

from api.db import FileSource, FileType, LLMType, ParserType, TaskStatus
from api.db.db_models import File, Task
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import RetCode, retrievaler, kg_retrievaler
from api.utils.api_utils import (construct_error_response, construct_json_result,
                                 get_data_error_result, get_json_result,
                                 server_error_response, token_required, validate_request)
from rag.app import book, laws, manual, naive, one, paper, presentation, qa, resume, table, picture, audio, email
from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import search, rag_tokenizer, keyword_extraction
from rag.utils import rmSpace
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL

MAXIMUM_OF_UPLOADING_FILES = 256

@manager.route('/dataset/<dataset_id>/documents/upload', methods=['POST'])
@token_required
def upload(dataset_id, tenant_id):
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if not e:
        raise LookupError(f"Can't find the knowledgebase with ID {dataset_id}!")
    err, _ = FileService.upload_document(kb, file_objs, tenant_id)
    if err:
        return get_json_result(
            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    return get_json_result(data=True)

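# Usage sketch (placeholders only; multiple files may be posted under the same
# "file" form field):
#   import requests
#   with open("report.pdf", "rb") as f:
#       r = requests.post("http://<HOST>/dataset/<dataset_id>/documents/upload",
#                         headers={"Authorization": "Bearer <API_KEY>"},
#                         files={"file": ("report.pdf", f)})
#   print(r.json())
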
@manager.route('/infos', methods=['GET'])
@token_required
def docinfos(tenant_id):
    req = request.args
    if "id" in req:
        doc_id = req["id"]
        e, doc = DocumentService.get_by_id(doc_id)
        return get_json_result(data=doc.to_json())
    if "name" in req:
        doc_name = req["name"]
        doc_id = DocumentService.get_doc_id_by_doc_name(doc_name)
        e, doc = DocumentService.get_by_id(doc_id)
        return get_json_result(data=doc.to_json())
    # Neither `id` nor `name` was given: fail explicitly instead of falling
    # off the end of the function and returning None.
    return get_data_error_result(retmsg="At least one of `id` or `name` must be provided.")

@manager.route('/save', methods=['POST'])
@token_required
def save_doc(tenant_id):
    req = request.json
    # Get doc by id or name
    doc_id = None
    if "id" in req:
        doc_id = req["id"]
    elif "name" in req:
        doc_name = req["name"]
        doc_id = DocumentService.get_doc_id_by_doc_name(doc_name)
    if not doc_id:
        return get_json_result(retcode=400, retmsg="Document ID or name is required")
    e, doc = DocumentService.get_by_id(doc_id)
    if not e:
        return get_data_error_result(retmsg="Document not found!")

    # Other values can't be changed
    if "chunk_num" in req:
        if req["chunk_num"] != doc.chunk_num:
            return get_data_error_result(
                retmsg="Can't change chunk_count.")
    if "progress" in req:
        if req["progress"] != doc.progress:
            return get_data_error_result(
                retmsg="Can't change progress.")

    # Change name or parse_method
    if "name" in req and req["name"] != doc.name:
        try:
            if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                    doc.name.lower()).suffix:
                return get_json_result(
                    data=False,
                    retmsg="The extension of file can't be changed",
                    retcode=RetCode.ARGUMENT_ERROR)
            for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
                if d.name == req["name"]:
                    return get_data_error_result(
                        retmsg="Duplicated document name in the same knowledgebase.")

            if not DocumentService.update_by_id(
                    doc_id, {"name": req["name"]}):
                return get_data_error_result(
                    retmsg="Database error (Document rename)!")

            informs = File2DocumentService.get_by_document_id(doc_id)
            if informs:
                e, file = FileService.get_by_id(informs[0].file_id)
                FileService.update_by_id(file.id, {"name": req["name"]})
        except Exception as e:
            return server_error_response(e)

    if "parser_id" in req:
        try:
            if doc.parser_id.lower() == req["parser_id"].lower():
                if "parser_config" in req:
                    if req["parser_config"] == doc.parser_config:
                        return get_json_result(data=True)
                else:
                    return get_json_result(data=True)

            if doc.type == FileType.VISUAL or re.search(
                    r"\.(ppt|pptx|pages)$", doc.name):
                return get_data_error_result(retmsg="Not supported yet!")

            e = DocumentService.update_by_id(doc.id,
                                             {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                              "run": TaskStatus.UNSTART.value})
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            if "parser_config" in req:
                DocumentService.update_parser_config(doc.id, req["parser_config"])
            if doc.token_num > 0:
                e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                        doc.process_duation * -1)
                if not e:
                    return get_data_error_result(retmsg="Document not found!")
                # Was req["doc_id"]: this route receives "id"/"name", so use the
                # resolved doc_id instead.
                tenant_id = DocumentService.get_tenant_id(doc_id)
                if not tenant_id:
                    return get_data_error_result(retmsg="Tenant not found!")
                ELASTICSEARCH.deleteByQuery(
                    Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        except Exception as e:
            return server_error_response(e)
    return get_json_result(data=True)

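# Usage sketch (placeholders only): rename a document by id.
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.post(BASE + "/save",
#                     headers={"Authorization": "Bearer <API_KEY>"},
#                     json={"id": "<doc_id>", "name": "renamed.pdf"})
#   print(r.json())
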
@manager.route('/change_parser', methods=['POST'])
@token_required
def change_parser(tenant_id):
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if doc.parser_id.lower() == req["parser_id"].lower():
            if "parser_config" in req:
                if req["parser_config"] == doc.parser_config:
                    return get_json_result(data=True)
            else:
                return get_json_result(data=True)

        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_data_error_result(retmsg="Not supported yet!")

        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if "parser_config" in req:
            DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(req["doc_id"])
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@manager.route('/rename', methods=['POST'])
@login_required
@validate_request("doc_id", "name")
def rename():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                doc.name.lower()).suffix:
            return get_json_result(
                data=False,
                retmsg="The extension of file can't be changed",
                retcode=RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_data_error_result(
                    retmsg="Duplicated document name in the same knowledgebase.")

        if not DocumentService.update_by_id(
                req["doc_id"], {"name": req["name"]}):
            return get_data_error_result(
                retmsg="Database error (Document rename)!")

        informs = File2DocumentService.get_by_document_id(req["doc_id"])
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@manager.route("/<document_id>", methods=["GET"])
|
||||||
|
@token_required
|
||||||
|
def download_document(dataset_id, document_id):
|
||||||
|
try:
|
||||||
|
# Check whether there is this document
|
||||||
|
exist, document = DocumentService.get_by_id(document_id)
|
||||||
|
if not exist:
|
||||||
|
return construct_json_result(message=f"This document '{document_id}' cannot be found!",
|
||||||
|
code=RetCode.ARGUMENT_ERROR)
|
||||||
|
|
||||||
|
# The process of downloading
|
||||||
|
doc_id, doc_location = File2DocumentService.get_minio_address(doc_id=document_id) # minio address
|
||||||
|
file_stream = STORAGE_IMPL.get(doc_id, doc_location)
|
||||||
|
if not file_stream:
|
||||||
|
return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)
|
||||||
|
|
||||||
|
file = BytesIO(file_stream)
|
||||||
|
|
||||||
|
# Use send_file with a proper filename and MIME type
|
||||||
|
return send_file(
|
||||||
|
file,
|
||||||
|
as_attachment=True,
|
||||||
|
download_name=document.name,
|
||||||
|
mimetype='application/octet-stream' # Set a default MIME type
|
||||||
|
)
|
||||||
|
|
||||||
|
# Error
|
||||||
|
except Exception as e:
|
||||||
|
return construct_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
|
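# Usage sketch (placeholders only):
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.get(BASE + "/<document_id>",
#                    headers={"Authorization": "Bearer <API_KEY>"})
#   with open("downloaded.bin", "wb") as out:
#       out.write(r.content)
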
@manager.route('/dataset/<dataset_id>/documents', methods=['GET'])
@token_required
def list_docs(dataset_id, tenant_id):
    kb_id = request.args.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    tenants = UserTenantService.query(user_id=tenant_id)
    for tenant in tenants:
        if KnowledgebaseService.query(
                tenant_id=tenant.tenant_id, id=kb_id):
            break
    else:
        return get_json_result(
            data=False, retmsg='Only owner of knowledgebase authorized for this operation.',
            retcode=RetCode.OPERATING_ERROR)
    keywords = request.args.get("keywords", "")

    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 15))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
    try:
        docs, tol = DocumentService.get_by_kb_id(
            kb_id, page_number, items_per_page, orderby, desc, keywords)
        return get_json_result(data={"total": tol, "docs": docs})
    except Exception as e:
        return server_error_response(e)

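# Usage sketch (placeholders only; note this route reads `kb_id` from the query
# string in addition to the `dataset_id` path segment):
#   import requests
#   r = requests.get("http://<HOST>/dataset/<dataset_id>/documents",
#                    headers={"Authorization": "Bearer <API_KEY>"},
#                    params={"kb_id": "<dataset_id>", "page": 1, "page_size": 15})
#   print(r.json()["data"]["total"])
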
@manager.route('/delete', methods=['DELETE'])
@token_required
def rm(tenant_id):
    req = request.args
    if "doc_id" not in req:
        return get_data_error_result(
            retmsg="doc_id is required")
    doc_ids = req["doc_id"]
    if isinstance(doc_ids, str):
        doc_ids = [doc_ids]
    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    errors = ""
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")

            b, n = File2DocumentService.get_minio_address(doc_id=doc_id)

            if not DocumentService.remove_document(doc, tenant_id):
                return get_data_error_result(
                    retmsg="Database error (Document removal)!")

            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)

            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)

    if errors:
        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)

    return get_json_result(data=True, retmsg="success")

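# Usage sketch (placeholders only):
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.delete(BASE + "/delete",
#                       headers={"Authorization": "Bearer <API_KEY>"},
#                       params={"doc_id": "<doc_id>"})
#   print(r.json())
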
@manager.route("/<document_id>/status", methods=["GET"])
|
||||||
|
@token_required
|
||||||
|
def show_parsing_status(tenant_id, document_id):
|
||||||
|
try:
|
||||||
|
# valid document
|
||||||
|
exist, _ = DocumentService.get_by_id(document_id)
|
||||||
|
if not exist:
|
||||||
|
return construct_json_result(code=RetCode.DATA_ERROR,
|
||||||
|
message=f"This document: '{document_id}' is not a valid document.")
|
||||||
|
|
||||||
|
_, doc = DocumentService.get_by_id(document_id) # get doc object
|
||||||
|
doc_attributes = doc.to_dict()
|
||||||
|
|
||||||
|
return construct_json_result(
|
||||||
|
data={"progress": doc_attributes["progress"], "status": TaskStatus(doc_attributes["status"]).name},
|
||||||
|
code=RetCode.SUCCESS
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
return construct_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/run', methods=['POST'])
@token_required
def run(tenant_id):
    req = request.json
    try:
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value:
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
            DocumentService.update_by_id(id, info)
            # if str(req["run"]) == TaskStatus.CANCEL.value:
            tenant_id = DocumentService.get_tenant_id(id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

            if str(req["run"]) == TaskStatus.RUNNING.value:
                TaskService.filter_delete([Task.doc_id == id])
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
                bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
                queue_tasks(doc, bucket, name)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

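# Usage sketch (placeholders only; the "run" value below assumes
# TaskStatus.RUNNING.value == "1", which is not shown in this file):
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.post(BASE + "/run",
#                     headers={"Authorization": "Bearer <API_KEY>"},
#                     json={"doc_ids": ["<doc_id_1>", "<doc_id_2>"], "run": "1"})
#   print(r.json())
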
@manager.route('/chunk/list', methods=['POST'])
@token_required
@validate_request("doc_id")
def list_chunk(tenant_id):
    req = request.json
    doc_id = req["doc_id"]
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req.get("keywords", "")
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        e, doc = DocumentService.get_by_id(doc_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        query = {
            "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
        }
        if "available_int" in req:
            query["available_int"] = int(req["available_int"])
        sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
        res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
        for id in sres.ids:
            d = {
                "chunk_id": id,
                "content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight
                else sres.field[id].get("content_with_weight", ""),
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "available_int": sres.field[id].get("available_int", 1),
                "positions": sres.field[id].get("position_int", "").split("\t")
            }
            if len(d["positions"]) % 5 == 0:
                poss = []
                for i in range(0, len(d["positions"]), 5):
                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
                d["positions"] = poss
            res["chunks"].append(d)
        return get_json_result(data=res)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg='No chunk found!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)

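# Usage sketch (placeholders only): page through the chunks of one document.
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.post(BASE + "/chunk/list",
#                     headers={"Authorization": "Bearer <API_KEY>"},
#                     json={"doc_id": "<doc_id>", "page": 1, "size": 30})
#   for c in r.json()["data"]["chunks"]:
#       print(c["chunk_id"], c["content_with_weight"][:60])
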
@manager.route('/chunk/create', methods=['POST'])
@token_required
@validate_request("doc_id", "content_with_weight")
def create(tenant_id):
    req = request.json
    md5 = hashlib.md5()
    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
    chunk_id = md5.hexdigest()
    d = {"id": chunk_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
         "content_with_weight": req["content_with_weight"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_kwd", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        d["kb_id"] = [doc.kb_id]
        d["docnm_kwd"] = doc.name
        d["doc_id"] = doc.id

        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        # Weighted mix: 10% document-title embedding, 90% chunk-content embedding
        v = 0.1 * v[0] + 0.9 * v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))

        DocumentService.increment_chunk_num(
            doc.id, doc.kb_id, c, 1, 0)
        return get_json_result(data={"chunk": d})
        # return get_json_result(data={"chunk_id": chunk_id})
    except Exception as e:
        return server_error_response(e)

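# Usage sketch (placeholders only):
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.post(BASE + "/chunk/create",
#                     headers={"Authorization": "Bearer <API_KEY>"},
#                     json={"doc_id": "<doc_id>",
#                           "content_with_weight": "RAGFlow supports custom chunks.",
#                           "important_kwd": ["RAGFlow"]})
#   print(r.json()["data"]["chunk"]["id"])
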
@manager.route('/chunk/rm', methods=['POST'])
@token_required
@validate_request("chunk_ids", "doc_id")
def rm_chunk(tenant_id):  # was `def rm_chunk():`; token_required injects tenant_id
    req = request.json
    try:
        # Use the token's tenant_id for the index name; current_user is not
        # available under token authentication.
        if not ELASTICSEARCH.deleteByQuery(
                Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
            return get_data_error_result(retmsg="Index updating failure")
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        deleted_chunk_ids = req["chunk_ids"]
        chunk_number = len(deleted_chunk_ids)
        DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
api/apps/sdk/session.py  (new file, 263 lines)
@@ -0,0 +1,263 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from uuid import uuid4

from flask import request, Response

from api.db import StatusEnum
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_data_error_result, get_json_result, token_required

@manager.route('/save', methods=['POST'])
@token_required
def set_conversation(tenant_id):
    req = request.json
    conv_id = req.get("id")
    if "assistant_id" in req:
        req["dialog_id"] = req.pop("assistant_id")
    if "id" in req:
        del req["id"]
        conv = ConversationService.query(id=conv_id)
        if not conv:
            return get_data_error_result(retmsg="Session does not exist")
        if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_data_error_result(retmsg="You do not own the session")
        if req.get("dialog_id"):
            dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
            if not dia:
                return get_data_error_result(retmsg="You do not own the assistant")
        if "dialog_id" in req and not req.get("dialog_id"):
            return get_data_error_result(retmsg="assistant_id can not be empty.")
        if "message" in req:
            return get_data_error_result(retmsg="message can not be changed")
        if "reference" in req:
            return get_data_error_result(retmsg="reference can not be changed")
        if "name" in req and not req.get("name"):
            return get_data_error_result(retmsg="name can not be empty.")
        if not ConversationService.update_by_id(conv_id, req):
            return get_data_error_result(retmsg="Session updates error")
        return get_json_result(data=True)

    if not req.get("dialog_id"):
        return get_data_error_result(retmsg="assistant_id is required.")
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
    if not dia:
        return get_data_error_result(retmsg="You do not own the assistant")
    conv = {
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": "Hi! I am your assistant, can I help you?"}]
    }
    if not conv.get("name"):
        return get_data_error_result(retmsg="name can not be empty.")
    ConversationService.save(**conv)
    e, conv = ConversationService.get_by_id(conv["id"])
    if not e:
        return get_data_error_result(retmsg="Fail to create a new session!")
    conv = conv.to_dict()
    conv['messages'] = conv.pop("message")
    conv["assistant_id"] = conv.pop("dialog_id")
    del conv["reference"]
    return get_json_result(data=conv)

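# Usage sketch (placeholders only): create a session, then rename it.
#   import requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   hdrs = {"Authorization": "Bearer <API_KEY>"}
#   s = requests.post(BASE + "/save", headers=hdrs,
#                     json={"assistant_id": "<assistant_id>", "name": "demo"}).json()["data"]
#   requests.post(BASE + "/save", headers=hdrs,
#                 json={"id": s["id"], "name": "demo-renamed"})
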
@manager.route('/completion', methods=['POST'])
@token_required
def completion(tenant_id):
    req = request.json
    # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
    #     {"role": "user", "content": "上海有吗?"}   # i.e. "Do you have it in Shanghai?"
    # ]}
    if "id" not in req:
        return get_data_error_result(retmsg="id is required")
    conv = ConversationService.query(id=req["id"])
    if not conv:
        return get_data_error_result(retmsg="Session does not exist")
    conv = conv[0]
    if not DialogService.query(id=conv.dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_data_error_result(retmsg="You do not own the session")
    msg = []
    question = {
        "content": req.get("question"),
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    # Drop system messages and any leading assistant greeting before handing
    # the history to the chat model.
    for m in conv.message:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)
    del req["id"]

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    def fillin_conv(ans):
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(ans["reference"])
        else:
            conv.reference[-1] = ans["reference"]
        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                            "id": message_id, "prompt": ans.get("prompt", "")}
        ans["id"] = message_id

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, **req):
                fillin_conv(ans)
                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

    if req.get("stream", True):
        resp = Response(stream(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    else:
        answer = None
        for ans in chat(dia, msg, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_json_result(data=answer)

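# Usage sketch (placeholders only): consume the SSE stream line by line.
#   import json, requests
#   BASE = "http://<HOST>/<mount>"   # depends on how this blueprint is mounted
#   r = requests.post(BASE + "/completion",
#                     headers={"Authorization": "Bearer <API_KEY>"},
#                     json={"id": "<session_id>", "question": "What is RAGFlow?", "stream": True},
#                     stream=True)
#   for line in r.iter_lines():
#       if line.startswith(b"data:"):
#           payload = json.loads(line[5:])
#           if payload["data"] is True:
#               break   # end-of-stream sentinel (see the final yield above)
#           print(payload["data"]["answer"])
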
@manager.route('/get', methods=['GET'])
@token_required
def get(tenant_id):
    req = request.args
    if "id" not in req:
        return get_data_error_result(retmsg="id is required")
    conv_id = req["id"]
    conv = ConversationService.query(id=conv_id)
    if not conv:
        return get_data_error_result(retmsg="Session does not exist")
    if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_data_error_result(retmsg="You do not own the session")
    conv = conv[0].to_dict()
    conv['messages'] = conv.pop("message")
    conv["assistant_id"] = conv.pop("dialog_id")
    if conv["reference"]:
        messages = conv["messages"]
        message_num = 0
        chunk_num = 0
        while message_num < len(messages):
            if message_num != 0 and messages[message_num]["role"] != "user":
                chunk_list = []
                if "chunks" in conv["reference"][chunk_num]:
                    chunks = conv["reference"][chunk_num]["chunks"]
                    for chunk in chunks:
                        new_chunk = {
                            "id": chunk["chunk_id"],
                            "content": chunk["content_with_weight"],
                            "document_id": chunk["doc_id"],
                            "document_name": chunk["docnm_kwd"],
                            "knowledgebase_id": chunk["kb_id"],
                            "image_id": chunk["img_id"],
                            "similarity": chunk["similarity"],
                            "vector_similarity": chunk["vector_similarity"],
                            "term_similarity": chunk["term_similarity"],
                            "positions": chunk["positions"],
                        }
                        chunk_list.append(new_chunk)
                chunk_num += 1
                messages[message_num]["reference"] = chunk_list
            message_num += 1
    del conv["reference"]
    return get_json_result(data=conv)

@manager.route('/list', methods=["GET"])
@token_required
def list(tenant_id):
    assistant_id = request.args["assistant_id"]
    if not DialogService.query(tenant_id=tenant_id, id=assistant_id, status=StatusEnum.VALID.value):
        return get_json_result(
            data=False, retmsg='Only owner of the assistant is authorized for this operation.',
            retcode=RetCode.OPERATING_ERROR)
    convs = ConversationService.query(
        dialog_id=assistant_id,
        order_by=ConversationService.model.create_time,
        reverse=True)
    convs = [d.to_dict() for d in convs]
    for conv in convs:
        conv['messages'] = conv.pop("message")
        conv["assistant_id"] = conv.pop("dialog_id")
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in conv["reference"][chunk_num]:
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk["chunk_id"],
                                "content": chunk["content_with_weight"],
                                "document_id": chunk["doc_id"],
                                "document_name": chunk["docnm_kwd"],
                                "knowledgebase_id": chunk["kb_id"],
                                "image_id": chunk["img_id"],
                                "similarity": chunk["similarity"],
                                "vector_similarity": chunk["vector_similarity"],
                                "term_similarity": chunk["term_similarity"],
                                "positions": chunk["positions"],
                            }
                            chunk_list.append(new_chunk)
                    chunk_num += 1
                    messages[message_num]["reference"] = chunk_list
                message_num += 1
        del conv["reference"]
    return get_json_result(data=convs)

@manager.route('/delete', methods=["DELETE"])
@token_required
def delete(tenant_id):
    id = request.args.get("id")
    if not id:
        return get_data_error_result(retmsg="`id` is required in deleting operation")
    conv = ConversationService.query(id=id)
    if not conv:
        return get_data_error_result(retmsg="Session doesn't exist")
    conv = conv[0]
    if not DialogService.query(id=conv.dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_data_error_result(retmsg="You don't own the session")
    ConversationService.delete_by_id(id)
    return get_json_result(data=True)
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #
+import json
+
 from flask_login import login_required

 from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -20,7 +22,7 @@ from api.utils.api_utils import get_json_result
 from api.versions import get_rag_version
 from rag.settings import SVR_QUEUE_NAME
 from rag.utils.es_conn import ELASTICSEARCH
-from rag.utils.minio_conn import MINIO
+from rag.utils.storage_factory import STORAGE_IMPL
 from timeit import default_timer as timer

 from rag.utils.redis_conn import REDIS_CONN
@@ -45,7 +47,7 @@ def status():

     st = timer()
     try:
-        MINIO.health()
+        STORAGE_IMPL.health()
         res["minio"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.)}
     except Exception as e:
         res["minio"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}
@@ -59,10 +61,29 @@ def status():

     st = timer()
     try:
-        qinfo = REDIS_CONN.health(SVR_QUEUE_NAME)
-        res["redis"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.),
-                        "pending": qinfo.get("pending", 0)}
+        if not REDIS_CONN.health():
+            raise Exception("Lost connection!")
+        res["redis"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.)}
     except Exception as e:
         res["redis"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}

+    try:
+        v = REDIS_CONN.get("TASKEXE")
+        if not v:
+            raise Exception("No task executor running!")
+        obj = json.loads(v)
+        color = "green"
+        for id in obj.keys():
+            arr = obj[id]
+            if len(arr) == 1:
+                obj[id] = [0]
+            else:
+                obj[id] = [arr[i + 1] - arr[i] for i in range(len(arr) - 1)]
+            elapsed = max(obj[id])
+            if elapsed > 50: color = "yellow"
+            if elapsed > 120: color = "red"
+        res["task_executor"] = {"status": color, "elapsed": obj}
+    except Exception as e:
+        res["task_executor"] = {"status": "red", "error": str(e)}
+
     return get_json_result(data=res)
api/apps/tenant_app.py  (new file, 85 lines)
@@ -0,0 +1,85 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from flask import request
from flask_login import current_user, login_required

from api.db import UserTenantRole, StatusEnum
from api.db.db_models import UserTenant
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import RetCode

from api.utils import get_uuid
from api.utils.api_utils import get_json_result, validate_request, server_error_response


@manager.route("/list", methods=["GET"])
@login_required
def tenant_list():
    try:
        tenants = TenantService.get_by_user_id(current_user.id)
        return get_json_result(data=tenants)
    except Exception as e:
        return server_error_response(e)


@manager.route("/<tenant_id>/user/list", methods=["GET"])
@login_required
def user_list(tenant_id):
    try:
        users = UserTenantService.get_by_tenant_id(tenant_id)
        return get_json_result(data=users)
    except Exception as e:
        return server_error_response(e)


@manager.route('/<tenant_id>/user', methods=['POST'])
@login_required
@validate_request("user_id")
def create(tenant_id):
    user_id = request.json.get("user_id")
    if not user_id:
        return get_json_result(
            data=False, retmsg='Lack of "USER ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        user_tenants = UserTenantService.query(user_id=user_id, tenant_id=tenant_id)
        if user_tenants:
            uuid = user_tenants[0].id
            return get_json_result(data={"id": uuid})

        uuid = get_uuid()
        UserTenantService.save(
            id=uuid,
            user_id=user_id,
            tenant_id=tenant_id,
            role=UserTenantRole.NORMAL.value,
            status=StatusEnum.VALID.value)

        return get_json_result(data={"id": uuid})
    except Exception as e:
        return server_error_response(e)


@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE'])
@login_required
def rm(tenant_id, user_id):
    try:
        UserTenantService.filter_delete([UserTenant.tenant_id == tenant_id, UserTenant.user_id == user_id])
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@@ -1,391 +1,422 @@
 #
 # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 import json
 import re
 from datetime import datetime

 from flask import request, session, redirect
 from werkzeug.security import generate_password_hash, check_password_hash
 from flask_login import login_required, current_user, login_user, logout_user

 from api.db.db_models import TenantLLM
 from api.db.services.llm_service import TenantLLMService, LLMService
 from api.utils.api_utils import server_error_response, validate_request
 from api.utils import get_uuid, get_format_time, decrypt, download_img, current_timestamp, datetime_format
 from api.db import UserTenantRole, LLMType, FileType
 from api.settings import RetCode, GITHUB_OAUTH, FEISHU_OAUTH, CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, \
     API_KEY, \
     LLM_FACTORY, LLM_BASE_URL, RERANK_MDL
 from api.db.services.user_service import UserService, TenantService, UserTenantService
 from api.db.services.file_service import FileService
 from api.settings import stat_logger
-from api.utils.api_utils import get_json_result, cors_reponse
+from api.utils.api_utils import get_json_result, construct_response


 @manager.route('/login', methods=['POST', 'GET'])
 def login():
-    login_channel = "password"
     if not request.json:
-        return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR,
-                               retmsg='Unautherized!')
+        return get_json_result(data=False,
+                               retcode=RetCode.AUTHENTICATION_ERROR,
+                               retmsg='Unauthorized!')

     email = request.json.get('email', "")
     users = UserService.query(email=email)
     if not users:
-        return get_json_result(
-            data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg=f'This Email is not registered!')
+        return get_json_result(data=False,
+                               retcode=RetCode.AUTHENTICATION_ERROR,
+                               retmsg=f'Email: {email} is not registered!')
+
     password = request.json.get('password')
     try:
         password = decrypt(password)
     except BaseException:
-        return get_json_result(
-            data=False, retcode=RetCode.SERVER_ERROR, retmsg='Fail to crypt password')
+        return get_json_result(data=False,
+                               retcode=RetCode.SERVER_ERROR,
+                               retmsg='Fail to crypt password')

     user = UserService.query_user(email, password)
     if user:
         response_data = user.to_json()
         user.access_token = get_uuid()
         login_user(user)
         user.update_time = current_timestamp(),
         user.update_date = datetime_format(datetime.now()),
         user.save()
         msg = "Welcome back!"
-        return cors_reponse(data=response_data, auth=user.get_id(), retmsg=msg)
+        return construct_response(data=response_data, auth=user.get_id(), retmsg=msg)
     else:
-        return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR,
-                               retmsg='Email and Password do not match!')
+        return get_json_result(data=False,
+                               retcode=RetCode.AUTHENTICATION_ERROR,
+                               retmsg='Email and password do not match!')


 @manager.route('/github_callback', methods=['GET'])
 def github_callback():
     import requests
-    res = requests.post(GITHUB_OAUTH.get("url"), data={
-        "client_id": GITHUB_OAUTH.get("client_id"),
-        "client_secret": GITHUB_OAUTH.get("secret_key"),
-        "code": request.args.get('code')
-    }, headers={"Accept": "application/json"})
+    res = requests.post(GITHUB_OAUTH.get("url"),
+                        data={
+                            "client_id": GITHUB_OAUTH.get("client_id"),
+                            "client_secret": GITHUB_OAUTH.get("secret_key"),
+                            "code": request.args.get('code')},
+                        headers={"Accept": "application/json"})
     res = res.json()
     if "error" in res:
         return redirect("/?error=%s" % res["error_description"])

     if "user:email" not in res["scope"].split(","):
         return redirect("/?error=user:email not in scope")

     session["access_token"] = res["access_token"]
     session["access_token_from"] = "github"
-    userinfo = user_info_from_github(session["access_token"])
-    users = UserService.query(email=userinfo["email"])
+    user_info = user_info_from_github(session["access_token"])
+    email_address = user_info["email"]
+    users = UserService.query(email=email_address)
     user_id = get_uuid()
     if not users:
+        # User hasn't registered yet: try to register
         try:
             try:
-                avatar = download_img(userinfo["avatar_url"])
+                avatar = download_img(user_info["avatar_url"])
             except Exception as e:
                 stat_logger.exception(e)
                 avatar = ""
             users = user_register(user_id, {
                 "access_token": session["access_token"],
-                "email": userinfo["email"],
+                "email": email_address,
                 "avatar": avatar,
-                "nickname": userinfo["login"],
+                "nickname": user_info["login"],
                 "login_channel": "github",
                 "last_login_time": get_format_time(),
                 "is_superuser": False,
             })
             if not users:
-                raise Exception('Register user failure.')
+                raise Exception(f'Fail to register {email_address}.')
             if len(users) > 1:
-                raise Exception('Same E-mail exist!')
+                raise Exception(f'Same email: {email_address} exists!')
+
+            # Try to log in
             user = users[0]
             login_user(user)
             return redirect("/?auth=%s" % user.get_id())
         except Exception as e:
             rollback_user_registration(user_id)
             stat_logger.exception(e)
             return redirect("/?error=%s" % str(e))
+
+    # User has already registered, try to log in
     user = users[0]
     user.access_token = get_uuid()
     login_user(user)
     user.save()
     return redirect("/?auth=%s" % user.get_id())


 @manager.route('/feishu_callback', methods=['GET'])
 def feishu_callback():
     import requests
-    app_access_token_res = requests.post(FEISHU_OAUTH.get("app_access_token_url"), data=json.dumps({
-        "app_id": FEISHU_OAUTH.get("app_id"),
-        "app_secret": FEISHU_OAUTH.get("app_secret")
-    }), headers={"Content-Type": "application/json; charset=utf-8"})
+    app_access_token_res = requests.post(FEISHU_OAUTH.get("app_access_token_url"),
+                                         data=json.dumps({
+                                             "app_id": FEISHU_OAUTH.get("app_id"),
+                                             "app_secret": FEISHU_OAUTH.get("app_secret")
+                                         }),
+                                         headers={"Content-Type": "application/json; charset=utf-8"})
     app_access_token_res = app_access_token_res.json()
     if app_access_token_res['code'] != 0:
         return redirect("/?error=%s" % app_access_token_res)

-    res = requests.post(FEISHU_OAUTH.get("user_access_token_url"), data=json.dumps({
-        "grant_type": FEISHU_OAUTH.get("grant_type"),
-        "code": request.args.get('code')
-    }), headers={"Content-Type": "application/json; charset=utf-8",
-                 'Authorization': f"Bearer {app_access_token_res['app_access_token']}"})
+    res = requests.post(FEISHU_OAUTH.get("user_access_token_url"),
+                        data=json.dumps({
+                            "grant_type": FEISHU_OAUTH.get("grant_type"),
+                            "code": request.args.get('code')
+                        }),
+                        headers={
+                            "Content-Type": "application/json; charset=utf-8",
+                            'Authorization': f"Bearer {app_access_token_res['app_access_token']}"
+                        })
     res = res.json()
     if res['code'] != 0:
         return redirect("/?error=%s" % res["message"])

     if "contact:user.email:readonly" not in res["data"]["scope"].split(" "):
         return redirect("/?error=contact:user.email:readonly not in scope")
     session["access_token"] = res["data"]["access_token"]
     session["access_token_from"] = "feishu"
-    userinfo = user_info_from_feishu(session["access_token"])
-    users = UserService.query(email=userinfo["email"])
+    user_info = user_info_from_feishu(session["access_token"])
+    email_address = user_info["email"]
+    users = UserService.query(email=email_address)
     user_id = get_uuid()
     if not users:
+        # User hasn't registered yet: try to register
         try:
             try:
-                avatar = download_img(userinfo["avatar_url"])
+                avatar = download_img(user_info["avatar_url"])
             except Exception as e:
                 stat_logger.exception(e)
                 avatar = ""
             users = user_register(user_id, {
                 "access_token": session["access_token"],
-                "email": userinfo["email"],
+                "email": email_address,
                 "avatar": avatar,
-                "nickname": userinfo["en_name"],
+                "nickname": user_info["en_name"],
                 "login_channel": "feishu",
                 "last_login_time": get_format_time(),
                 "is_superuser": False,
             })
             if not users:
-                raise Exception('Register user failure.')
+                raise Exception(f'Fail to register {email_address}.')
             if len(users) > 1:
-                raise Exception('Same E-mail exist!')
+                raise Exception(f'Same email: {email_address} exists!')
+
+            # Try to log in
             user = users[0]
             login_user(user)
             return redirect("/?auth=%s" % user.get_id())
         except Exception as e:
             rollback_user_registration(user_id)
             stat_logger.exception(e)
             return redirect("/?error=%s" % str(e))
     user = users[0]
     user.access_token = get_uuid()
     login_user(user)
     user.save()
     return redirect("/?auth=%s" % user.get_id())


 def user_info_from_feishu(access_token):
     import requests
     headers = {"Content-Type": "application/json; charset=utf-8",
                'Authorization': f"Bearer {access_token}"}
     res = requests.get(
         f"https://open.feishu.cn/open-apis/authen/v1/user_info",
         headers=headers)
     user_info = res.json()["data"]
     user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
     return user_info
|
||||||
|
rollback_user_registration(user_id)
|
||||||
|
stat_logger.exception(e)
|
||||||
def user_info_from_github(access_token):
|
return redirect("/?error=%s" % str(e))
|
||||||
import requests
|
|
||||||
headers = {"Accept": "application/json",
|
# User has already registered, try to log in
|
||||||
'Authorization': f"token {access_token}"}
|
user = users[0]
|
||||||
res = requests.get(
|
user.access_token = get_uuid()
|
||||||
f"https://api.github.com/user?access_token={access_token}",
|
login_user(user)
|
||||||
headers=headers)
|
user.save()
|
||||||
user_info = res.json()
|
return redirect("/?auth=%s" % user.get_id())
|
||||||
email_info = requests.get(
|
|
||||||
f"https://api.github.com/user/emails?access_token={access_token}",
|
|
||||||
headers=headers).json()
|
def user_info_from_feishu(access_token):
|
||||||
user_info["email"] = next(
|
import requests
|
||||||
(email for email in email_info if email['primary'] == True),
|
headers = {"Content-Type": "application/json; charset=utf-8",
|
||||||
None)["email"]
|
'Authorization': f"Bearer {access_token}"}
|
||||||
return user_info
|
res = requests.get(
|
||||||
|
f"https://open.feishu.cn/open-apis/authen/v1/user_info",
|
||||||
|
headers=headers)
|
||||||
@manager.route("/logout", methods=['GET'])
|
user_info = res.json()["data"]
|
||||||
@login_required
|
user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
|
||||||
def log_out():
|
return user_info
|
||||||
current_user.access_token = ""
|
|
||||||
current_user.save()
|
|
||||||
logout_user()
|
def user_info_from_github(access_token):
|
||||||
return get_json_result(data=True)
|
import requests
|
||||||
|
headers = {"Accept": "application/json",
|
||||||
|
'Authorization': f"token {access_token}"}
|
||||||
@manager.route("/setting", methods=["POST"])
|
res = requests.get(
|
||||||
@login_required
|
f"https://api.github.com/user?access_token={access_token}",
|
||||||
def setting_user():
|
headers=headers)
|
||||||
update_dict = {}
|
user_info = res.json()
|
||||||
request_data = request.json
|
email_info = requests.get(
|
||||||
if request_data.get("password"):
|
f"https://api.github.com/user/emails?access_token={access_token}",
|
||||||
new_password = request_data.get("new_password")
|
headers=headers).json()
|
||||||
if not check_password_hash(
|
user_info["email"] = next(
|
||||||
current_user.password, decrypt(request_data["password"])):
|
(email for email in email_info if email['primary'] == True),
|
||||||
return get_json_result(
|
None)["email"]
|
||||||
data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg='Password error!')
|
return user_info
|
||||||
|
|
||||||
if new_password:
|
|
||||||
update_dict["password"] = generate_password_hash(
|
@manager.route("/logout", methods=['GET'])
|
||||||
decrypt(new_password))
|
@login_required
|
||||||
|
def log_out():
|
||||||
for k in request_data.keys():
|
current_user.access_token = ""
|
||||||
if k in ["password", "new_password"]:
|
current_user.save()
|
||||||
continue
|
logout_user()
|
||||||
update_dict[k] = request_data[k]
|
return get_json_result(data=True)
|
||||||
|
|
||||||
try:
|
|
||||||
UserService.update_by_id(current_user.id, update_dict)
|
@manager.route("/setting", methods=["POST"])
|
||||||
return get_json_result(data=True)
|
@login_required
|
||||||
except Exception as e:
|
def setting_user():
|
||||||
stat_logger.exception(e)
|
update_dict = {}
|
||||||
return get_json_result(
|
request_data = request.json
|
||||||
data=False, retmsg='Update failure!', retcode=RetCode.EXCEPTION_ERROR)
|
if request_data.get("password"):
|
||||||
|
new_password = request_data.get("new_password")
|
||||||
|
if not check_password_hash(
|
||||||
@manager.route("/info", methods=["GET"])
|
current_user.password, decrypt(request_data["password"])):
|
||||||
@login_required
|
return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg='Password error!')
|
||||||
def user_info():
|
|
||||||
return get_json_result(data=current_user.to_dict())
|
if new_password:
|
||||||
|
update_dict["password"] = generate_password_hash(decrypt(new_password))
|
||||||
|
|
||||||
def rollback_user_registration(user_id):
|
for k in request_data.keys():
|
||||||
try:
|
if k in ["password", "new_password"]:
|
||||||
UserService.delete_by_id(user_id)
|
continue
|
||||||
except Exception as e:
|
update_dict[k] = request_data[k]
|
||||||
pass
|
|
||||||
try:
|
try:
|
||||||
TenantService.delete_by_id(user_id)
|
UserService.update_by_id(current_user.id, update_dict)
|
||||||
except Exception as e:
|
return get_json_result(data=True)
|
||||||
pass
|
except Exception as e:
|
||||||
try:
|
stat_logger.exception(e)
|
||||||
u = UserTenantService.query(tenant_id=user_id)
|
return get_json_result(data=False, retmsg='Update failure!', retcode=RetCode.EXCEPTION_ERROR)
|
||||||
if u:
|
|
||||||
UserTenantService.delete_by_id(u[0].id)
|
|
||||||
except Exception as e:
|
@manager.route("/info", methods=["GET"])
|
||||||
pass
|
@login_required
|
||||||
try:
|
def user_profile():
|
||||||
TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
|
return get_json_result(data=current_user.to_dict())
|
||||||
except Exception as e:
|
|
||||||
pass
|
|
||||||
|
def rollback_user_registration(user_id):
|
||||||
|
try:
|
||||||
def user_register(user_id, user):
|
UserService.delete_by_id(user_id)
|
||||||
user["id"] = user_id
|
except Exception as e:
|
||||||
tenant = {
|
pass
|
||||||
"id": user_id,
|
try:
|
||||||
"name": user["nickname"] + "‘s Kingdom",
|
TenantService.delete_by_id(user_id)
|
||||||
"llm_id": CHAT_MDL,
|
except Exception as e:
|
||||||
"embd_id": EMBEDDING_MDL,
|
pass
|
||||||
"asr_id": ASR_MDL,
|
try:
|
||||||
"parser_ids": PARSERS,
|
u = UserTenantService.query(tenant_id=user_id)
|
||||||
"img2txt_id": IMAGE2TEXT_MDL,
|
if u:
|
||||||
"rerank_id": RERANK_MDL
|
UserTenantService.delete_by_id(u[0].id)
|
||||||
}
|
except Exception as e:
|
||||||
usr_tenant = {
|
pass
|
||||||
"tenant_id": user_id,
|
try:
|
||||||
"user_id": user_id,
|
TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
|
||||||
"invited_by": user_id,
|
except Exception as e:
|
||||||
"role": UserTenantRole.OWNER
|
pass
|
||||||
}
|
|
||||||
file_id = get_uuid()
|
|
||||||
file = {
|
def user_register(user_id, user):
|
||||||
"id": file_id,
|
user["id"] = user_id
|
||||||
"parent_id": file_id,
|
tenant = {
|
||||||
"tenant_id": user_id,
|
"id": user_id,
|
||||||
"created_by": user_id,
|
"name": user["nickname"] + "‘s Kingdom",
|
||||||
"name": "/",
|
"llm_id": CHAT_MDL,
|
||||||
"type": FileType.FOLDER.value,
|
"embd_id": EMBEDDING_MDL,
|
||||||
"size": 0,
|
"asr_id": ASR_MDL,
|
||||||
"location": "",
|
"parser_ids": PARSERS,
|
||||||
}
|
"img2txt_id": IMAGE2TEXT_MDL,
|
||||||
tenant_llm = []
|
"rerank_id": RERANK_MDL
|
||||||
for llm in LLMService.query(fid=LLM_FACTORY):
|
}
|
||||||
tenant_llm.append({"tenant_id": user_id,
|
usr_tenant = {
|
||||||
"llm_factory": LLM_FACTORY,
|
"tenant_id": user_id,
|
||||||
"llm_name": llm.llm_name,
|
"user_id": user_id,
|
||||||
"model_type": llm.model_type,
|
"invited_by": user_id,
|
||||||
"api_key": API_KEY,
|
"role": UserTenantRole.OWNER
|
||||||
"api_base": LLM_BASE_URL
|
}
|
||||||
})
|
file_id = get_uuid()
|
||||||
|
file = {
|
||||||
if not UserService.save(**user):
|
"id": file_id,
|
||||||
return
|
"parent_id": file_id,
|
||||||
TenantService.insert(**tenant)
|
"tenant_id": user_id,
|
||||||
UserTenantService.insert(**usr_tenant)
|
"created_by": user_id,
|
||||||
TenantLLMService.insert_many(tenant_llm)
|
"name": "/",
|
||||||
FileService.insert(file)
|
"type": FileType.FOLDER.value,
|
||||||
return UserService.query(email=user["email"])
|
"size": 0,
|
||||||
|
"location": "",
|
||||||
|
}
|
||||||
@manager.route("/register", methods=["POST"])
|
tenant_llm = []
|
||||||
@validate_request("nickname", "email", "password")
|
for llm in LLMService.query(fid=LLM_FACTORY):
|
||||||
def user_add():
|
tenant_llm.append({"tenant_id": user_id,
|
||||||
req = request.json
|
"llm_factory": LLM_FACTORY,
|
||||||
if UserService.query(email=req["email"]):
|
"llm_name": llm.llm_name,
|
||||||
return get_json_result(
|
"model_type": llm.model_type,
|
||||||
data=False, retmsg=f'Email: {req["email"]} has already registered!', retcode=RetCode.OPERATING_ERROR)
|
"api_key": API_KEY,
|
||||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,4}$", req["email"]):
|
"api_base": LLM_BASE_URL
|
||||||
return get_json_result(data=False, retmsg=f'Invaliad e-mail: {req["email"]}!',
|
})
|
||||||
retcode=RetCode.OPERATING_ERROR)
|
|
||||||
|
if not UserService.save(**user):
|
||||||
user_dict = {
|
return
|
||||||
"access_token": get_uuid(),
|
TenantService.insert(**tenant)
|
||||||
"email": req["email"],
|
UserTenantService.insert(**usr_tenant)
|
||||||
"nickname": req["nickname"],
|
TenantLLMService.insert_many(tenant_llm)
|
||||||
"password": decrypt(req["password"]),
|
FileService.insert(file)
|
||||||
"login_channel": "password",
|
return UserService.query(email=user["email"])
|
||||||
"last_login_time": get_format_time(),
|
|
||||||
"is_superuser": False,
|
|
||||||
}
|
@manager.route("/register", methods=["POST"])
|
||||||
|
@validate_request("nickname", "email", "password")
|
||||||
user_id = get_uuid()
|
def user_add():
|
||||||
try:
|
req = request.json
|
||||||
users = user_register(user_id, user_dict)
|
email_address = req["email"]
|
||||||
if not users:
|
|
||||||
raise Exception('Register user failure.')
|
# Validate the email address
|
||||||
if len(users) > 1:
|
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,4}$", email_address):
|
||||||
raise Exception('Same E-mail exist!')
|
return get_json_result(data=False,
|
||||||
user = users[0]
|
retmsg=f'Invalid email address: {email_address}!',
|
||||||
login_user(user)
|
retcode=RetCode.OPERATING_ERROR)
|
||||||
return cors_reponse(data=user.to_json(),
|
|
||||||
auth=user.get_id(), retmsg="Welcome aboard!")
|
# Check if the email address is already used
|
||||||
except Exception as e:
|
if UserService.query(email=email_address):
|
||||||
rollback_user_registration(user_id)
|
return get_json_result(
|
||||||
stat_logger.exception(e)
|
data=False,
|
||||||
return get_json_result(
|
retmsg=f'Email: {email_address} has already registered!',
|
||||||
data=False, retmsg='User registration failure!', retcode=RetCode.EXCEPTION_ERROR)
|
retcode=RetCode.OPERATING_ERROR)
|
||||||
|
|
||||||
|
# Construct user info data
|
||||||
@manager.route("/tenant_info", methods=["GET"])
|
nickname = req["nickname"]
|
||||||
@login_required
|
user_dict = {
|
||||||
def tenant_info():
|
"access_token": get_uuid(),
|
||||||
try:
|
"email": email_address,
|
||||||
tenants = TenantService.get_by_user_id(current_user.id)[0]
|
"nickname": nickname,
|
||||||
return get_json_result(data=tenants)
|
"password": decrypt(req["password"]),
|
||||||
except Exception as e:
|
"login_channel": "password",
|
||||||
return server_error_response(e)
|
"last_login_time": get_format_time(),
|
||||||
|
"is_superuser": False,
|
||||||
|
}
|
||||||
@manager.route("/set_tenant_info", methods=["POST"])
|
|
||||||
@login_required
|
user_id = get_uuid()
|
||||||
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
|
try:
|
||||||
def set_tenant_info():
|
users = user_register(user_id, user_dict)
|
||||||
req = request.json
|
if not users:
|
||||||
try:
|
raise Exception(f'Fail to register {email_address}.')
|
||||||
tid = req["tenant_id"]
|
if len(users) > 1:
|
||||||
del req["tenant_id"]
|
raise Exception(f'Same email: {email_address} exists!')
|
||||||
TenantService.update_by_id(tid, req)
|
user = users[0]
|
||||||
return get_json_result(data=True)
|
login_user(user)
|
||||||
except Exception as e:
|
return construct_response(data=user.to_json(),
|
||||||
return server_error_response(e)
|
auth=user.get_id(),
|
||||||
|
retmsg=f"{nickname}, welcome aboard!")
|
||||||
|
except Exception as e:
|
||||||
|
rollback_user_registration(user_id)
|
||||||
|
stat_logger.exception(e)
|
||||||
|
return get_json_result(data=False,
|
||||||
|
retmsg=f'User registration failure, error: {str(e)}',
|
||||||
|
retcode=RetCode.EXCEPTION_ERROR)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route("/tenant_info", methods=["GET"])
|
||||||
|
@login_required
|
||||||
|
def tenant_info():
|
||||||
|
try:
|
||||||
|
tenants = TenantService.get_by_user_id(current_user.id)[0]
|
||||||
|
return get_json_result(data=tenants)
|
||||||
|
except Exception as e:
|
||||||
|
return server_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route("/set_tenant_info", methods=["POST"])
|
||||||
|
@login_required
|
||||||
|
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
|
||||||
|
def set_tenant_info():
|
||||||
|
req = request.json
|
||||||
|
try:
|
||||||
|
tid = req["tenant_id"]
|
||||||
|
del req["tenant_id"]
|
||||||
|
TenantService.update_by_id(tid, req)
|
||||||
|
return get_json_result(data=True)
|
||||||
|
except Exception as e:
|
||||||
|
return server_error_response(e)
|
||||||
|
|||||||
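Both OAuth callbacks above follow the same register-or-login shape. A minimal sketch of that shared flow, factored into one helper; this is illustrative only, not code from this change set, and the helper name, its signature, and the userinfo keys are assumptions:

def oauth_register_or_login(userinfo, login_channel):
    # Illustrative sketch: mirrors the register-or-login pattern of the
    # callbacks above. The function and its arguments are hypothetical.
    users = UserService.query(email=userinfo["email"])
    user_id = get_uuid()
    if not users:
        try:
            users = user_register(user_id, {
                "access_token": session["access_token"],
                "email": userinfo["email"],
                "avatar": "",
                "nickname": userinfo["nickname"],
                "login_channel": login_channel,
                "last_login_time": get_format_time(),
                "is_superuser": False,
            })
            user = users[0]
            login_user(user)
            return redirect("/?auth=%s" % user.get_id())
        except Exception as e:
            # Registration touches several tables, so roll all of them back.
            rollback_user_registration(user_id)
            stat_logger.exception(e)
            return redirect("/?error=%s" % str(e))
    # Known user: rotate the access token and log in.
    user = users[0]
    user.access_token = get_uuid()
    login_user(user)
    user.save()
    return redirect("/?auth=%s" % user.get_id())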
@ -1,99 +1,103 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
from enum import IntEnum
from strenum import StrEnum


class StatusEnum(Enum):
    VALID = "1"
    INVALID = "0"


class UserTenantRole(StrEnum):
    OWNER = 'owner'
    ADMIN = 'admin'
    NORMAL = 'normal'


class TenantPermission(StrEnum):
    ME = 'me'
    TEAM = 'team'


class SerializedType(IntEnum):
    PICKLE = 1
    JSON = 2


class FileType(StrEnum):
    PDF = 'pdf'
    DOC = 'doc'
    VISUAL = 'visual'
    AURAL = 'aural'
    VIRTUAL = 'virtual'
    FOLDER = 'folder'
    OTHER = "other"


class LLMType(StrEnum):
    CHAT = 'chat'
    EMBEDDING = 'embedding'
    SPEECH2TEXT = 'speech2text'
    IMAGE2TEXT = 'image2text'
    RERANK = 'rerank'
    TTS = 'tts'


class ChatStyle(StrEnum):
    CREATIVE = 'Creative'
    PRECISE = 'Precise'
    EVENLY = 'Evenly'
    CUSTOM = 'Custom'


class TaskStatus(StrEnum):
    UNSTART = "0"
    RUNNING = "1"
    CANCEL = "2"
    DONE = "3"
    FAIL = "4"


class ParserType(StrEnum):
    PRESENTATION = "presentation"
    LAWS = "laws"
    MANUAL = "manual"
    PAPER = "paper"
    RESUME = "resume"
    BOOK = "book"
    QA = "qa"
    TABLE = "table"
    NAIVE = "naive"
    PICTURE = "picture"
    ONE = "one"
    AUDIO = "audio"
    EMAIL = "email"
    KG = "knowledge_graph"


class FileSource(StrEnum):
    LOCAL = ""
    KNOWLEDGEBASE = "knowledgebase"
    S3 = "s3"


class CanvasType(StrEnum):
    ChatBot = "chatbot"
    DocBot = "docbot"


KNOWLEDGEBASE_FOLDER_NAME = ".knowledgebase"
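The new enum members (LLMType.TTS plus the AUDIO, EMAIL, and KG parser types) are string enums, so existing string comparisons keep working unchanged. A quick sketch, assuming only the strenum-based classes shown above:

# Minimal sketch: StrEnum members subclass str, so they compare equal to
# their raw values, which is what lets parser ids live as plain strings
# in the database.
from api.db import LLMType, ParserType

assert LLMType.TTS == "tts"
assert ParserType.KG.value == "knowledge_graph"

def is_audio_parser(parser_id: str) -> bool:
    # parser_id typically comes back from the DB as a bare string.
    return parser_id == ParserType.AUDIO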
1942  api/db/db_models.py (file diff suppressed because it is too large)
@ -1,130 +1,135 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from functools import reduce
from typing import Dict, Type, Union

from playhouse.pool import PooledMySQLDatabase

from api.utils import current_timestamp, timestamp_to_date

from api.db.db_models import DB, DataBaseModel
from api.db.runtime_config import RuntimeConfig
from api.utils.log_utils import getLogger
from enum import Enum


LOGGER = getLogger()


@DB.connection_context()
def bulk_insert_into_db(model, data_source, replace_on_conflict=False):
    DB.create_tables([model])

    for i, data in enumerate(data_source):
        current_time = current_timestamp() + i
        current_date = timestamp_to_date(current_time)
        if 'create_time' not in data:
            data['create_time'] = current_time
        data['create_date'] = timestamp_to_date(data['create_time'])
        data['update_time'] = current_time
        data['update_date'] = current_date

    preserve = tuple(data_source[0].keys() - {'create_time', 'create_date'})

    batch_size = 1000

    for i in range(0, len(data_source), batch_size):
        with DB.atomic():
            query = model.insert_many(data_source[i:i + batch_size])
            if replace_on_conflict:
                if isinstance(DB, PooledMySQLDatabase):
                    query = query.on_conflict(preserve=preserve)
                else:
                    query = query.on_conflict(conflict_target="id", preserve=preserve)
            query.execute()


def get_dynamic_db_model(base, job_id):
    return type(base.model(
        table_index=get_dynamic_tracking_table_index(job_id=job_id)))


def get_dynamic_tracking_table_index(job_id):
    return job_id[:8]


def fill_db_model_object(model_object, human_model_dict):
    for k, v in human_model_dict.items():
        attr_name = 'f_%s' % k
        if hasattr(model_object.__class__, attr_name):
            setattr(model_object, attr_name, v)
    return model_object


# https://docs.peewee-orm.com/en/latest/peewee/query_operators.html
supported_operators = {
    '==': operator.eq,
    '<': operator.lt,
    '<=': operator.le,
    '>': operator.gt,
    '>=': operator.ge,
    '!=': operator.ne,
    '<<': operator.lshift,
    '>>': operator.rshift,
    '%': operator.mod,
    '**': operator.pow,
    '^': operator.xor,
    '~': operator.inv,
}


def query_dict2expression(
        model: Type[DataBaseModel], query: Dict[str, Union[bool, int, str, list, tuple]]):
    expression = []

    for field, value in query.items():
        if not isinstance(value, (list, tuple)):
            value = ('==', value)
        op, *val = value

        field = getattr(model, f'f_{field}')
        value = supported_operators[op](
            field, val[0]) if op in supported_operators else getattr(
            field, op)(
            *val)
        expression.append(value)

    return reduce(operator.iand, expression)


def query_db(model: Type[DataBaseModel], limit: int = 0, offset: int = 0,
             query: dict = None, order_by: Union[str, list, tuple] = None):
    data = model.select()
    if query:
        data = data.where(query_dict2expression(model, query))
    count = data.count()

    if not order_by:
        order_by = 'create_time'
    if not isinstance(order_by, (list, tuple)):
        order_by = (order_by, 'asc')
    order_by, order = order_by
    order_by = getattr(model, f'f_{order_by}')
    order_by = getattr(order_by, order)()
    data = data.order_by(order_by)

    if limit > 0:
        data = data.limit(limit)
    if offset > 0:
        data = data.offset(offset)

    return list(data), count
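query_dict2expression turns a plain dict into a peewee where-clause, so callers can filter and page without building expressions by hand. A minimal usage sketch, assuming the import path shown and a hypothetical Job model with the f_-prefixed fields these helpers expect:

# Hypothetical model for illustration; any DataBaseModel subclass with
# f_job_id / f_status / f_create_time fields would do. Import path assumed.
from api.db.db_utils import query_db

jobs, total = query_db(
    Job,
    limit=20,
    offset=0,
    # Values are ('op', operand) tuples; a bare value means equality.
    query={"job_id": "abc123", "status": ("!=", "failed")},
    order_by=("create_time", "desc"),
)
print(total, [j.f_job_id for j in jobs])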
1161  api/db/init_data.py (file diff suppressed because it is too large)
@ -1,21 +1,21 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import operator
import time
import typing
from api.utils.log_utils import sql_logger
import peewee
@ -1,28 +1,28 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ReloadConfigBase:
    @classmethod
    def get_all(cls):
        configs = {}
        for k, v in cls.__dict__.items():
            if not callable(getattr(cls, k)) and not k.startswith(
                    "__") and not k.startswith("_"):
                configs[k] = v
        return configs

    @classmethod
    def get(cls, config_name):
        return getattr(cls, config_name) if hasattr(cls, config_name) else None
@ -1,54 +1,54 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.versions import get_versions
from .reload_config_base import ReloadConfigBase


class RuntimeConfig(ReloadConfigBase):
    DEBUG = None
    WORK_MODE = None
    HTTP_PORT = None
    JOB_SERVER_HOST = None
    JOB_SERVER_VIP = None
    ENV = dict()
    SERVICE_DB = None
    LOAD_CONFIG_MANAGER = False

    @classmethod
    def init_config(cls, **kwargs):
        for k, v in kwargs.items():
            if hasattr(cls, k):
                setattr(cls, k, v)

    @classmethod
    def init_env(cls):
        cls.ENV.update(get_versions())

    @classmethod
    def load_config_manager(cls):
        cls.LOAD_CONFIG_MANAGER = True

    @classmethod
    def get_env(cls, key):
        return cls.ENV.get(key, None)

    @classmethod
    def get_all_env(cls):
        return cls.ENV

    @classmethod
    def set_service_db(cls, service_db):
        cls.SERVICE_DB = service_db
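RuntimeConfig is a process-wide settings holder: init_config only sets attributes that already exist on the class, so unknown keys are silently ignored. A small sketch of the intended call pattern at server start-up; the port value here is made up for illustration:

from api.db.runtime_config import RuntimeConfig

# Unknown keys are dropped because init_config checks hasattr(cls, k).
RuntimeConfig.init_config(DEBUG=False, HTTP_PORT=9380, NOT_A_SETTING=1)
RuntimeConfig.init_env()  # pulls version info into ENV

assert RuntimeConfig.HTTP_PORT == 9380
assert RuntimeConfig.get("NOT_A_SETTING") is None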
@ -1,38 +1,38 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import re
from .user_service import UserService


def duplicate_name(query_func, **kwargs):
    fnm = kwargs["name"]
    objs = query_func(**kwargs)
    if not objs: return fnm
    ext = pathlib.Path(fnm).suffix  # .jpg
    nm = re.sub(r"%s$" % ext, "", fnm)
    r = re.search(r"\(([0-9]+)\)$", nm)
    c = 0
    if r:
        c = int(r.group(1))
        nm = re.sub(r"\([0-9]+\)$", "", nm)
    c += 1
    nm = f"{nm}({c})"
    if ext: nm += f"{ext}"

    kwargs["name"] = nm
    return duplicate_name(query_func, **kwargs)
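duplicate_name recurses until the candidate name is free, appending or incrementing a (n) suffix before the extension. A toy sketch with a stubbed query function; the taken set stands in for a real service query such as a document lookup:

taken = {"report.pdf", "report(1).pdf"}

def fake_query(**kwargs):
    # Stand-in for a real query_func; a truthy result means "name taken".
    return kwargs["name"] in taken

assert duplicate_name(fake_query, name="report.pdf") == "report(2).pdf"
assert duplicate_name(fake_query, name="notes.txt") == "notes.txt"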
@ -1,66 +1,70 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime

import peewee

from api.db.db_models import DB, API4Conversation, APIToken, Dialog
from api.db.services.common_service import CommonService
from api.utils import current_timestamp, datetime_format


class APITokenService(CommonService):
    model = APIToken

    @classmethod
    @DB.connection_context()
    def used(cls, token):
        return cls.model.update({
            "update_time": current_timestamp(),
            "update_date": datetime_format(datetime.now()),
        }).where(
            cls.model.token == token
        )


class API4ConversationService(CommonService):
    model = API4Conversation

    @classmethod
    @DB.connection_context()
    def append_message(cls, id, conversation):
        cls.update_by_id(id, conversation)
        return cls.model.update(round=cls.model.round + 1).where(cls.model.id == id).execute()

    @classmethod
    @DB.connection_context()
    def stats(cls, tenant_id, from_date, to_date, source=None):
        if len(to_date) == 10: to_date += " 23:59:59"
        return cls.model.select(
            cls.model.create_date.truncate("day").alias("dt"),
            peewee.fn.COUNT(
                cls.model.id).alias("pv"),
            peewee.fn.COUNT(
                cls.model.user_id.distinct()).alias("uv"),
            peewee.fn.SUM(
                cls.model.tokens).alias("tokens"),
            peewee.fn.SUM(
                cls.model.duration).alias("duration"),
            peewee.fn.AVG(
                cls.model.round).alias("round"),
            peewee.fn.SUM(
                cls.model.thumb_up).alias("thumb_up")
        ).join(Dialog, on=((cls.model.dialog_id == Dialog.id) & (Dialog.tenant_id == tenant_id))).where(
            cls.model.create_date >= from_date,
            cls.model.create_date <= to_date,
            cls.model.source == source
        ).group_by(cls.model.create_date.truncate("day")).dicts()
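The reworked stats() pads a date-only to_date to end-of-day and now also filters on a source column; note that with the default source=None, peewee renders cls.model.source == None as "source IS NULL", so only conversations without a source are counted. A usage sketch, assuming the import path shown; the tenant id is a placeholder:

from api.db.services.api_service import API4ConversationService  # path assumed

# Daily pv/uv/token/duration aggregates for one tenant's API conversations.
rows = API4ConversationService.stats(
    tenant_id="tenant-id-placeholder",  # placeholder value
    from_date="2024-07-01",
    to_date="2024-07-31",               # padded to "2024-07-31 23:59:59"
    source=None,                        # matches rows whose source IS NULL
)
for r in rows:
    print(r["dt"], r["pv"], r["uv"], r["tokens"])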
@ -1,183 +1,183 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime

import peewee

from api.db.db_models import DB
from api.utils import datetime_format, current_timestamp, get_uuid


class CommonService:
    model = None

    @classmethod
    @DB.connection_context()
    def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
        return cls.model.query(cols=cols, reverse=reverse,
                               order_by=order_by, **kwargs)

    @classmethod
    @DB.connection_context()
    def get_all(cls, cols=None, reverse=None, order_by=None):
        if cols:
            query_records = cls.model.select(*cols)
        else:
            query_records = cls.model.select()
        if reverse is not None:
            if not order_by or not hasattr(cls, order_by):
                order_by = "create_time"
            if reverse is True:
                query_records = query_records.order_by(
                    cls.model.getter_by(order_by).desc())
            elif reverse is False:
                query_records = query_records.order_by(
                    cls.model.getter_by(order_by).asc())
        return query_records

    @classmethod
    @DB.connection_context()
    def get(cls, **kwargs):
        return cls.model.get(**kwargs)

    @classmethod
    @DB.connection_context()
    def get_or_none(cls, **kwargs):
        try:
            return cls.model.get(**kwargs)
        except peewee.DoesNotExist:
            return None

    @classmethod
    @DB.connection_context()
    def save(cls, **kwargs):
        # if "id" not in kwargs:
        #     kwargs["id"] = get_uuid()
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    @DB.connection_context()
    def insert(cls, **kwargs):
        if "id" not in kwargs:
            kwargs["id"] = get_uuid()
        kwargs["create_time"] = current_timestamp()
        kwargs["create_date"] = datetime_format(datetime.now())
        kwargs["update_time"] = current_timestamp()
        kwargs["update_date"] = datetime_format(datetime.now())
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    @DB.connection_context()
    def insert_many(cls, data_list, batch_size=100):
        with DB.atomic():
            for d in data_list:
                d["create_time"] = current_timestamp()
                d["create_date"] = datetime_format(datetime.now())
            for i in range(0, len(data_list), batch_size):
                cls.model.insert_many(data_list[i:i + batch_size]).execute()

    @classmethod
    @DB.connection_context()
    def update_many_by_id(cls, data_list):
        with DB.atomic():
            for data in data_list:
                data["update_time"] = current_timestamp()
                data["update_date"] = datetime_format(datetime.now())
                cls.model.update(data).where(
                    cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
    def update_by_id(cls, pid, data):
        data["update_time"] = current_timestamp()
        data["update_date"] = datetime_format(datetime.now())
        num = cls.model.update(data).where(cls.model.id == pid).execute()
        return num

    @classmethod
    @DB.connection_context()
    def get_by_id(cls, pid):
        try:
            obj = cls.model.query(id=pid)[0]
            return True, obj
        except Exception as e:
            return False, None

    @classmethod
    @DB.connection_context()
    def get_by_ids(cls, pids, cols=None):
        if cols:
            objs = cls.model.select(*cols)
        else:
            objs = cls.model.select()
        return objs.where(cls.model.id.in_(pids))

    @classmethod
    @DB.connection_context()
    def delete_by_id(cls, pid):
        return cls.model.delete().where(cls.model.id == pid).execute()

    @classmethod
    @DB.connection_context()
    def filter_delete(cls, filters):
        with DB.atomic():
            num = cls.model.delete().where(*filters).execute()
            return num

    @classmethod
    @DB.connection_context()
    def filter_update(cls, filters, update_data):
        with DB.atomic():
            return cls.model.update(update_data).where(*filters).execute()

    @staticmethod
    def cut_list(tar_list, n):
        length = len(tar_list)
        arr = range(length)
        result = [tuple(tar_list[x:(x + n)]) for x in arr[::n]]
        return result

    @classmethod
    @DB.connection_context()
    def filter_scope_list(cls, in_key, in_filters_list,
                          filters=None, cols=None):
        in_filters_tuple_list = cls.cut_list(in_filters_list, 20)
        if not filters:
            filters = []
        res_list = []
        if cols:
            for i in in_filters_tuple_list:
                query_records = cls.model.select(
                    *cols).where(
                    getattr(
                        cls.model,
                        in_key).in_(i),
                    *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
        else:
            for i in in_filters_tuple_list:
                query_records = cls.model.select().where(
                    getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
        return res_list
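Every service in the layer below is a CommonService subclass bound to one peewee model, which is what gives DialogService and friends their insert/update/query methods for free. A minimal sketch with a hypothetical model; Widget and WidgetService do not exist in the repo:

# Hypothetical service for illustration; Widget would be a DataBaseModel
# with at least id and name columns.
class WidgetService(CommonService):
    model = Widget

wid = get_uuid()
WidgetService.insert(id=wid, name="demo")           # stamps create/update times
WidgetService.update_by_id(wid, {"name": "demo2"})  # stamps update time
ok, widget = WidgetService.get_by_id(wid)
assert ok and widget.name == "demo2"
WidgetService.delete_by_id(wid)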
@ -1,359 +1,482 @@
|
|||||||
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import binascii
import os
import json
import re
from copy import deepcopy
from timeit import default_timer as timer

from api.db import LLMType, ParserType
from api.db.db_models import Dialog, Conversation
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.settings import chat_logger, retrievaler, kg_retrievaler
from rag.app.resume import forbidden_select_fields4resume
from rag.nlp import keyword_extraction
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder
from api.utils.file_utils import get_project_base_directory


class DialogService(CommonService):
    model = Dialog


class ConversationService(CommonService):
    model = Conversation


def message_fit_in(msg, max_length=4000):
    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append(
                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0]["content"])
    l = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + l) > 0.8:
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[:max_length - l])
        msg[0]["content"] = m
        return max_length, msg

    m = msg_[1]["content"]
    m = encoder.decode(encoder.encode(m)[:max_length - l])
    msg[1]["content"] = m
    return max_length, msg
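
# message_fit_in keeps a chat history within a token budget: it first drops
# non-system turns (keeping only the last message), then truncates whichever
# of the first or second remaining message dominates the token count, so the
# pair still fits into max_length tokens.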

def llm_id2llm_type(llm_id):
    fnm = os.path.join(get_project_base_directory(), "conf")
    llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
    for llm_factory in llm_factories["factory_llm_infos"]:
        for llm in llm_factory["llm"]:
            if llm_id == llm["llm_name"]:
                return llm["model_type"].strip(",")[-1]
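
# llm_id2llm_type maps an llm_id to its model type by scanning
# conf/llm_factories.json. Note that `strip(",")[-1]` returns the last
# *character* of model_type, while callers compare the result against
# "image2text"; `split(",")[-1]` looks like the intended expression.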

def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    st = timer()
    llm = LLMService.query(llm_name=dialog.llm_id)
    if not llm:
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=dialog.llm_id)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 8192
    else:
        max_tokens = llm[0].max_tokens

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]
        for m in messages[:-1]:
            if "doc_ids" in m:
                attachments.extend(m["doc_ids"])

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        chat_logger.info("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    for _ in range(len(questions) // 2):
        questions.append(questions[-1])
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                 dialog.similarity_threshold,
                                 dialog.vector_similarity_weight,
                                 doc_ids=attachments,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    chat_logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
    retrieval_tm = timer()

    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n------\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_tm
        refs = []
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retr.insert_citations(answer,
                                                [ck["content_ltks"]
                                                 for ck in kbinfos["chunks"]],
                                                [ck["vector"]
                                                 for ck in kbinfos["chunks"]],
                                                embd_mdl,
                                                tkweight=1 - dialog.vector_similarity_weight,
                                                vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs: recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        done_tm = timer()
        prompt += "\n### Elapsed\n  - Retrieval: %.1f ms\n  - LLM: %.1f ms" % ((retrieval_tm - st) * 1000, (done_tm - st) * 1000)
        return {"answer": answer, "reference": refs, "prompt": prompt}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            answer = ans
            delta_ans = ans[len(last_ans):]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
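
# chat() is a generator over a dialog turn: it picks the vector or
# knowledge-graph retriever depending on the kb parser, optionally answers
# straight from SQL when a field map exists, fits the prompt into the model's
# token window via message_fit_in, streams partial answers (with hexlified TTS
# audio when enabled), and finally yields the citation-decorated answer
# carrying retrieval/LLM timings in its "prompt" field.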

def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    sys_prompt = "你是一个DBA。你需要这对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
    user_promt = """
表名:{};
数据库表字段说明如下:
{}

问题如下:
{}
请写出SQL, 且只要SQL,不要有其他说明及文字。
""".format(
        index_name(tenant_id),
        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
        question
    )
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_promt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
            "temperature": 0.06})
        print(user_promt, sql)
        chat_logger.info(f"“{question}”==>{user_promt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
        sql = re.sub(r"([;;]|```).*", "", sql)
        if sql[:len("select ")] != "select ":
            return None, None
        if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
            if sql[:len("select *")] != "select *":
                sql = "select doc_id,docnm_kwd," + sql[6:]
            else:
                flds = []
                for k in field_map.keys():
                    if k in forbidden_select_fields4resume:
                        continue
                    if len(flds) > 11:
                        break
                    flds.append(k)
                sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

        print(f"“{question}” get SQL(refined): {sql}")
        chat_logger.info(f"“{question}” get SQL(refined): {sql}")
        tried_times += 1
        return retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        user_promt = """
表名:{};
数据库表字段说明如下:
{}

问题如下:
{}

你上一次给出的错误SQL如下:
{}

后台报错如下:
{}

请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
""".format(
            index_name(tenant_id),
            "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
            question, sql, tbl["error"]
        )
        tbl, sql = get_table()
        chat_logger.info("TRY it again: {}".format(sql))

    chat_logger.info("GET table: {}".format(tbl))
    print(tbl)
    if tbl.get("error") or len(tbl["rows"]) == 0:
        return None

    docid_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "doc_id"])
    docnm_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "docnm_kwd"])
    clmn_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]

    # compose markdown table
    clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                          tbl["columns"][i]["name"])) for i in
                            clmn_idx]) + ("|Source|" if docid_idx and docid_idx else "|")

    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
           ("|------|" if docid_idx and docid_idx else "")

    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    if quota:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
        chat_logger.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []},
            "prompt": sys_prompt
        }

    docid_idx = list(docid_idx)[0]
    docnm_idx = list(docnm_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
                                   doc_aggs.items()]},
        "prompt": sys_prompt
    }
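
# The Chinese prompts in use_sql say, roughly: "You are a DBA. Given the
# table's field schema and the user's question list, write the SQL for the
# last question. Output only SQL, with no extra explanation." The retry prompt
# additionally embeds the previous (incorrect) SQL and the backend error so
# the model can correct itself; get_table() is retried while tried_times <= 2.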

def relevant(tenant_id, llm_id, question, contents: list):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
You are a grader assessing relevance of a retrieved document to a user question.
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
Give a binary 'yes' or 'no' score to indicate whether the document is relevant to the question.
No other words needed except 'yes' or 'no'.
"""
    if not contents: return False
    contents = "Documents: \n" + " - ".join(contents)
    contents = f"Question: {question}\n" + contents
    if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
        contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
    if ans.lower().find("yes") >= 0: return True
    return False


def rewrite(tenant_id, llm_id, question):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
You are an expert at query expansion to generate a paraphrasing of a question.
I can't retrieve relevant information from the knowledge base by using user's question directly.
You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
changing the way of expression, translating the original question into another language (English/Chinese), etc.
And return 5 versions of question and one is from translation.
Just list the question. No other words are needed.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
    return ans
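
# relevant() and rewrite() support the self-RAG loop: relevant() asks the
# model for a binary yes/no grade of whether the retrieved chunks match the
# question, and rewrite() expands the query (synonyms, spelled-out
# abbreviations, translation) when the first retrieval looks irrelevant.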

def tts(tts_mdl, text):
    if not tts_mdl or not text: return
    bin = b""
    for chunk in tts_mdl.tts(text):
        bin += chunk
    return binascii.hexlify(bin).decode("utf-8")
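
# tts() concatenates the audio chunks produced by the TTS model and
# hex-encodes the bytes with binascii so they can travel inside the JSON
# "audio_binary" field of a streamed answer.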

def ask(question, kb_ids, tenant_id):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length

    kbinfos = retr.retrieval(question, embd_mdl, tenant_id, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    used_token_count = 0
    for i, c in enumerate(knowledges):
        used_token_count += num_tokens_from_string(c)
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            break

    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.
Requirements and restrictions:
  - DO NOT make things up, especially for numbers.
  - If the information from knowledge is irrelevant to user's question, JUST SAY: Sorry, no relevant information provided.
  - Answer with markdown format text.
  - Answer in language of user's question.
  - DO NOT make things up, especially for numbers.

### Information from knowledge bases
%s

The above is information from knowledge bases.

""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retr.insert_citations(answer,
                                            [ck["content_ltks"]
                                             for ck in kbinfos["chunks"]],
                                            [ck["vector"]
                                             for ck in kbinfos["chunks"]],
                                            embd_mdl,
                                            tkweight=0.7,
                                            vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)
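
# ask() is a lighter one-shot variant of chat(): fixed retrieval settings
# (page 1, top 12 chunks, 0.1 similarity threshold, 0.3 vector weight), a
# fixed "Miss R" system prompt, streamed answers, and the same citation
# decoration, but no dialog-level configuration.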

--- a/api/db/services/document_service.py
+++ b/api/db/services/document_service.py
@@ -1,350 +1,532 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import hashlib
import json
import os
import random
import re
import traceback
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from datetime import datetime
from io import BytesIO

from elasticsearch_dsl import Q
from peewee import fn

from api.db.db_utils import bulk_insert_into_db
from api.settings import stat_logger
from api.utils import current_timestamp, get_format_time, get_uuid
from api.utils.file_utils import get_project_base_directory
from graphrag.mind_map_extractor import MindMapExtractor
from rag.settings import SVR_QUEUE_NAME
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL
from rag.nlp import search, rag_tokenizer

from api.db import FileType, TaskStatus, ParserType, LLMType
from api.db.db_models import DB, Knowledgebase, Tenant, Task
from api.db.db_models import Document
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db import StatusEnum
from rag.utils.redis_conn import REDIS_CONN


class DocumentService(CommonService):
    model = Document

    @classmethod
    @DB.connection_context()
    def get_by_kb_id(cls, kb_id, page_number, items_per_page,
                     orderby, desc, keywords):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == kb_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)
        count = docs.count()
        if desc:
            docs = docs.order_by(cls.model.getter_by(orderby).desc())
        else:
            docs = docs.order_by(cls.model.getter_by(orderby).asc())

        docs = docs.paginate(page_number, items_per_page)

        return list(docs.dicts()), count

    @classmethod
    @DB.connection_context()
    def list_documents_in_dataset(cls, dataset_id, offset, count, order_by, descend, keywords):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == dataset_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == dataset_id)

        total = docs.count()

        if descend == 'True':
            docs = docs.order_by(cls.model.getter_by(order_by).desc())
        if descend == 'False':
            docs = docs.order_by(cls.model.getter_by(order_by).asc())

        docs = list(docs.dicts())
        docs_length = len(docs)

        if offset < 0 or offset > docs_length:
            raise IndexError("Offset is out of the valid range.")

        if count == -1:
            return docs[offset:], total

        return docs[offset:offset + count], total
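
    # get_by_kb_id uses peewee's 1-based paginate(page_number, items_per_page),
    # while list_documents_in_dataset slices the materialized list with
    # offset/count, where count == -1 means "everything from offset on".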

    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
        if not cls.save(**doc):
            raise RuntimeError("Database error (Document)!")
        e, doc = cls.get_by_id(doc["id"])
        if not e:
            raise RuntimeError("Database error (Document retrieval)!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num + 1}):
            raise RuntimeError("Database error (Knowledgebase)!")
        return doc

    @classmethod
    @DB.connection_context()
    def remove_document(cls, doc, tenant_id):
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        cls.clear_chunk_num(doc.id)
        return cls.delete_by_id(doc.id)

    @classmethod
    @DB.connection_context()
    def get_newly_uploaded(cls):
        fields = [
            cls.model.id,
            cls.model.kb_id,
            cls.model.parser_id,
            cls.model.parser_config,
            cls.model.name,
            cls.model.type,
            cls.model.location,
            cls.model.size,
            Knowledgebase.tenant_id,
            Tenant.embd_id,
            Tenant.img2txt_id,
            Tenant.asr_id,
            cls.model.update_time]
        docs = cls.model.select(*fields) \
            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))\
            .where(
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress == 0,
                cls.model.update_time >= current_timestamp() - 1000 * 600,
                cls.model.run == TaskStatus.RUNNING.value)\
            .order_by(cls.model.update_time.asc())
        return list(docs.dicts())

    @classmethod
    @DB.connection_context()
    def get_unfinished_docs(cls):
        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg, cls.model.run]
        docs = cls.model.select(*fields) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
                ~(cls.model.type == FileType.VIRTUAL.value),
                cls.model.progress < 1,
                cls.model.progress > 0)
        return list(docs.dicts())
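
    # get_newly_uploaded only picks up RUNNING documents whose update_time is
    # within the last 10 minutes (current_timestamp() is in milliseconds, so
    # 1000 * 600 is the 600-second window); get_unfinished_docs feeds
    # update_progress below.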

    @classmethod
    @DB.connection_context()
    def increment_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duation):
        num = cls.model.update(token_num=cls.model.token_num + token_num,
                               chunk_num=cls.model.chunk_num + chunk_num,
                               process_duation=cls.model.process_duation + duation).where(
            cls.model.id == doc_id).execute()
        if num == 0:
            raise LookupError(
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num + token_num,
            chunk_num=Knowledgebase.chunk_num + chunk_num).where(
            Knowledgebase.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def decrement_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duation):
        num = cls.model.update(token_num=cls.model.token_num - token_num,
                               chunk_num=cls.model.chunk_num - chunk_num,
                               process_duation=cls.model.process_duation + duation).where(
            cls.model.id == doc_id).execute()
        if num == 0:
            raise LookupError(
                "Document not found which is supposed to be there")
        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num - token_num,
            chunk_num=Knowledgebase.chunk_num - chunk_num
        ).where(
            Knowledgebase.id == kb_id).execute()
        return num
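
    # increment_chunk_num/decrement_chunk_num keep the per-document and
    # per-knowledgebase token and chunk counters in sync; both add the elapsed
    # duration to process_duation (spelling as in the schema).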

    @classmethod
    @DB.connection_context()
    def clear_chunk_num(cls, doc_id):
        doc = cls.model.get_by_id(doc_id)
        assert doc, "Can't find document in database."

        num = Knowledgebase.update(
            token_num=Knowledgebase.token_num - doc.token_num,
            chunk_num=Knowledgebase.chunk_num - doc.chunk_num,
            doc_num=Knowledgebase.doc_num - 1
        ).where(
            Knowledgebase.id == doc.kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def get_tenant_id(cls, doc_id):
        docs = cls.model.select(
            Knowledgebase.tenant_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_tenant_id_by_name(cls, name):
        docs = cls.model.select(
            Knowledgebase.tenant_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.name == name, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["tenant_id"]

    @classmethod
    @DB.connection_context()
    def get_embd_id(cls, doc_id):
        docs = cls.model.select(
            Knowledgebase.embd_id).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)).where(
            cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
        docs = docs.dicts()
        if not docs:
            return
        return docs[0]["embd_id"]

    @classmethod
    @DB.connection_context()
    def get_doc_id_by_doc_name(cls, doc_name):
        fields = [cls.model.id]
        doc_id = cls.model.select(*fields) \
            .where(cls.model.name == doc_name)
        doc_id = doc_id.dicts()
        if not doc_id:
            return
        return doc_id[0]["id"]

    @classmethod
    @DB.connection_context()
    def get_thumbnails(cls, docids):
        fields = [cls.model.id, cls.model.thumbnail]
        return list(cls.model.select(
            *fields).where(cls.model.id.in_(docids)).dicts())

    @classmethod
    @DB.connection_context()
    def update_parser_config(cls, id, config):
        e, d = cls.get_by_id(id)
        if not e:
            raise LookupError(f"Document({id}) not found.")

        def dfs_update(old, new):
            for k, v in new.items():
                if k not in old:
                    old[k] = v
                    continue
                if isinstance(v, dict):
                    assert isinstance(old[k], dict)
                    dfs_update(old[k], v)
                else:
                    old[k] = v
        dfs_update(d.parser_config, config)
        cls.update_by_id(id, {"parser_config": d.parser_config})
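
    # update_parser_config deep-merges the incoming config into the stored
    # parser_config via dfs_update: nested dicts are merged recursively,
    # scalars are overwritten, and unknown keys are added.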

    @classmethod
    @DB.connection_context()
    def get_doc_count(cls, tenant_id):
        docs = cls.model.select(cls.model.id).join(Knowledgebase,
                                                   on=(Knowledgebase.id == cls.model.kb_id)).where(
            Knowledgebase.tenant_id == tenant_id)
        return len(docs)

    @classmethod
    @DB.connection_context()
    def begin2parse(cls, docid):
        cls.update_by_id(
            docid, {"progress": random.random() * 1 / 100.,
                    "progress_msg": "Task dispatched...",
                    "process_begin_at": get_format_time()
                    })

    @classmethod
    @DB.connection_context()
    def update_progress(cls):
        docs = cls.get_unfinished_docs()
        for d in docs:
            try:
                tsks = Task.query(doc_id=d["id"], order_by=Task.create_time)
                if not tsks:
                    continue
                msg = []
                prg = 0
                finished = True
                bad = 0
                e, doc = DocumentService.get_by_id(d["id"])
                status = doc.run  # TaskStatus.RUNNING.value
                for t in tsks:
                    if 0 <= t.progress < 1:
                        finished = False
                    prg += t.progress if t.progress >= 0 else 0
                    if t.progress_msg not in msg:
                        msg.append(t.progress_msg)
                    if t.progress == -1:
                        bad += 1
                prg /= len(tsks)
                if finished and bad:
                    prg = -1
                    status = TaskStatus.FAIL.value
                elif finished:
                    if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(" raptor") < 0:
                        queue_raptor_tasks(d)
                        prg *= 0.98
                        msg.append("------ RAPTOR -------")
                    else:
                        status = TaskStatus.DONE.value

                msg = "\n".join(msg)
                info = {
                    "process_duation": datetime.timestamp(
                        datetime.now()) -
                    d["process_begin_at"].timestamp(),
                    "run": status}
                if prg != 0:
                    info["progress"] = prg
                if msg:
                    info["progress_msg"] = msg
                cls.update_by_id(d["id"], info)
            except Exception as e:
                stat_logger.error("fetch task exception:" + str(e))
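
    # update_progress averages the progress of a document's tasks, marks the
    # document FAIL when any finished task reported -1, and, when RAPTOR is
    # enabled and not yet run, enqueues a raptor task and holds progress at
    # 98% until it completes.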
|
@classmethod
|
||||||
|
@DB.connection_context()
|
||||||
|
def get_kb_doc_count(cls, kb_id):
|
||||||
|
return len(cls.model.select(cls.model.id).where(
|
||||||
|
cls.model.kb_id == kb_id).dicts())
|
||||||
|
|
||||||
|
|
    @classmethod
    @DB.connection_context()
    def do_cancel(cls, doc_id):
        try:
            _, doc = DocumentService.get_by_id(doc_id)
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception:
            pass
        return False
def queue_raptor_tasks(doc):
    def new_task():
        nonlocal doc
        return {
            "id": get_uuid(),
            "doc_id": doc["id"],
            "from_page": 0,
            "to_page": -1,
            "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing For Tree-Organized Retrieval)."
        }

    task = new_task()
    bulk_insert_into_db(Task, [task], True)
    task["type"] = "raptor"
    assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
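Note the ordering in queue_raptor_tasks: the Task row is persisted via bulk_insert_into_db before the message is pushed to Redis, so a consumer that wakes up immediately can already see the row; the "type": "raptor" marker is added only to the queued copy. A minimal usage sketch with a hypothetical document dict (update_progress passes the full document record, but only "id" is read here):

finished_doc = {"id": "b37a9f4e0c5d4e21"}  # hypothetical document id
queue_raptor_tasks(finished_doc)  # inserts a Task row, then enqueues it on SVR_QUEUE_NAME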
def doc_upload_and_parse(conversation_id, file_objs, user_id):
    from rag.app import presentation, picture, naive, audio, email
    from api.db.services.dialog_service import ConversationService, DialogService
    from api.db.services.file_service import FileService
    from api.db.services.llm_service import LLMBundle
    from api.db.services.user_service import TenantService
    from api.db.services.api_service import API4ConversationService

    e, conv = ConversationService.get_by_id(conversation_id)
    if not e:
        e, conv = API4ConversationService.get_by_id(conversation_id)
    assert e, "Conversation not found!"

    e, dia = DialogService.get_by_id(conv.dialog_id)
    kb_id = dia.kb_ids[0]
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    idxnm = search.index_name(kb.tenant_id)
    if not ELASTICSEARCH.indexExist(idxnm):
        ELASTICSEARCH.createIdx(idxnm, json.load(
            open(os.path.join(get_project_base_directory(), "conf", "mapping.json"), "r")))

    embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id, lang=kb.language)

    err, files = FileService.upload_document(kb, file_objs, user_id)
    assert not err, "\n".join(err)

    def dummy(prog=None, msg=""):
        pass

    FACTORY = {
        ParserType.PRESENTATION.value: presentation,
        ParserType.PICTURE.value: picture,
        ParserType.AUDIO.value: audio,
        ParserType.EMAIL.value: email
    }
    parser_config = {"chunk_token_num": 4096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
    exe = ThreadPoolExecutor(max_workers=12)
    threads = []
    doc_nm = {}
    for d, blob in files:
        doc_nm[d["id"]] = d["name"]
    for d, blob in files:
        kwargs = {
            "callback": dummy,
            "parser_config": parser_config,
            "from_page": 0,
            "to_page": 100000,
            "tenant_id": kb.tenant_id,
            "lang": kb.language
        }
        threads.append(exe.submit(FACTORY.get(d["parser_id"], naive).chunk, d["name"], blob, **kwargs))

    docs = []  # accumulate chunks across all files; resetting this per file would drop all but the last
    for (docinfo, _), th in zip(files, threads):
        doc = {
            "doc_id": docinfo["id"],
            "kb_id": [kb.id]
        }
        for ck in th.result():
            d = deepcopy(doc)
            d.update(ck)
            md5 = hashlib.md5()
            md5.update((ck["content_with_weight"] + str(d["doc_id"])).encode("utf-8"))
            d["_id"] = md5.hexdigest()
            d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
            d["create_timestamp_flt"] = datetime.now().timestamp()
            if not d.get("image"):
                docs.append(d)
                continue

            output_buffer = BytesIO()
            if isinstance(d["image"], bytes):
                output_buffer = BytesIO(d["image"])
            else:
                d["image"].save(output_buffer, format='JPEG')

            STORAGE_IMPL.put(kb.id, d["_id"], output_buffer.getvalue())
            d["img_id"] = "{}-{}".format(kb.id, d["_id"])
            del d["image"]
            docs.append(d)

    parser_ids = {d["id"]: d["parser_id"] for d, _ in files}
    docids = [d["id"] for d, _ in files]
    chunk_counts = {id: 0 for id in docids}
    token_counts = {id: 0 for id in docids}
    es_bulk_size = 64

    def embedding(doc_id, cnts, batch_size=16):
        nonlocal embd_mdl, chunk_counts, token_counts
        vects = []
        for i in range(0, len(cnts), batch_size):
            vts, c = embd_mdl.encode(cnts[i: i + batch_size])
            vects.extend(vts.tolist())
            chunk_counts[doc_id] += len(cnts[i:i + batch_size])
            token_counts[doc_id] += c
        return vects

    _, tenant = TenantService.get_by_id(kb.tenant_id)
    llm_bdl = LLMBundle(kb.tenant_id, LLMType.CHAT, tenant.llm_id)
    for doc_id in docids:
        cks = [c for c in docs if c["doc_id"] == doc_id]

        if parser_ids[doc_id] != ParserType.PICTURE.value:
            mindmap = MindMapExtractor(llm_bdl)
            try:
                mind_map = json.dumps(
                    mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
                    ensure_ascii=False, indent=2)
                if len(mind_map) < 32:
                    raise Exception("Few content: " + mind_map)
                cks.append({
                    "id": get_uuid(),
                    "doc_id": doc_id,
                    "kb_id": [kb.id],
                    "docnm_kwd": doc_nm[doc_id],
                    "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", doc_nm[doc_id])),
                    "content_ltks": "",
                    "content_with_weight": mind_map,
                    "knowledge_graph_kwd": "mind_map"
                })
            except Exception:
                stat_logger.error("Mind map generation error: " + traceback.format_exc())

        vects = embedding(doc_id, [c["content_with_weight"] for c in cks])
        assert len(cks) == len(vects)
        for i, d in enumerate(cks):
            v = vects[i]
            d["q_%d_vec" % len(v)] = v
        for b in range(0, len(cks), es_bulk_size):
            ELASTICSEARCH.bulk(cks[b:b + es_bulk_size], idxnm)

        DocumentService.increment_chunk_num(
            doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0)

    return [d["id"] for d, _ in files]
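doc_upload_and_parse is synchronous end-to-end: it uploads, chunks in a thread pool, embeds, and bulk-indexes before returning the new document ids. A hypothetical Flask-style caller (the handler name, route wiring, and form field are illustrative; file_objs are werkzeug FileStorage objects exposing .filename and .read()):

from flask import request
from flask_login import current_user

def upload_to_conversation():
    doc_ids = doc_upload_and_parse(
        conversation_id=request.form["conversation_id"],
        file_objs=request.files.getlist("file"),
        user_id=current_user.id)
    return {"doc_ids": doc_ids}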
@@ -76,7 +76,7 @@ class File2DocumentService(CommonService):
         f2d = cls.get_by_file_id(file_id)
         if f2d:
             file = File.get_by_id(f2d[0].file_id)
-            if file.source_type == FileSource.LOCAL:
+            if not file.source_type or file.source_type == FileSource.LOCAL:
                 return file.parent_id, file.location
         doc_id = f2d[0].document_id
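The relaxed guard above treats an unset source_type the same as FileSource.LOCAL, so File rows written before the column was populated still resolve to local storage. A tiny sketch of the predicate; the None and empty-string inputs stand in for such legacy values:

from api.db import FileSource

for source_type in (None, "", FileSource.LOCAL):
    assert not source_type or source_type == FileSource.LOCAL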
@@ -13,16 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import re
+import os
 from flask_login import current_user
 from peewee import fn
 
-from api.db import FileType, KNOWLEDGEBASE_FOLDER_NAME, FileSource
+from api.db import FileType, KNOWLEDGEBASE_FOLDER_NAME, FileSource, ParserType
 from api.db.db_models import DB, File2Document, Knowledgebase
 from api.db.db_models import File, Document
+from api.db.services import duplicate_name
 from api.db.services.common_service import CommonService
 from api.db.services.document_service import DocumentService
 from api.db.services.file2document_service import File2DocumentService
 from api.utils import get_uuid
+from api.utils.file_utils import filename_type, thumbnail
+from rag.utils.storage_factory import STORAGE_IMPL
 
 
 class FileService(CommonService):
@@ -57,6 +62,12 @@ class FileService(CommonService):
             if file["type"] == FileType.FOLDER.value:
                 file["size"] = cls.get_folder_size(file["id"])
                 file['kbs_info'] = []
+                children = list(cls.model.select().where(
+                    (cls.model.tenant_id == tenant_id),
+                    (cls.model.parent_id == file["id"]),
+                    ~(cls.model.id == file["id"]),
+                ).dicts())
+                file["has_child_folder"] = any(value["type"] == FileType.FOLDER.value for value in children)
                 continue
             kbs_info = cls.get_kb_id_by_file_id(file['id'])
             file['kbs_info'] = kbs_info
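has_child_folder is derived purely from the fetched child rows, so the flag reduces to a single any() over their "type" values. A standalone sketch with made-up rows (only the "type" key matters; the second row's type is illustrative):

from api.db import FileType

children = [{"type": FileType.FOLDER.value}, {"type": FileType.PDF.value}]
has_child_folder = any(c["type"] == FileType.FOLDER.value for c in children)
assert has_child_folder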
@@ -312,4 +323,62 @@ class FileService(CommonService):
             cls.filter_update((cls.model.id << file_ids, ), { 'parent_id': folder_id })
         except Exception as e:
             print(e)
             raise RuntimeError("Database error (File move)!")
+
+    @classmethod
+    @DB.connection_context()
+    def upload_document(self, kb, file_objs, user_id):
+        root_folder = self.get_root_folder(user_id)
+        pf_id = root_folder["id"]
+        self.init_knowledgebase_docs(pf_id, user_id)
+        kb_root_folder = self.get_kb_folder(user_id)
+        kb_folder = self.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
+
+        err, files = [], []
+        for file in file_objs:
+            try:
+                MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
+                if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
+                    raise RuntimeError("Exceed the maximum file number of a free user!")
+
+                filename = duplicate_name(
+                    DocumentService.query,
+                    name=file.filename,
+                    kb_id=kb.id)
+                filetype = filename_type(filename)
+                if filetype == FileType.OTHER.value:
+                    raise RuntimeError("This type of file has not been supported yet!")
+
+                location = filename
+                while STORAGE_IMPL.obj_exist(kb.id, location):
+                    location += "_"
+                blob = file.read()
+                STORAGE_IMPL.put(kb.id, location, blob)
+                doc = {
+                    "id": get_uuid(),
+                    "kb_id": kb.id,
+                    "parser_id": kb.parser_id,
+                    "parser_config": kb.parser_config,
+                    "created_by": user_id,
+                    "type": filetype,
+                    "name": filename,
+                    "location": location,
+                    "size": len(blob),
+                    "thumbnail": thumbnail(filename, blob)
+                }
+                if doc["type"] == FileType.VISUAL:
+                    doc["parser_id"] = ParserType.PICTURE.value
+                if doc["type"] == FileType.AURAL:
+                    doc["parser_id"] = ParserType.AUDIO.value
+                if re.search(r"\.(ppt|pptx|pages)$", filename):
+                    doc["parser_id"] = ParserType.PRESENTATION.value
+                if re.search(r"\.(eml)$", filename):
+                    doc["parser_id"] = ParserType.EMAIL.value
+                DocumentService.insert(doc)
+
+                FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
+                files.append((doc, blob))
+            except Exception as e:
+                err.append(file.filename + ": " + str(e))
+
+        return err, files
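upload_document collects per-file errors instead of raising, so each caller decides whether a partial upload is acceptable (doc_upload_and_parse above asserts on any error). A hypothetical caller, where kb is a Knowledgebase row and file_objs are werkzeug FileStorage uploads:

from flask_login import current_user

err, files = FileService.upload_document(kb, file_objs, user_id=current_user.id)
if err:
    raise RuntimeError("\n".join(err))  # each entry is "<filename>: <reason>"
for doc, blob in files:
    print(doc["id"], doc["name"], doc["parser_id"], len(blob))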