Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)

Compare commits: 539 commits
.github/workflows/release.yml (vendored): 10 changed lines
@@ -75,12 +75,6 @@ jobs:
# The body field does not support environment variable substitution directly.
body_path: release_body.md

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

# https://github.com/marketplace/actions/docker-login
- name: Login to Docker Hub
uses: docker/login-action@v3

@@ -113,12 +107,12 @@ jobs:
if: startsWith(github.ref, 'refs/tags/v')
run: |
cd sdk/python && \
poetry build
uv build

- name: Publish package distributions to PyPI
if: startsWith(github.ref, 'refs/tags/v')
uses: pypa/gh-action-pypi-publish@release/v1
with:
packages-dir: dist/
packages-dir: sdk/python/dist/
password: ${{ secrets.PYPI_API_TOKEN }}
verbose: true
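The publish job now builds the SDK with uv instead of Poetry and uploads from the SDK's own dist directory. As a rough local check of what that job produces (a sketch; the paths come from the workflow above, nothing else is assumed):

```bash
# Build the Python SDK the way the release job does and inspect the artifacts that
# pypa/gh-action-pypi-publish would pick up from packages-dir: sdk/python/dist/.
cd sdk/python
uv build     # writes the sdist and wheel into sdk/python/dist/
ls dist/
```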
.github/workflows/tests.yml (vendored): 36 changed lines
@@ -32,12 +32,9 @@ jobs:
# https://github.com/hmarr/debug-action
#- uses: hmarr/debug-action@v2

- name: Show PR labels
- name: Show who triggered this workflow
run: |
echo "Workflow triggered by ${{ github.event_name }}"
if [[ ${{ github.event_name }} == 'pull_request' ]]; then
echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
fi

- name: Ensure workspace ownership
run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

@@ -68,7 +65,7 @@ jobs:

- name: Start ragflow:nightly-slim
run: |
echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
sudo docker compose -f docker/docker-compose.yml up -d

- name: Stop ragflow:nightly-slim

@@ -78,7 +75,7 @@ jobs:

- name: Start ragflow:nightly
run: |
echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
sudo docker compose -f docker/docker-compose.yml up -d

- name: Run sdk tests against Elasticsearch

@@ -89,7 +86,7 @@ jobs:
echo "Waiting for service to be available..."
sleep 5
done
cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

- name: Run frontend api tests against Elasticsearch
run: |

@@ -99,8 +96,17 @@ jobs:
echo "Waiting for service to be available..."
sleep 5
done
cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

- name: Run http api tests against Elasticsearch
run: |
export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
export HOST_ADDRESS=http://host.docker.internal:9380
until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
echo "Waiting for service to be available..."
sleep 5
done
cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow"

- name: Stop ragflow:nightly
if: always() # always run this step even if previous steps failed

@@ -119,7 +125,7 @@ jobs:
echo "Waiting for service to be available..."
sleep 5
done
cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

- name: Run frontend api tests against Infinity
run: |

@@ -129,7 +135,17 @@ jobs:
echo "Waiting for service to be available..."
sleep 5
done
cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

- name: Run http api tests against Infinity
run: |
export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
export HOST_ADDRESS=http://host.docker.internal:9380
until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
echo "Waiting for service to be available..."
sleep 5
done
cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow"

- name: Stop ragflow:nightly
if: always() # always run this step even if previous steps failed
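One easy-to-miss change above is `echo "RAGFLOW_IMAGE=..."` becoming `echo -e "\nRAGFLOW_IMAGE=..."`. Presumably this guards against docker/.env lacking a trailing newline, in which case a plain append would glue the variable onto the previous line. A quick sketch of the difference (the /tmp file is only for illustration):

```bash
# Simulate an .env file that does not end with a newline, then append both ways.
printf 'DOC_ENGINE=elasticsearch' > /tmp/env-demo
echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> /tmp/env-demo      # old style: lands on the same line
cat /tmp/env-demo

printf 'DOC_ENGINE=elasticsearch' > /tmp/env-demo
echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> /tmp/env-demo # new style: starts a fresh line first
cat /tmp/env-demo
```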
.gitignore (vendored): 3 changed lines
@@ -38,3 +38,6 @@ sdk/python/dist/
sdk/python/ragflow_sdk.egg-info/
huggingface.co/
nltk_data/

# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
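The new ignore rule matches any name containing a run of ten consecutive hex characters, which covers hash-named temporary files like the one cited in the comment. A quick, hypothetical way to confirm the pattern behaves as intended in a checkout that includes this rule:

```bash
# The file name below is the example hash from the .gitignore comment above.
touch 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
git check-ignore -v 9b5ad71b2ce5302211f9c61530b329a4922fc6a4   # prints the matching .gitignore line
```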
@@ -1,6 +1,6 @@
# Contribution guidelines

This document offers guidlines and major considerations for submitting your contributions to RAGFlow.
This document offers guidelines and major considerations for submitting your contributions to RAGFlow.

- To report a bug, file a [GitHub issue](https://github.com/infiniflow/ragflow/issues/new/choose) with us.
- For further questions, you can explore existing discussions or initiate a new one in [Discussions](https://github.com/orgs/infiniflow/discussions).
Dockerfile: 74 changed lines
@@ -59,33 +59,45 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
apt install -y default-jdk && \
apt install -y libatk-bridge2.0-0 && \
apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
apt install -y libjemalloc-dev && \
apt install -y python3-pip pipx nginx unzip curl wget git vim less

RUN if [ "$NEED_MIRROR" == "1" ]; then \
pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
pip3 config set global.trusted-host mirrors.aliyun.com; \
mkdir -p /etc/uv && \
echo "[[index]]" > /etc/uv/uv.toml && \
echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
echo "default = true" >> /etc/uv/uv.toml; \
fi; \
pipx install poetry; \
if [ "$NEED_MIRROR" == "1" ]; then \
pipx inject poetry poetry-plugin-pypi-mirror; \
fi
pipx install uv

ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
ENV PATH=/root/.local/bin:$PATH
# Configure Poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_REQUESTS_TIMEOUT=15

# nodejs 12.22 on Ubuntu 22.04 is too old
RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
apt purge -y nodejs npm && \
apt autoremove && \
apt purge -y nodejs npm cargo && \
apt autoremove -y && \
apt update && \
apt install -y nodejs cargo

apt install -y nodejs

# A modern version of cargo is needed for the latest version of the Rust compiler.
RUN apt update && apt install -y curl build-essential \
&& if [ "$NEED_MIRROR" == "1" ]; then \
# Use TUNA mirrors for rustup/rust dist files
export RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup"; \
export RUSTUP_UPDATE_ROOT="https://mirrors.tuna.tsinghua.edu.cn/rustup/rustup"; \
echo "Using TUNA mirrors for Rustup."; \
fi; \
# Force curl to use HTTP/1.1
curl --proto '=https' --tlsv1.2 --http1.1 -sSf https://sh.rustup.rs | bash -s -- -y --profile minimal \
&& echo 'export PATH="/root/.cargo/bin:${PATH}"' >> /root/.bashrc

ENV PATH="/root/.cargo/bin:${PATH}"

RUN cargo --version && rustc --version

# Add msssql ODBC driver
# macOS ARM64 environment, install msodbcsql18.

@@ -94,11 +106,12 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \
curl https://packages.microsoft.com/config/ubuntu/22.04/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
apt update && \
if [ -n "$ARCH" ] && [ "$ARCH" = "arm64" ]; then \
# MacOS ARM64
arch="$(uname -m)"; \
if [ "$arch" = "arm64" ] || [ "$arch" = "aarch64" ]; then \
# ARM64 (macOS/Apple Silicon or Linux aarch64)
ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql18; \
else \
# (x86_64)
# x86_64 or others
ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql17; \
fi || \
{ echo "Failed to install ODBC driver"; exit 1; }

@@ -131,23 +144,27 @@ USER root

WORKDIR /ragflow

# install dependencies from poetry.lock file
COPY pyproject.toml poetry.toml poetry.lock ./
# install dependencies from uv.lock file
COPY pyproject.toml uv.lock ./

RUN --mount=type=cache,id=ragflow_poetry,target=/root/.cache/pypoetry,sharing=locked \
# https://github.com/astral-sh/uv/issues/10462
# uv records index url into uv.lock but doesn't failover among multiple indexes
RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
if [ "$NEED_MIRROR" == "1" ]; then \
export POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/; \
sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
else \
sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
fi; \
if [ "$LIGHTEN" == "1" ]; then \
poetry install --no-root; \
uv sync --python 3.10 --frozen; \
else \
poetry install --no-root --with=full; \
uv sync --python 3.10 --frozen --all-extras; \
fi

COPY web web
COPY docs docs
RUN --mount=type=cache,id=ragflow_npm,target=/root/.npm,sharing=locked \
cd web && npm install --force && npm run build
cd web && npm install && npm run build

COPY .git /ragflow/.git

@@ -180,11 +197,12 @@ COPY deepdoc deepdoc
COPY rag rag
COPY agent agent
COPY graphrag graphrag
COPY pyproject.toml poetry.toml poetry.lock ./
COPY agentic_reasoning agentic_reasoning
COPY pyproject.toml uv.lock ./

COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
COPY docker/entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh
COPY docker/entrypoint.sh docker/entrypoint-parser.sh ./
RUN chmod +x ./entrypoint*.sh

# Copy compiled web pages
COPY --from=builder /ragflow/web/dist /ragflow/web/dist
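Taken together, the Dockerfile changes swap the Poetry toolchain (pipx-installed Poetry, the POETRY_* environment variables, poetry.lock) for uv (an /etc/uv/uv.toml index when mirroring, uv.lock, `uv sync --frozen`) and ship a second entrypoint script. A sketch of the two build paths this enables, assuming `LIGHTEN` and `NEED_MIRROR` are declared build arguments as the RUN steps suggest:

```bash
# Slim image: runs `uv sync --python 3.10 --frozen` without extras.
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .

# Full image through the Aliyun/TUNA mirrors: runs `uv sync --python 3.10 --frozen --all-extras`.
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```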
README.md: 90 changed lines
@@ -7,9 +7,11 @@
<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_tzh.md">繁体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a>
<a href="./README_id.md">Bahasa Indonesia</a> |
<a href="/README_pt_br.md">Português (Brasil)</a>
</p>

<p align="center">

@@ -20,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">

@@ -32,14 +34,14 @@

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary></b>📕 Table of Contents</b></summary>
<summary><b>📕 Table of Contents</b></summary>

- 💡 [What is RAGFlow?](#-what-is-ragflow)
- 🎮 [Demo](#-demo)

@@ -68,6 +70,7 @@ data.
## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>

@@ -75,17 +78,20 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

## 🔥 Latest Updates

- 2024-12-18 Upgrades Document Layout Analysis model in Deepdoc.
- 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
- 2025-02-05 Updates the model list of 'SILICONFLOW' and adds support for Deepseek-R1/DeepSeek-V3.
- 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
- 2024-12-04 Adds support for pagerank score in knowledge base.
- 2024-11-22 Adds more variables to Agent.
- 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
- 2024-08-22 Support text to SQL statements through RAG.
- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.

## 🎉 Stay Tuned

⭐️ Star our repository to stay up-to-date with exciting new features and improvements! Get instant notifications for new
releases! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

@@ -134,7 +140,7 @@ releases! 🌟
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> If you have not installed Docker on your local machine (Windows, Mac, or Linux),
see [Install Docker Engine](https://docs.docker.com/engine/install/).
> see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start up the server

@@ -154,7 +160,7 @@ releases! 🌟
> ```
>
> This change will be reset after a system reboot. To ensure your change remains permanent, add or update the
`vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
> `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
>
> ```bash
> vm.max_map_count=262144

@@ -168,19 +174,23 @@ releases! 🌟

3. Start up the server using the pre-built Docker images:

> The command below downloads the `v0.15.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download an RAGFlow edition different from `v0.14.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1` for the full edition `v0.14.1`.
> [!CAUTION]
> All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
> If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

> The command below downloads the `v0.17.1-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.17.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` for the full edition `v0.17.1`.

```bash
$ cd ragflow
$ docker compose -f docker/docker-compose.yml up -d
$ cd ragflow/docker
$ docker compose -f docker-compose.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.15.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.15.0-slim | ≈2 | ❌ | Stable release |
| nightly | ≈9 | :heavy_check_mark: | *Unstable* nightly build |
| nightly-slim | ≈2 | ❌ | *Unstable* nightly build |
|-------------------|-----------------|-----------------------|--------------------------|
| v0.17.1 | ≈9 | :heavy_check_mark: | Stable release |
| v0.17.1-slim | ≈2 | ❌ | Stable release |
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |

4. Check the server status after having the server up and running:

@@ -192,23 +202,21 @@ releases! 🌟

```bash

____ ___ ______ ______ __
____ ___ ______ ______ __
/ __ \ / | / ____// ____// /____ _ __
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:9380
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```

> If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
error because, at that moment, your RAGFlow may not be fully initialized.
> error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.
> With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default
HTTP serving port `80` can be omitted when using the default configurations.
> HTTP serving port `80` can be omitted when using the default configurations.
6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update
the `API_KEY` field with the corresponding API key.

@@ -234,7 +242,7 @@ to `<YOUR_SERVING_PORT>:80`.
Updates to the above configurations require a reboot of all containers to take effect:

> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> $ docker compose -f docker-compose.yml up -d
> ```

### Switch doc engine from Elasticsearch to Infinity

@@ -247,15 +255,18 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors. To swit
$ docker compose -f docker/docker-compose.yml down -v
```

> [!WARNING]
> `-v` will delete the docker container volumes, and the existing data will be cleared.

2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

3. Start the containers:

```bash
$ docker compose -f docker/docker-compose.yml up -d
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.

## 🔧 Build a Docker image without embedding models

@@ -280,28 +291,31 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 🔨 Launch service from source for development

1. Install Poetry, or skip this step if it is already installed:
1. Install uv, or skip this step if it is already installed:

```bash
pipx install poetry
export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
pipx install uv
```

2. Clone the source code and install Python dependencies:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
~/.local/bin/poetry install --sync --no-root --with=full # install RAGFlow dependent python modules
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis
```
```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:

@@ -310,6 +324,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

5. Launch backend service:

```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)

@@ -319,12 +334,13 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
6. Install frontend dependencies:
```bash
cd web
npm install --force
```
npm install
```
7. Launch frontend service:

```bash
npm run dev
```
npm run dev
```
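For reference, the "Launch service from source" steps above condense to the following sequence under the new uv workflow; every command is taken from the README itself, and the success banner shown below applies to this path as well:

```bash
pipx install uv
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras                       # install RAGFlow's Python dependencies
docker compose -f docker/docker-compose-base.yml up -d   # MinIO, Elasticsearch, Redis, MySQL
export HF_ENDPOINT=https://hf-mirror.com                 # only if huggingface.co is unreachable
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
cd web && npm install && npm run dev                     # frontend, in a second terminal
```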
_The following output confirms a successful launch of the system:_

@@ -339,7 +355,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 📜 Roadmap

See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)
See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Community
README_id.md: 89 changed lines
@ -7,9 +7,11 @@
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -20,7 +22,7 @@
|
||||
<img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
|
||||
@ -32,14 +34,14 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Peta Jalan</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
<details open>
|
||||
<summary></b>📕 Daftar Isi</b></summary>
|
||||
<summary><b>📕 Daftar Isi </b> </summary>
|
||||
|
||||
- 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
|
||||
- 🎮 [Demo](#-demo)
|
||||
@ -65,6 +67,7 @@
|
||||
## 🎮 Demo
|
||||
|
||||
Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
@ -72,16 +75,19 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
## 🔥 Pembaruan Terbaru
|
||||
|
||||
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di Deepdoc.
|
||||
- 2025-02-28 dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
|
||||
- 2025-02-05 Memperbarui daftar model 'SILICONFLOW' dan menambahkan dukungan untuk Deepseek-R1/DeepSeek-V3.
|
||||
- 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
|
||||
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
|
||||
- 2024-12-04 Mendukung skor pagerank ke basis pengetahuan.
|
||||
- 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
|
||||
- 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
|
||||
- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.
|
||||
- 2024-08-02 Dukungan GraphRAG yang terinspirasi oleh [graphrag](https://github.com/microsoft/graphrag) dan mind map.
|
||||
|
||||
## 🎉 Tetap Terkini
|
||||
|
||||
⭐️ Star repositori kami untuk tetap mendapat informasi tentang fitur baru dan peningkatan menarik! 🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
@ -147,7 +153,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
> ```
|
||||
>
|
||||
> Perubahan ini akan hilang setelah sistem direboot. Untuk membuat perubahan ini permanen, tambahkan atau perbarui nilai
|
||||
`vm.max_map_count` di **/etc/sysctl.conf**:
|
||||
> `vm.max_map_count` di **/etc/sysctl.conf**:
|
||||
>
|
||||
> ```bash
|
||||
> vm.max_map_count=262144
|
||||
@ -161,21 +167,25 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
3. Bangun image Docker pre-built dan jalankan server:
|
||||
|
||||
> Perintah di bawah ini mengunduh edisi v0.15.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.14.1-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1 untuk edisi lengkap v0.14.1.
|
||||
> [!CAUTION]
|
||||
> Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
|
||||
> Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).
|
||||
|
||||
> Perintah di bawah ini mengunduh edisi v0.17.1-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.17.1-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1 untuk edisi lengkap v0.17.1.
|
||||
|
||||
```bash
|
||||
$ cd ragflow
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.15.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.15.0-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | *Unstable* nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | *Unstable* nightly build |
|
||||
| v0.17.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
4. Periksa status server setelah server aktif dan berjalan:
|
||||
1. Periksa status server setelah server aktif dan berjalan:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
@ -185,24 +195,22 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
```bash
|
||||
|
||||
____ ___ ______ ______ __
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
* Running on http://127.0.0.1:9380
|
||||
* Running on http://x.x.x.x:9380
|
||||
INFO:werkzeug:Press CTRL+C to quit
|
||||
```
|
||||
> Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
|
||||
karena RAGFlow mungkin belum sepenuhnya siap.
|
||||
|
||||
5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
|
||||
> Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
|
||||
port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
|
||||
6. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
|
||||
> Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
|
||||
> karena RAGFlow mungkin belum sepenuhnya siap.
|
||||
|
||||
2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
|
||||
> Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
|
||||
> port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
|
||||
3. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
|
||||
bidang `API_KEY` dengan kunci API yang sesuai.
|
||||
|
||||
> Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.
|
||||
@ -224,7 +232,7 @@ menjadi `<YOUR_SERVING_PORT>:80`.
|
||||
Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:
|
||||
|
||||
> ```bash
|
||||
> $ docker compose -f docker/docker-compose.yml up -d
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
## 🔧 Membangun Docker Image tanpa Model Embedding
|
||||
@ -249,28 +257,31 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 🔨 Menjalankan Aplikasi dari untuk Pengembangan
|
||||
|
||||
1. Instal Poetry, atau lewati langkah ini jika sudah terinstal:
|
||||
1. Instal uv, atau lewati langkah ini jika sudah terinstal:
|
||||
|
||||
```bash
|
||||
pipx install poetry
|
||||
export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
|
||||
pipx install uv
|
||||
```
|
||||
|
||||
2. Clone kode sumber dan instal dependensi Python:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
~/.local/bin/poetry install --sync --no-root # install modul python RAGFlow
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
```
|
||||
|
||||
3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:
|
||||
|
||||
```bash
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
```
|
||||
```
|
||||
|
||||
4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:
|
||||
|
||||
@ -279,6 +290,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
5. Jalankan aplikasi backend:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
@ -288,12 +300,13 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
6. Instal dependensi frontend:
|
||||
```bash
|
||||
cd web
|
||||
npm install --force
|
||||
npm install
|
||||
```
|
||||
7. Jalankan aplikasi frontend:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
npm run dev
|
||||
```
|
||||
|
||||
_Output berikut menandakan bahwa sistem berhasil diluncurkan:_
|
||||
|
||||
@ -308,7 +321,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 📜 Roadmap
|
||||
|
||||
Lihat [Roadmap RAGFlow 2024](https://github.com/infiniflow/ragflow/issues/162)
|
||||
Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
||||
|
||||
## 🏄 Komunitas
|
||||
|
||||
@ -319,4 +332,4 @@ Lihat [Roadmap RAGFlow 2024](https://github.com/infiniflow/ragflow/issues/162)
|
||||
## 🙌 Kontribusi
|
||||
|
||||
RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
|
||||
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).
|
||||
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).
|
||||
|
||||
README_ja.md: 101 changed lines
@ -7,9 +7,11 @@
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -20,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -30,10 +32,9 @@
|
||||
</a>
|
||||
</p>
|
||||
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -46,23 +47,27 @@
|
||||
## 🎮 Demo
|
||||
|
||||
デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
</div>
|
||||
|
||||
|
||||
## 🔥 最新情報
|
||||
|
||||
- 2024-12-18 Deepdoc のドキュメント レイアウト分析モデルをアップグレードします。
|
||||
- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
|
||||
- 2025-02-05 シリコン フローの St およびモデル リストを更新し、Deep Seek-R1/Deep Seek-V3 のサポートを追加しました。
|
||||
- 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
|
||||
- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
|
||||
- 2024-12-04 ナレッジ ベースへのページランク スコアをサポートしました。
|
||||
- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
|
||||
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
|
||||
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
|
||||
- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインド マップをサポートします。
|
||||
|
||||
## 🎉 続きを楽しみに
|
||||
|
||||
⭐️ リポジトリをスター登録して、エキサイティングな新機能やアップデートを最新の状態に保ちましょう!すべての新しいリリースに関する即時通知を受け取れます! 🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
@ -142,21 +147,25 @@
|
||||
|
||||
3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:
|
||||
|
||||
> 以下のコマンドは、RAGFlow Dockerイメージの v0.15.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.15.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.14.1 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1 と設定します。
|
||||
> [!CAUTION]
|
||||
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
|
||||
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
|
||||
|
||||
> 以下のコマンドは、RAGFlow Docker イメージの v0.17.1-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.17.1-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.17.1 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1 と設定します。
|
||||
|
||||
```bash
|
||||
$ cd ragflow
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.15.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.15.0-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | *Unstable* nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | *Unstable* nightly build |
|
||||
| v0.17.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
4. サーバーを立ち上げた後、サーバーの状態を確認する:
|
||||
1. サーバーを立ち上げた後、サーバーの状態を確認する:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
@ -165,22 +174,20 @@
|
||||
_以下の出力は、システムが正常に起動したことを確認するものです:_
|
||||
|
||||
```bash
|
||||
____ ___ ______ ______ __
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
* Running on http://127.0.0.1:9380
|
||||
* Running on http://x.x.x.x:9380
|
||||
INFO:werkzeug:Press CTRL+C to quit
|
||||
```
|
||||
|
||||
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。
|
||||
|
||||
5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
|
||||
2. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
|
||||
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
|
||||
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
|
||||
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
|
||||
|
||||
> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
|
||||
|
||||
@ -203,7 +210,7 @@
|
||||
> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
|
||||
>
|
||||
> ```bash
|
||||
> $ docker compose -f docker/docker-compose.yml up -d
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
### Elasticsearch から Infinity にドキュメントエンジンを切り替えます
|
||||
@ -214,16 +221,17 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml down -v
|
||||
```
|
||||
2. **docker/.env** の「DOC _ ENGINE」を「infinity」に設定します。
|
||||
Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。
|
||||
2. **docker/.env** の「DOC \_ ENGINE」を「infinity」に設定します。
|
||||
|
||||
3. 起動コンテナ:
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
> [!WARNING]
|
||||
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
|
||||
> [!WARNING]
|
||||
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
|
||||
|
||||
## 🔧 ソースコードでDockerイメージを作成(埋め込みモデルなし)
|
||||
## 🔧 ソースコードで Docker イメージを作成(埋め込みモデルなし)
|
||||
|
||||
この Docker イメージのサイズは約 1GB で、外部の大モデルと埋め込みサービスに依存しています。
|
||||
|
||||
@ -233,7 +241,7 @@ cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 ソースコードをコンパイルしたDockerイメージ(埋め込みモデルを含む)
|
||||
## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)
|
||||
|
||||
この Docker のサイズは約 9GB で、埋め込みモデルを含むため、外部の大モデルサービスのみが必要です。
|
||||
|
||||
@ -245,53 +253,58 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 🔨 ソースコードからサービスを起動する方法
|
||||
|
||||
1. Poetry をインストールする。すでにインストールされている場合は、このステップをスキップしてください:
|
||||
1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:
|
||||
|
||||
```bash
|
||||
pipx install poetry
|
||||
export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
|
||||
pipx install uv
|
||||
```
|
||||
|
||||
2. ソースコードをクローンし、Python の依存関係をインストールする:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
```
|
||||
|
||||
3. Docker Compose を使用して依存サービス(MinIO、Elasticsearch、Redis、MySQL)を起動する:
|
||||
|
||||
```bash
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
`/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:
|
||||
`/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
```
|
||||
```
|
||||
|
||||
4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:
|
||||
|
||||
|
||||
```bash
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. バックエンドサービスを起動する:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. フロントエンドの依存関係をインストールする:
|
||||
6. フロントエンドの依存関係をインストールする:
|
||||
```bash
|
||||
cd web
|
||||
npm install --force
|
||||
```
|
||||
7. フロントエンドサービスを起動する:
|
||||
npm install
|
||||
```
|
||||
7. フロントエンドサービスを起動する:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
npm run dev
|
||||
```
|
||||
|
||||
_以下の画面で、システムが正常に起動したことを示します:_
|
||||
_以下の画面で、システムが正常に起動したことを示します:_
|
||||
|
||||

|
||||
|
||||
@ -304,7 +317,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 📜 ロードマップ
|
||||
|
||||
[RAGFlow ロードマップ 2024](https://github.com/infiniflow/ragflow/issues/162) を参照
|
||||
[RAGFlow ロードマップ 2025](https://github.com/infiniflow/ragflow/issues/4214) を参照
|
||||
|
||||
## 🏄 コミュニティ
|
||||
|
||||
|
||||
123
README_ko.md
@ -7,9 +7,11 @@
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -20,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -30,78 +32,75 @@
|
||||
</a>
|
||||
</p>
|
||||
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
|
||||
## 💡 RAGFlow란?
|
||||
|
||||
[RAGFlow](https://ragflow.io/)는 심층 문서 이해에 기반한 오픈소스 RAG (Retrieval-Augmented Generation) 엔진입니다. 이 엔진은 대규모 언어 모델(LLM)과 결합하여 정확한 질문 응답 기능을 제공하며, 다양한 복잡한 형식의 데이터에서 신뢰할 수 있는 출처를 바탕으로 한 인용을 통해 이를 뒷받침합니다. RAGFlow는 규모에 상관없이 모든 기업에 최적화된 RAG 워크플로우를 제공합니다.
|
||||
|
||||
|
||||
|
||||
## 🎮 데모
|
||||
|
||||
데모를 [https://demo.ragflow.io](https://demo.ragflow.io)에서 실행해 보세요.
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
</div>
|
||||
|
||||
|
||||
## 🔥 업데이트
|
||||
|
||||
- 2024-12-18 Deepdoc의 문서 레이아웃 분석 모델 업그레이드.
|
||||
|
||||
- 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
|
||||
- 2025-02-05 'SILICONFLOW' 모델 목록을 업데이트하고 Deepseek-R1/DeepSeek-V3에 대한 지원을 추가합니다.
|
||||
- 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
|
||||
- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
|
||||
- 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.
|
||||
|
||||
- 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
|
||||
|
||||
- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
|
||||
|
||||
- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.
|
||||
|
||||
- 2024-08-02: [graphrag](https://github.com/microsoft/graphrag)와 마인드맵에서 영감을 받은 GraphRAG를 지원합니다.
|
||||
|
||||
|
||||
## 🎉 계속 지켜봐 주세요
|
||||
|
||||
⭐️우리의 저장소를 즐겨찾기에 등록하여 흥미로운 새로운 기능과 업데이트를 최신 상태로 유지하세요! 모든 새로운 릴리스에 대한 즉시 알림을 받으세요! 🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
|
||||
|
||||
## 🌟 주요 기능
|
||||
|
||||
### 🍭 **"Quality in, quality out"**
|
||||
|
||||
- [심층 문서 이해](./deepdoc/README.md)를 기반으로 복잡한 형식의 비정형 데이터에서 지식을 추출합니다.
|
||||
- 문자 그대로 무한한 토큰에서 "데이터 속의 바늘"을 찾아냅니다.
|
||||
|
||||
### 🍱 **템플릿 기반의 chunking**
|
||||
|
||||
- 똑똑하고 설명 가능한 방식.
|
||||
- 다양한 템플릿 옵션을 제공합니다.
|
||||
|
||||
|
||||
### 🌱 **할루시네이션을 줄인 신뢰할 수 있는 인용**
|
||||
|
||||
- 텍스트 청킹을 시각화하여 사용자가 개입할 수 있도록 합니다.
|
||||
- 중요한 참고 자료와 추적 가능한 인용을 빠르게 확인하여 신뢰할 수 있는 답변을 지원합니다.
|
||||
|
||||
|
||||
### 🍔 **다른 종류의 데이터 소스와의 호환성**
|
||||
|
||||
- 워드, 슬라이드, 엑셀, 텍스트 파일, 이미지, 스캔본, 구조화된 데이터, 웹 페이지 등을 지원합니다.
|
||||
|
||||
### 🛀 **자동화되고 손쉬운 RAG 워크플로우**
|
||||
|
||||
- 개인 및 대규모 비즈니스에 맞춘 효율적인 RAG 오케스트레이션.
|
||||
- 구성 가능한 LLM 및 임베딩 모델.
|
||||
- 다중 검색과 결합된 re-ranking.
|
||||
- 비즈니스와 원활하게 통합할 수 있는 직관적인 API.
|
||||
|
||||
|
||||
## 🔎 시스템 아키텍처
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
@ -109,17 +108,19 @@
|
||||
</div>
|
||||
|
||||
## 🎬 시작하기
|
||||
|
||||
### 📝 사전 준비 사항
|
||||
|
||||
- CPU >= 4 cores
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치]((https://docs.docker.com/engine/install/))를 참조하세요.
|
||||
|
||||
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.
|
||||
|
||||
### 🚀 서버 시작하기
|
||||
|
||||
1. `vm.max_map_count`가 262144 이상인지 확인하세요:
|
||||
|
||||
> `vm.max_map_count`의 값을 아래 명령어를 통해 확인하세요:
|
||||
>
|
||||
> ```bash
|
||||
@ -147,21 +148,25 @@
|
||||
|
||||
3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:
|
||||
|
||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.15.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.15.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.14.1을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1로 설정합니다.
|
||||
> [!CAUTION]
|
||||
> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
|
||||
> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).
|
||||
|
||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.17.1-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.17.1-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.17.1을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1로 설정합니다.
|
||||
|
||||
```bash
|
||||
$ cd ragflow
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.15.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.15.0-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | *Unstable* nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | *Unstable* nightly build |
|
||||
| v0.17.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
4. 서버가 시작된 후 서버 상태를 확인하세요:
|
||||
1. 서버가 시작된 후 서버 상태를 확인하세요:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
@ -170,22 +175,21 @@
|
||||
_다음 출력 결과로 시스템이 성공적으로 시작되었음을 확인합니다:_
|
||||
|
||||
```bash
|
||||
____ ___ ______ ______ __
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
* Running on http://127.0.0.1:9380
|
||||
* Running on http://x.x.x.x:9380
|
||||
INFO:werkzeug:Press CTRL+C to quit
|
||||
```
|
||||
|
||||
> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.
|
||||
|
||||
5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
|
||||
2. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
|
||||
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
|
||||
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
|
||||
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
|
||||
|
||||
> 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.
|
||||
|
||||
_이제 쇼가 시작됩니다!_
|
||||
@ -207,24 +211,26 @@
|
||||
> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
|
||||
>
|
||||
> ```bash
|
||||
> $ docker compose -f docker/docker-compose.yml up -d
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
### Elasticsearch 에서 Infinity 로 문서 엔진 전환
|
||||
|
||||
RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및 벡터를 저장합니다. [Infinity](https://github.com/infiniflow/infinity/)로 전환하려면 다음 절차를 따르십시오.
|
||||
|
||||
1. 실행 중인 모든 컨테이너를 중지합니다.
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml down -v
|
||||
```
|
||||
Note: `-v` 는 docker 컨테이너의 볼륨을 삭제하므로 기존 데이터가 지워집니다.
|
||||
2. **docker/.env**의 "DOC_ENGINE" 을 "infinity" 로 설정합니다.
|
||||
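예를 들어 **docker/.env** 에 `DOC_ENGINE` 줄이 이미 있다면 다음과 같이 수정할 수 있습니다(명령은 하나의 예시입니다):

```bash
# docker/.env 의 DOC_ENGINE 을 infinity 로 변경하는 예시
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' docker/.env
```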
3. 컨테이너 부팅:
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
```
|
||||
> [!WARNING]
|
||||
> Linux/arm64 시스템에서 Infinity로 전환하는 것은 공식적으로 지원되지 않습니다.
|
||||
|
||||
```
|
||||
> [!WARNING]
|
||||
> Linux/arm64 시스템에서 Infinity로 전환하는 것은 공식적으로 지원되지 않습니다.
|
||||
|
||||
## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함하지 않음)
|
||||
|
||||
이 Docker 이미지의 크기는 약 1GB이며, 외부 대형 모델과 임베딩 서비스에 의존합니다.
|
||||
@ -247,53 +253,58 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 🔨 소스 코드로 서비스를 시작합니다.
|
||||
|
||||
1. Poetry를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:
|
||||
1. uv를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:
|
||||
|
||||
```bash
|
||||
pipx install poetry
|
||||
export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
|
||||
pipx install uv
|
||||
```
|
||||
|
||||
2. 소스 코드를 클론하고 Python 의존성을 설치합니다:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
```
|
||||
|
||||
3. Docker Compose를 사용하여 의존 서비스(MinIO, Elasticsearch, Redis 및 MySQL)를 시작합니다:
|
||||
|
||||
```bash
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
`/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:
|
||||
`/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
```
|
||||
```
|
||||
|
||||
4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:
|
||||
|
||||
|
||||
```bash
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. 백엔드 서비스를 시작합니다:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. 프론트엔드 의존성을 설치합니다:
|
||||
6. 프론트엔드 의존성을 설치합니다:
|
||||
```bash
|
||||
cd web
|
||||
npm install --force
|
||||
```
|
||||
7. 프론트엔드 서비스를 시작합니다:
|
||||
npm install
|
||||
```
|
||||
7. 프론트엔드 서비스를 시작합니다:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
npm run dev
|
||||
```
|
||||
|
||||
_다음 인터페이스는 시스템이 성공적으로 시작되었음을 나타냅니다:_
|
||||
_다음 인터페이스는 시스템이 성공적으로 시작되었음을 나타냅니다:_
|
||||
|
||||

|
||||
|
||||
@ -306,7 +317,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 📜 로드맵
|
||||
|
||||
[RAGFlow 로드맵 2024](https://github.com/infiniflow/ragflow/issues/162)을 확인하세요.
|
||||
[RAGFlow 로드맵 2025](https://github.com/infiniflow/ragflow/issues/4214)을 확인하세요.
|
||||
|
||||
## 🏄 커뮤니티
|
||||
|
||||
|
||||
356
README_pt_br.md
Normal file
@ -0,0 +1,356 @@
|
||||
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="seguir no X(Twitter)">
|
||||
</a>
|
||||
<a href="https://demo.ragflow.io" target="_blank">
|
||||
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="licença">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
<details open>
|
||||
<summary><b>📕 Índice</b></summary>
|
||||
|
||||
- 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
|
||||
- 🎮 [Demo](#-demo)
|
||||
- 📌 [Últimas Atualizações](#-últimas-atualizações)
|
||||
- 🌟 [Principais Funcionalidades](#-principais-funcionalidades)
|
||||
- 🔎 [Arquitetura do Sistema](#-arquitetura-do-sistema)
|
||||
- 🎬 [Primeiros Passos](#-primeiros-passos)
|
||||
- 🔧 [Configurações](#-configurações)
|
||||
- 🔧 [Construir uma imagem docker sem incorporar modelos](#-construir-uma-imagem-docker-sem-incorporar-modelos)
|
||||
- 🔧 [Construir uma imagem docker incluindo modelos](#-construir-uma-imagem-docker-incluindo-modelos)
|
||||
- 🔨 [Lançar serviço a partir do código-fonte para desenvolvimento](#-lançar-serviço-a-partir-do-código-fonte-para-desenvolvimento)
|
||||
- 📚 [Documentação](#-documentação)
|
||||
- 📜 [Roadmap](#-roadmap)
|
||||
- 🏄 [Comunidade](#-comunidade)
|
||||
- 🙌 [Contribuindo](#-contribuindo)
|
||||
|
||||
</details>
|
||||
|
||||
## 💡 O que é o RAGFlow?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) é um mecanismo RAG (Geração Aumentada por Recuperação) de código aberto baseado em entendimento profundo de documentos. Ele oferece um fluxo de trabalho RAG simplificado para empresas de qualquer porte, combinando LLMs (Modelos de Linguagem de Grande Escala) para fornecer capacidades de perguntas e respostas verídicas, respaldadas por citações bem fundamentadas de diversos dados complexos formatados.
|
||||
|
||||
## 🎮 Demo
|
||||
|
||||
Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🔥 Últimas Atualizações
|
||||
|
||||
- 28/02/2025 Combinado com a pesquisa na Internet (TAVILY), suporta pesquisas profundas para qualquer LLM.
|
||||
- 05-02-2025 Atualiza a lista de modelos de 'SILICONFLOW' e adiciona suporte para Deepseek-R1/DeepSeek-V3.
|
||||
- 26-01-2025 Otimize a extração e aplicação de gráficos de conhecimento e forneça uma variedade de opções de configuração.
|
||||
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
|
||||
- 04-12-2024 Adiciona suporte para pontuação de pagerank na base de conhecimento.
|
||||
- 22-11-2024 Adiciona mais variáveis para o Agente.
|
||||
- 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
|
||||
- 22-08-2024 Suporta conversão de texto para comandos SQL via RAG.
|
||||
|
||||
## 🎉 Fique Ligado
|
||||
|
||||
⭐️ Dê uma estrela no nosso repositório para se manter atualizado com novas funcionalidades e melhorias empolgantes! Receba notificações instantâneas sobre novos lançamentos! 🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🌟 Principais Funcionalidades
|
||||
|
||||
### 🍭 **"Qualidade entra, qualidade sai"**
|
||||
|
||||
- Extração de conhecimento baseada em [entendimento profundo de documentos](./deepdoc/README.md) a partir de dados não estruturados com formatos complicados.
|
||||
- Encontra a "agulha no palheiro de dados" de literalmente tokens ilimitados.
|
||||
|
||||
### 🍱 **Fragmentação baseada em templates**
|
||||
|
||||
- Inteligente e explicável.
|
||||
- Muitas opções de templates para escolher.
|
||||
|
||||
### 🌱 **Citações fundamentadas com menos alucinações**
|
||||
|
||||
- Visualização da fragmentação de texto para permitir intervenção humana.
|
||||
- Visualização rápida das referências chave e citações rastreáveis para apoiar respostas fundamentadas.
|
||||
|
||||
### 🍔 **Compatibilidade com fontes de dados heterogêneas**
|
||||
|
||||
- Suporta Word, apresentações, excel, txt, imagens, cópias digitalizadas, dados estruturados, páginas da web e mais.
|
||||
|
||||
### 🛀 **Fluxo de trabalho RAG automatizado e sem esforço**
|
||||
|
||||
- Orquestração RAG simplificada voltada tanto para negócios pessoais quanto grandes empresas.
|
||||
- Modelos LLM e de incorporação configuráveis.
|
||||
- Múltiplas recuperações emparelhadas com reclassificação fundida.
|
||||
- APIs intuitivas para integração sem problemas com os negócios.
|
||||
|
||||
## 🔎 Arquitetura do Sistema
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
||||
</div>
|
||||
|
||||
## 🎬 Primeiros Passos
|
||||
|
||||
### 📝 Pré-requisitos
|
||||
|
||||
- CPU >= 4 núcleos
|
||||
- RAM >= 16 GB
|
||||
- Disco >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).
|
||||
|
||||
### 🚀 Iniciar o servidor
|
||||
|
||||
1. Certifique-se de que `vm.max_map_count` >= 262144:
|
||||
|
||||
> Para verificar o valor de `vm.max_map_count`:
|
||||
>
|
||||
> ```bash
|
||||
> $ sysctl vm.max_map_count
|
||||
> ```
|
||||
>
|
||||
> Se necessário, redefina `vm.max_map_count` para um valor de pelo menos 262144:
|
||||
>
|
||||
> ```bash
|
||||
> # Neste caso, defina para 262144:
|
||||
> $ sudo sysctl -w vm.max_map_count=262144
|
||||
> ```
|
||||
>
|
||||
> Essa mudança será resetada após a reinicialização do sistema. Para garantir que a alteração permaneça permanente, adicione ou atualize o valor de `vm.max_map_count` em **/etc/sysctl.conf**:
|
||||
>
|
||||
> ```bash
|
||||
> vm.max_map_count=262144
|
||||
> ```
|
||||
|
||||
2. Clone o repositório:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/infiniflow/ragflow.git
|
||||
```
|
||||
|
||||
3. Inicie o servidor usando as imagens Docker pré-compiladas:
|
||||
|
||||
> [!CAUTION]
|
||||
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
|
||||
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.
|
||||
|
||||
> O comando abaixo baixa a edição `v0.17.1-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.17.1-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` para a edição completa `v0.17.1`.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
|
||||
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
|
||||
| v0.17.1 | ~9 | :heavy_check_mark: | Lançamento estável |
|
||||
| v0.17.1-slim | ~2 | ❌ | Lançamento estável |
|
||||
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
|
||||
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |
|
||||
|
||||
4. Verifique o status do servidor após tê-lo iniciado:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
```
|
||||
|
||||
_O seguinte resultado confirma o lançamento bem-sucedido do sistema:_
|
||||
|
||||
```bash
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Rodando em todos os endereços (0.0.0.0)
|
||||
```
|
||||
|
||||
> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
|
||||
|
||||
5. No seu navegador, insira o endereço IP do seu servidor e faça login no RAGFlow.
|
||||
|
||||
> Com as configurações padrão, você só precisa digitar `http://IP_DO_SEU_MÁQUINA` (**sem** o número da porta), pois a porta HTTP padrão `80` pode ser omitida ao usar as configurações padrão.
|
||||
|
||||
6. Em [service_conf.yaml.template](./docker/service_conf.yaml.template), selecione a fábrica LLM desejada em `user_default_llm` e atualize o campo `API_KEY` com a chave de API correspondente.
|
||||
|
||||
> Consulte [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) para mais informações.
|
||||
|
||||
_O show está no ar!_
|
||||
|
||||
## 🔧 Configurações
|
||||
|
||||
Quando se trata de configurações do sistema, você precisará gerenciar os seguintes arquivos:
|
||||
|
||||
- [.env](./docker/.env): Contém as configurações fundamentais para o sistema, como `SVR_HTTP_PORT`, `MYSQL_PASSWORD` e `MINIO_PASSWORD`.
|
||||
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configura os serviços de back-end. As variáveis de ambiente neste arquivo serão automaticamente preenchidas quando o contêiner Docker for iniciado. Quaisquer variáveis de ambiente definidas dentro do contêiner Docker estarão disponíveis para uso, permitindo personalizar o comportamento do serviço com base no ambiente de implantação.
|
||||
- [docker-compose.yml](./docker/docker-compose.yml): O sistema depende do [docker-compose.yml](./docker/docker-compose.yml) para iniciar.
|
||||
|
||||
> O arquivo [./docker/README](./docker/README.md) fornece uma descrição detalhada das configurações do ambiente e dos serviços, que podem ser usadas como `${ENV_VARS}` no arquivo [service_conf.yaml.template](./docker/service_conf.yaml.template).
|
||||
|
||||
Para atualizar a porta HTTP de serviço padrão (80), vá até [docker-compose.yml](./docker/docker-compose.yml) e altere `80:80` para `<SUA_PORTA_DE_SERVIÇO>:80`.
|
||||
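Por exemplo, para expor o serviço na porta 8080 (o número da porta é apenas ilustrativo), uma possibilidade é:

```bash
# Exemplo: trocar o mapeamento 80:80 por 8080:80 em docker/docker-compose.yml
sed -i 's/80:80/8080:80/' docker/docker-compose.yml
```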
|
||||
Atualizações nas configurações acima exigem um reinício de todos os contêineres para que tenham efeito:
|
||||
|
||||
> ```bash
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
### Mudar o mecanismo de documentos de Elasticsearch para Infinity
|
||||
|
||||
O RAGFlow usa o Elasticsearch por padrão para armazenar texto completo e vetores. Para mudar para o [Infinity](https://github.com/infiniflow/infinity/), siga estas etapas:
|
||||
|
||||
1. Pare todos os contêineres em execução:
|
||||
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml down -v
|
||||
```
|
||||
Note: `-v` irá deletar os volumes do contêiner, e os dados existentes serão apagados.
|
||||
2. Defina `DOC_ENGINE` no **docker/.env** para `infinity`.
|
||||
|
||||
3. Inicie os contêineres:
|
||||
|
||||
```bash
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> A mudança para o Infinity em uma máquina Linux/arm64 ainda não é oficialmente suportada.
|
||||
|
||||
## 🔧 Criar uma imagem Docker sem modelos de incorporação
|
||||
|
||||
Esta imagem tem cerca de 2 GB de tamanho e depende de serviços externos de LLM e incorporação.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 Criar uma imagem Docker incluindo modelos de incorporação
|
||||
|
||||
Esta imagem tem cerca de 9 GB de tamanho. Como inclui modelos de incorporação, depende apenas de serviços externos de LLM.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 Lançar o serviço a partir do código-fonte para desenvolvimento
|
||||
|
||||
1. Instale o `uv`, ou pule esta etapa se ele já estiver instalado:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
```
|
||||
|
||||
2. Clone o código-fonte e instale as dependências Python:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # instala os módulos Python dependentes do RAGFlow
|
||||
```
|
||||
|
||||
3. Inicie os serviços dependentes (MinIO, Elasticsearch, Redis e MySQL) usando Docker Compose:
|
||||
|
||||
```bash
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
Adicione a seguinte linha ao arquivo `/etc/hosts` para resolver todos os hosts especificados em **docker/.env** para `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
```
|
||||
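Uma forma possível de acrescentar essa linha (requer privilégios de administrador):

```bash
# Acrescenta a entrada ao /etc/hosts
echo "127.0.0.1 es01 infinity mysql minio redis" | sudo tee -a /etc/hosts
```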
|
||||
4. Se não conseguir acessar o HuggingFace, defina a variável de ambiente `HF_ENDPOINT` para usar um site espelho:
|
||||
|
||||
```bash
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. Lance o serviço de back-end:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. Instale as dependências do front-end:
|
||||
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
|
||||
7. Lance o serviço de front-end:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
_O seguinte resultado confirma o lançamento bem-sucedido do sistema:_
|
||||
|
||||

|
||||
|
||||
## 📚 Documentação
|
||||
|
||||
- [Início rápido](https://ragflow.io/docs/dev/)
|
||||
- [Guia do usuário](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Referências](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||
|
||||
## 📜 Roadmap
|
||||
|
||||
Veja o [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
||||
|
||||
## 🏄 Comunidade
|
||||
|
||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||
- [Twitter](https://twitter.com/infiniflowai)
|
||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||
|
||||
## 🙌 Contribuindo
|
||||
|
||||
O RAGFlow prospera por meio da colaboração de código aberto. Com esse espírito, abraçamos contribuições diversas da comunidade.
|
||||
Se você deseja fazer parte, primeiro revise nossas [Diretrizes de Contribuição](./CONTRIBUTING.md).
|
||||
356
README_tzh.md
Normal file
@ -0,0 +1,356 @@
|
||||
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
|
||||
</a>
|
||||
<a href="https://demo.ragflow.io" target="_blank">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
## 💡 RAGFlow 是什麼?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) 是一款基於深度文件理解所建構的開源 RAG(Retrieval-Augmented Generation)引擎。 RAGFlow 可以為各種規模的企業及個人提供一套精簡的 RAG 工作流程,結合大語言模型(LLM)針對用戶各類不同的複雜格式數據提供可靠的問答以及有理有據的引用。
|
||||
|
||||
## 🎮 Demo 試用
|
||||
|
||||
請登入網址 [https://demo.ragflow.io](https://demo.ragflow.io) 試用 demo。
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🔥 近期更新
|
||||
|
||||
- 2025-02-28 結合網路搜尋(Tavily),對於任意大模型實現類似 Deep Research 的推理功能.
|
||||
- 2025-02-05 更新「SILICONFLOW」的型號清單並新增 Deepseek-R1/DeepSeek-V3 的支援。
|
||||
- 2025-01-26 最佳化知識圖譜的擷取與應用,提供了多種配置選擇。
|
||||
- 2024-12-18 升級了 DeepDoc 的文檔佈局分析模型。
|
||||
- 2024-12-04 支援知識庫的 Pagerank 分數。
|
||||
- 2024-11-22 完善了 Agent 中的變數定義和使用。
|
||||
- 2024-11-01 對解析後的 chunk 加入關鍵字抽取和相關問題產生以提高回想的準確度。
|
||||
- 2024-08-22 支援用 RAG 技術實現從自然語言到 SQL 語句的轉換。
|
||||
|
||||
## 🎉 關注項目
|
||||
|
||||
⭐️ 點擊右上角的 Star 追蹤 RAGFlow,可以取得最新發布的即時通知 !🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🌟 主要功能
|
||||
|
||||
### 🍭 **"Quality in, quality out"**
|
||||
|
||||
- 基於[深度文件理解](./deepdoc/README.md),能夠從各類複雜格式的非結構化資料中提取真知灼見。
|
||||
- 真正在無限上下文(token)的場景下快速完成大海撈針測試。
|
||||
|
||||
### 🍱 **基於模板的文字切片**
|
||||
|
||||
- 不只是智能,更重要的是可控可解釋。
|
||||
- 多種文字範本可供選擇
|
||||
|
||||
### 🌱 **有理有據、最大程度降低幻覺(hallucination)**
|
||||
|
||||
- 文字切片過程視覺化,支援手動調整。
|
||||
- 有理有據:答案提供關鍵引用的快照並支持追根溯源。
|
||||
|
||||
### 🍔 **相容各類異質資料來源**
|
||||
|
||||
- 支援豐富的文件類型,包括 Word 文件、PPT、excel 表格、txt 檔案、圖片、PDF、影印件、結構化資料、網頁等。
|
||||
|
||||
### 🛀 **全程無憂、自動化的 RAG 工作流程**
|
||||
|
||||
- 全面優化的 RAG 工作流程可以支援從個人應用乃至超大型企業的各類生態系統。
|
||||
- 大語言模型 LLM 以及向量模型皆支援配置。
|
||||
- 基於多路召回、融合重排序。
|
||||
- 提供易用的 API,可輕鬆整合到各類企業系統。
|
||||
|
||||
## 🔎 系統架構
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
||||
</div>
|
||||
|
||||
## 🎬 快速開始
|
||||
|
||||
### 📝 前提條件
|
||||
|
||||
- CPU >= 4 核
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> 如果你並沒有在本機安裝 Docker(Windows、Mac,或 Linux), 可以參考文件 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安裝。
|
||||
|
||||
### 🚀 啟動伺服器
|
||||
|
||||
1. 確保 `vm.max_map_count` 不小於 262144:
|
||||
|
||||
> 如需確認 `vm.max_map_count` 的大小:
|
||||
>
|
||||
> ```bash
|
||||
> $ sysctl vm.max_map_count
|
||||
> ```
|
||||
>
|
||||
> 如果 `vm.max_map_count` 的值小於 262144,可以進行重設:
|
||||
>
|
||||
> ```bash
|
||||
> # 這裡我們設為 262144:
|
||||
> $ sudo sysctl -w vm.max_map_count=262144
|
||||
> ```
|
||||
>
|
||||
> 你的改動會在下次系統重新啟動時被重置。如果希望做永久改動,還需要在 **/etc/sysctl.conf** 檔案裡把 `vm.max_map_count` 的值再相應更新一遍:
|
||||
>
|
||||
> ```bash
|
||||
> vm.max_map_count=262144
|
||||
> ```
|
||||
|
||||
2. 克隆倉庫:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/infiniflow/ragflow.git
|
||||
```
|
||||
|
||||
3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:
|
||||
|
||||
> [!CAUTION]
|
||||
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
|
||||
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。
|
||||
|
||||
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.17.1-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.17.1-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` 來下載 RAGFlow 鏡像的 `v0.17.1` 完整發行版。
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.17.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
> [!TIP]
|
||||
> 如果你遇到 Docker 映像檔拉不下來的問題,可以在 **docker/.env** 檔案內根據變數 `RAGFLOW_IMAGE` 的註解提示選擇華為雲或阿里雲的對應映像。
|
||||
>
|
||||
> - 華為雲鏡像名:`swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
|
||||
> - 阿里雲鏡像名:`registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`
|
||||
|
||||
4. 伺服器啟動成功後再次確認伺服器狀態:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
```
|
||||
|
||||
_出現以下介面提示說明伺服器啟動成功:_
|
||||
|
||||
```bash
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
```
|
||||
|
||||
> 如果您跳過這一步驟系統確認步驟就登入 RAGFlow,你的瀏覽器有可能會提示 `network anormal` 或 `網路異常`,因為 RAGFlow 可能並未完全啟動成功。
|
||||
|
||||
5. 在你的瀏覽器中輸入你的伺服器對應的 IP 位址並登入 RAGFlow。
|
||||
> 上面這個範例中,您只需輸入 http://IP_OF_YOUR_MACHINE 即可:未改動過設定則無需輸入連接埠(預設的 HTTP 服務連接埠 80)。
|
||||
6. 在 [service_conf.yaml.template](./docker/service_conf.yaml.template) 檔案的 `user_default_llm` 欄位設定 LLM factory,並在 `API_KEY` 欄填入和你選擇的大模型相對應的 API key。
|
||||
|
||||
> 詳見 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)。
|
||||
|
||||
_好戲開始,接著奏樂接著舞!_
|
||||
|
||||
## 🔧 系統配置
|
||||
|
||||
系統配置涉及以下三份文件:
|
||||
|
||||
- [.env](./docker/.env):存放一些系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
|
||||
- [service_conf.yaml.template](./docker/service_conf.yaml.template):設定各類別後台服務。
|
||||
- [docker-compose.yml](./docker/docker-compose.yml): 系統依賴該檔案完成啟動。
|
||||
|
||||
請務必確保 [.env](./docker/.env) 檔案中的變數設定與 [service_conf.yaml.template](./docker/service_conf.yaml.template) 檔案中的設定保持一致!
|
||||
|
||||
如果無法存取映像網站 hub.docker.com 或模型網站 huggingface.co,請依照 [.env](./docker/.env) 註解修改 `RAGFLOW_IMAGE` 和 `HF_ENDPOINT`。
|
||||
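例如,可以在 **docker/.env** 中加入類似下面的設定(映像位址與 tag 僅為示例):

```bash
# docker/.env 設定示例(僅供參考)
RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:v0.17.1-slim
HF_ENDPOINT=https://hf-mirror.com
```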
|
||||
> [./docker/README](./docker/README.md) 解釋了 [service_conf.yaml.template](./docker/service_conf.yaml.template) 用到的環境變數設定和服務配置。
|
||||
|
||||
如需更新預設的 HTTP 服務連接埠(80), 可以在[docker-compose.yml](./docker/docker-compose.yml) 檔案中將配置`80:80` 改為`<YOUR_SERVING_PORT>:80` 。
|
||||
|
||||
> 所有系統配置都需要透過系統重新啟動生效:
|
||||
>
|
||||
> ```bash
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
### 把文檔引擎從 Elasticsearch 切換成為 Infinity
|
||||
|
||||
RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換為 [Infinity](https://github.com/infiniflow/infinity/), 可以按照下面步驟進行:
|
||||
|
||||
1. 停止所有容器運作:
|
||||
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml down -v
|
||||
```
|
||||
Note: `-v` 將會刪除 docker 容器的 volumes,已有的資料會被清空。
|
||||
|
||||
2. 設定 **docker/.env** 目錄中的 `DOC_ENGINE` 為 `infinity`.
|
||||
|
||||
3. 啟動容器:
|
||||
|
||||
```bash
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> Infinity 目前官方並未正式支援在 Linux/arm64 架構下的機器上運行.
|
||||
|
||||
## 🔧 原始碼編譯 Docker 映像(不含 embedding 模型)
|
||||
|
||||
本 Docker 映像大小約 2 GB 左右並且依賴外部的大模型和 embedding 服務。
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 原始碼編譯 Docker 映像(包含 embedding 模型)
|
||||
|
||||
本 Docker 大小約 9 GB 左右。由於已包含 embedding 模型,所以只需依賴外部的大模型服務即可。
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 以原始碼啟動服務
|
||||
|
||||
1. 安裝 uv。如已安裝,可跳過此步驟:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
|
||||
```
|
||||
|
||||
2. 下載原始碼並安裝 Python 依賴:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
```
|
||||
|
||||
3. 透過 Docker Compose 啟動依賴的服務(MinIO, Elasticsearch, Redis, and MySQL):
|
||||
|
||||
```bash
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
在 `/etc/hosts` 中加入以下程式碼,將 **conf/service_conf.yaml** 檔案中的所有 host 位址都解析為 `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
```
|
||||
|
||||
4. 如果無法存取 HuggingFace,可以把環境變數 `HF_ENDPOINT` 設為對應的鏡像網站:
|
||||
|
||||
```bash
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. 啟動後端服務:
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
|
||||
```
|
||||
|
||||
6. 安裝前端依賴:
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
|
||||
7. 啟動前端服務:
|
||||
```bash
|
||||
npm run dev
|
||||
|
||||
```
|
||||
|
||||
_以下界面說明系統已成功啟動:_
|
||||
|
||||

|
||||
|
||||
|
||||
## 📚 技術文檔
|
||||
|
||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
||||
- [References](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||
|
||||
## 📜 路線圖
|
||||
|
||||
詳見 [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214) 。
|
||||
|
||||
## 🏄 開源社群
|
||||
|
||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||
- [Twitter](https://twitter.com/infiniflowai)
|
||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||
|
||||
## 🙌 貢獻指南
|
||||
|
||||
RAGFlow 只有透過開源協作才能蓬勃發展。秉持這項精神,我們歡迎來自社區的各種貢獻。如果您有意參與其中,請查閱我們的 [貢獻者指南](./CONTRIBUTING.md) 。
|
||||
|
||||
## 🤝 商務合作
|
||||
|
||||
- [預約諮詢](https://aao615odquw.feishu.cn/share/base/form/shrcnjw7QleretCLqh1nuPo1xxh)
|
||||
|
||||
## 👥 加入社區
|
||||
|
||||
掃二維碼加入 RAGFlow 小助手,進 RAGFlow 交流群。
|
||||
|
||||
<p align="center">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/bccf284f-46f2-4445-9809-8f1030fb7585" width=50% height=50%>
|
||||
</p>
|
||||
112
README_zh.md
@ -7,9 +7,11 @@
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -20,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -30,10 +32,9 @@
|
||||
</a>
|
||||
</p>
|
||||
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
@ -46,28 +47,31 @@
|
||||
## 🎮 Demo 试用
|
||||
|
||||
请登录网址 [https://demo.ragflow.io](https://demo.ragflow.io) 试用 demo。
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
</div>
|
||||
|
||||
|
||||
## 🔥 近期更新
|
||||
|
||||
- 2024-12-18 升级了 Deepdoc 的文档布局分析模型。
|
||||
- 2025-02-28 结合互联网搜索(Tavily),对于任意大模型实现类似 Deep Research 的推理功能.
|
||||
- 2025-02-05 更新硅基流动的模型列表,增加了对 Deepseek-R1/DeepSeek-V3 的支持。
|
||||
- 2025-01-26 优化知识图谱的提取和应用,提供了多种配置选择。
|
||||
- 2024-12-18 升级了 DeepDoc 的文档布局分析模型。
|
||||
- 2024-12-04 支持知识库的 Pagerank 分数。
|
||||
- 2024-11-22 完善了 Agent 中的变量定义和使用。
|
||||
- 2024-11-01 对解析后的 chunk 加入关键词抽取和相关问题生成以提高召回的准确度。
|
||||
- 2024-08-22 支持用 RAG 技术实现从自然语言到 SQL 语句的转换。
|
||||
- 2024-08-02 支持 GraphRAG 启发于 [graphrag](https://github.com/microsoft/graphrag) 和思维导图。
|
||||
|
||||
## 🎉 关注项目
|
||||
⭐️点击右上角的 Star 关注RAGFlow,可以获取最新发布的实时通知 !🌟
|
||||
|
||||
⭐️ 点击右上角的 Star 关注 RAGFlow,可以获取最新发布的实时通知 !🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
|
||||
|
||||
## 🌟 主要功能
|
||||
|
||||
### 🍭 **"Quality in, quality out"**
|
||||
@ -143,22 +147,27 @@
|
||||
|
||||
3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:
|
||||
|
||||
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.15.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.15.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1` 来下载 RAGFlow 镜像的 `v0.14.1` 完整发行版。
|
||||
> [!CAUTION]
|
||||
> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
|
||||
> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。
|
||||
|
||||
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.17.1-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.17.1-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` 来下载 RAGFlow 镜像的 `v0.17.1` 完整发行版。
|
||||
|
||||
```bash
|
||||
$ cd ragflow
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.15.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.15.0-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | *Unstable* nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | *Unstable* nightly build |
|
||||
| v0.17.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
> [!TIP]
|
||||
> [!TIP]
|
||||
> 如果你遇到 Docker 镜像拉不下来的问题,可以在 **docker/.env** 文件内根据变量 `RAGFLOW_IMAGE` 的注释提示选择华为云或者阿里云的相应镜像。
|
||||
>
|
||||
> - 华为云镜像名:`swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
|
||||
> - 阿里云镜像名:`registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`
|
||||
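例如,在 **docker/.env** 中可以这样设置(镜像 tag 仅作示例):

```bash
# docker/.env 中改用阿里云镜像的示例(tag 仅供参考)
RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:v0.17.1-slim
```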
|
||||
@ -171,18 +180,16 @@
|
||||
_出现以下界面提示说明服务器启动成功:_
|
||||
|
||||
```bash
|
||||
____ ___ ______ ______ __
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
* Running on http://127.0.0.1:9380
|
||||
* Running on http://x.x.x.x:9380
|
||||
INFO:werkzeug:Press CTRL+C to quit
|
||||
```
|
||||
> 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。
|
||||
|
||||
> 如果您在看到上面的提示信息之前就尝试登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`,因为此时 RAGFlow 可能并未完全启动成功。
|
||||
|
||||
5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
|
||||
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
|
||||
@ -211,7 +218,7 @@
|
||||
> 所有系统配置都需要通过系统重启生效:
|
||||
>
|
||||
> ```bash
|
||||
> $ docker compose -f docker/docker-compose.yml up -d
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
### 把文档引擎从 Elasticsearch 切换成为 Infinity
|
||||
@ -223,19 +230,19 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml down -v
|
||||
```
|
||||
Note: `-v` 将会删除 docker 容器的 volumes,已有的数据会被清空。
|
||||
|
||||
2. 设置 **docker/.env** 目录中的 `DOC_ENGINE` 为 `infinity`.
|
||||
|
||||
3. 启动容器:
|
||||
|
||||
```bash
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> [!WARNING]
|
||||
> Infinity 目前官方并未正式支持在 Linux/arm64 架构下的机器上运行.
|
||||
|
||||
|
||||
## 🔧 源码编译 Docker 镜像(不含 embedding 模型)
|
||||
|
||||
本 Docker 镜像大小约 2 GB 左右并且依赖外部的大模型和 embedding 服务。
|
||||
@ -258,55 +265,59 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
|
||||
## 🔨 以源代码启动服务
|
||||
|
||||
1. 安装 Poetry。如已经安装,可跳过本步骤:
|
||||
1. 安装 uv。如已经安装,可跳过本步骤:
|
||||
|
||||
```bash
|
||||
pipx install poetry
|
||||
pipx inject poetry poetry-plugin-pypi-mirror
|
||||
export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
|
||||
export POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
|
||||
pipx install uv
|
||||
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
|
||||
```
|
||||
|
||||
2. 下载源代码并安装 Python 依赖:
|
||||
2. 下载源代码并安装 Python 依赖:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
```
|
||||
|
||||
3. 通过 Docker Compose 启动依赖的服务(MinIO, Elasticsearch, Redis, and MySQL):
|
||||
3. 通过 Docker Compose 启动依赖的服务(MinIO, Elasticsearch, Redis, and MySQL):
|
||||
|
||||
```bash
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
在 `/etc/hosts` 中添加以下代码,将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
||||
在 `/etc/hosts` 中添加以下代码,将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
```
|
||||
```
|
||||
|
||||
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
||||
|
||||
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
||||
|
||||
```bash
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. 启动后端服务:
|
||||
5. 启动后端服务:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. 安装前端依赖:
|
||||
6. 安装前端依赖:
|
||||
```bash
|
||||
cd web
|
||||
npm install --force
|
||||
```
|
||||
7. 启动前端服务:
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
npm install
|
||||
```
|
||||
7. 启动前端服务:
|
||||
|
||||
_以下界面说明系统已经成功启动:_
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
_以下界面说明系统已经成功启动:_
|
||||
|
||||

|
||||
|
||||
@ -319,7 +330,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
|
||||
## 📜 路线图
|
||||
|
||||
详见 [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162) 。
|
||||
详见 [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214) 。
|
||||
|
||||
## 🏄 开源社区
|
||||
|
||||
@ -342,4 +353,3 @@ RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们
|
||||
<p align="center">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/bccf284f-46f2-4445-9809-8f1030fb7585" width=50% height=50%>
|
||||
</p>
|
||||
|
||||
|
||||
@ -1,2 +1,18 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from beartype.claw import beartype_this_package
|
||||
beartype_this_package()
|
||||
|
||||
@ -15,14 +15,16 @@
|
||||
#
|
||||
import logging
|
||||
import json
|
||||
from abc import ABC
|
||||
from copy import deepcopy
|
||||
from functools import partial
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from agent.component import component_class
|
||||
from agent.component.base import ComponentBase
|
||||
|
||||
|
||||
class Canvas(ABC):
|
||||
class Canvas:
|
||||
"""
|
||||
dsl = {
|
||||
"components": {
|
||||
@ -83,7 +85,8 @@ class Canvas(ABC):
|
||||
}
|
||||
},
|
||||
"downstream": [],
|
||||
"upstream": []
|
||||
"upstream": [],
|
||||
"parent_id": ""
|
||||
}
|
||||
},
|
||||
"history": [],
|
||||
@ -158,7 +161,7 @@ class Canvas(ABC):
|
||||
self.components[k]["obj"].reset()
|
||||
self._embed_id = ""
|
||||
|
||||
def get_compnent_name(self, cid):
|
||||
def get_component_name(self, cid):
|
||||
for n in self.dsl["graph"]["nodes"]:
|
||||
if cid == n["id"]:
|
||||
return n["data"]["name"]
|
||||
@ -206,7 +209,15 @@ class Canvas(ABC):
|
||||
if c not in waiting:
|
||||
waiting.append(c)
|
||||
continue
|
||||
yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
|
||||
yield "*'{}'* is running...🕞".format(self.get_component_name(c))
|
||||
|
||||
if cpn.component_name.lower() == "iteration":
|
||||
st_cpn = cpn.get_start()
|
||||
assert st_cpn, "Start component not found for Iteration."
|
||||
if not st_cpn["obj"].end():
|
||||
cpn = st_cpn["obj"]
|
||||
c = cpn._id
|
||||
|
||||
try:
|
||||
ans = cpn.run(self.history, **kwargs)
|
||||
except Exception as e:
|
||||
@ -215,16 +226,26 @@ class Canvas(ABC):
|
||||
ran += 1
|
||||
raise e
|
||||
self.path[-1].append(c)
|
||||
|
||||
ran += 1
|
||||
|
||||
for m in prepare2run(self.components[self.path[-2][-1]]["downstream"]):
|
||||
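# If the just-finished component has no downstream but belongs to a parent
# component (e.g. an Iteration), merge its output into the parent's output
# and continue execution from the parent.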
downstream = self.components[self.path[-2][-1]]["downstream"]
|
||||
if not downstream and self.components[self.path[-2][-1]].get("parent_id"):
|
||||
cid = self.path[-2][-1]
|
||||
pid = self.components[cid]["parent_id"]
|
||||
o, _ = self.components[cid]["obj"].output(allow_partial=False)
|
||||
oo, _ = self.components[pid]["obj"].output(allow_partial=False)
|
||||
self.components[pid]["obj"].set(pd.concat([oo, o], ignore_index=True))
|
||||
downstream = [pid]
|
||||
|
||||
for m in prepare2run(downstream):
|
||||
yield {"content": m, "running_status": True}
|
||||
|
||||
while 0 <= ran < len(self.path[-1]):
|
||||
logging.debug(f"Canvas.run: {ran} {self.path}")
|
||||
cpn_id = self.path[-1][ran]
|
||||
cpn = self.get_component(cpn_id)
|
||||
if not cpn["downstream"]:
|
||||
if not any([cpn["downstream"], cpn.get("parent_id"), waiting]):
|
||||
break
|
||||
|
||||
loop = self._find_loop()
|
||||
@ -239,7 +260,15 @@ class Canvas(ABC):
|
||||
yield {"content": m, "running_status": True}
|
||||
continue
|
||||
|
||||
for m in prepare2run(cpn["downstream"]):
|
||||
downstream = cpn["downstream"]
|
||||
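# Same parent fallback as above: a component without a downstream hands its
# output up to its parent (e.g. an Iteration) and execution continues there.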
if not downstream and cpn.get("parent_id"):
|
||||
pid = cpn["parent_id"]
|
||||
_, o = cpn["obj"].output(allow_partial=False)
|
||||
_, oo = self.components[pid]["obj"].output(allow_partial=False)
|
||||
self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True))
|
||||
downstream = [pid]
|
||||
|
||||
for m in prepare2run(downstream):
|
||||
yield {"content": m, "running_status": True}
|
||||
|
||||
if ran >= len(self.path[-1]) and waiting:
|
||||
@ -247,6 +276,7 @@ class Canvas(ABC):
|
||||
waiting = []
|
||||
for m in prepare2run(without_dependent_checking):
|
||||
yield {"content": m, "running_status": True}
|
||||
without_dependent_checking = []
|
||||
ran -= 1
|
||||
|
||||
if self.answer:
|
||||
@ -294,7 +324,7 @@ class Canvas(ABC):
|
||||
return False
|
||||
|
||||
for i in range(len(path)):
|
||||
if path[i].lower().find("answer") >= 0:
|
||||
if path[i].lower().find("answer") == 0 or path[i].lower().find("iterationitem") == 0:
|
||||
path = path[:i]
|
||||
break
|
||||
|
||||
|
||||
@ -1,3 +1,19 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import importlib
|
||||
from .begin import Begin, BeginParam
|
||||
from .generate import Generate, GenerateParam
|
||||
@ -32,7 +48,8 @@ from .crawler import Crawler, CrawlerParam
|
||||
from .invoke import Invoke, InvokeParam
|
||||
from .template import Template, TemplateParam
|
||||
from .email import Email, EmailParam
|
||||
|
||||
from .iteration import Iteration, IterationParam
|
||||
from .iterationitem import IterationItem, IterationItemParam
|
||||
|
||||
|
||||
def component_class(class_name):
|
||||
@ -40,6 +57,7 @@ def component_class(class_name):
|
||||
c = getattr(m, class_name)
|
||||
return c
|
||||
|
||||
|
||||
__all__ = [
|
||||
"Begin",
|
||||
"BeginParam",
|
||||
@ -103,6 +121,10 @@ __all__ = [
|
||||
"CrawlerParam",
|
||||
"Invoke",
|
||||
"InvokeParam",
|
||||
"Iteration",
|
||||
"IterationParam",
|
||||
"IterationItem",
|
||||
"IterationItemParam",
|
||||
"Template",
|
||||
"TemplateParam",
|
||||
"Email",
|
||||
|
||||
@ -1,56 +1,56 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
import akshare as ak
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class AkShareParam(ComponentParamBase):
|
||||
"""
|
||||
Define the AkShare component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.top_n = 10
|
||||
|
||||
def check(self):
|
||||
self.check_positive_integer(self.top_n, "Top N")
|
||||
|
||||
|
||||
class AkShare(ComponentBase, ABC):
|
||||
component_name = "AkShare"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = ",".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return AkShare.be_output("")
|
||||
|
||||
try:
|
||||
ak_res = []
|
||||
stock_news_em_df = ak.stock_news_em(symbol=ans)
|
||||
stock_news_em_df = stock_news_em_df.head(self._param.top_n)
|
||||
ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
|
||||
"新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
|
||||
except Exception as e:
|
||||
return AkShare.be_output("**ERROR**: " + str(e))
|
||||
|
||||
if not ak_res:
|
||||
return AkShare.be_output("")
|
||||
|
||||
return pd.DataFrame(ak_res)
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class AkShareParam(ComponentParamBase):
|
||||
"""
|
||||
Define the AkShare component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.top_n = 10
|
||||
|
||||
def check(self):
|
||||
self.check_positive_integer(self.top_n, "Top N")
|
||||
|
||||
|
||||
class AkShare(ComponentBase, ABC):
|
||||
component_name = "AkShare"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
import akshare as ak
|
||||
ans = self.get_input()
|
||||
ans = ",".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return AkShare.be_output("")
|
||||
|
||||
try:
|
||||
ak_res = []
|
||||
stock_news_em_df = ak.stock_news_em(symbol=ans)
|
||||
stock_news_em_df = stock_news_em_df.head(self._param.top_n)
|
||||
ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
|
||||
"新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
|
||||
except Exception as e:
|
||||
return AkShare.be_output("**ERROR**: " + str(e))
|
||||
|
||||
if not ak_res:
|
||||
return AkShare.be_output("")
|
||||
|
||||
return pd.DataFrame(ak_res)
|
||||
|
||||
@ -16,6 +16,7 @@
|
||||
import random
|
||||
from abc import ABC
|
||||
from functools import partial
|
||||
from typing import Tuple, Union
|
||||
|
||||
import pandas as pd
|
||||
|
||||
@ -76,4 +77,13 @@ class Answer(ComponentBase, ABC):
|
||||
def set_exception(self, e):
|
||||
self.exception = e
|
||||
|
||||
def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
if allow_partial:
return super().output()

for r, c in self._canvas.history[::-1]:
if r == "user":
return self._param.output_var_name, pd.DataFrame([{"content": c}])

return self._param.output_var_name, pd.DataFrame([])
@ -44,7 +44,7 @@ class Baidu(ComponentBase, ABC):
|
||||
return Baidu.be_output("")
|
||||
|
||||
try:
|
||||
url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
|
||||
url = 'http://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
|
||||
response = requests.get(url=url, headers=headers)
|
||||
|
||||
@ -39,9 +39,6 @@ class BaiduFanyiParam(ComponentParamBase):
|
||||
self.check_empty(self.appid, "BaiduFanyi APPID")
|
||||
self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
|
||||
self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
|
||||
self.check_valid_value(self.trans_type, "Translate domain",
|
||||
['it', 'finance', 'machinery', 'senimed', 'novel', 'academic', 'aerospace', 'wiki',
|
||||
'news', 'law', 'contract'])
|
||||
self.check_valid_value(self.source_lang, "Source language",
|
||||
['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
|
||||
'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
|
||||
@ -96,3 +93,4 @@ class BaiduFanyi(ComponentBase, ABC):
|
||||
|
||||
except Exception as e:
|
||||
BaiduFanyi.be_output("**Error**:" + str(e))
|
||||
|
||||
|
||||
@ -426,10 +426,14 @@ class ComponentBase(ABC):
|
||||
|
||||
def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
|
||||
o = getattr(self._param, self._param.output_var_name)
|
||||
if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
|
||||
if not isinstance(o, list):
|
||||
o = [o]
|
||||
o = pd.DataFrame(o)
|
||||
if not isinstance(o, partial):
|
||||
if not isinstance(o, pd.DataFrame):
|
||||
if isinstance(o, list):
|
||||
return self._param.output_var_name, pd.DataFrame(o)
|
||||
if o is None:
|
||||
return self._param.output_var_name, pd.DataFrame()
|
||||
return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
|
||||
return self._param.output_var_name, o
|
||||
|
||||
if allow_partial or not isinstance(o, partial):
|
||||
if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
|
||||
@ -453,7 +457,7 @@ class ComponentBase(ABC):
|
||||
|
||||
def get_input(self):
|
||||
if self._param.debug_inputs:
|
||||
return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs])
|
||||
return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])
|
||||
|
||||
reversed_cpnts = []
|
||||
if len(self._canvas.path) > 1:
|
||||
@ -477,6 +481,15 @@ class ComponentBase(ABC):
|
||||
assert False, f"Can't find parameter '{key}' for {cpn_id}"
|
||||
continue
|
||||
|
||||
if q["component_id"].lower().find("answer") == 0:
|
||||
txt = []
|
||||
for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
|
||||
txt.append(f"{r.upper()}: {c}")
|
||||
txt = "\n".join(txt)
|
||||
self._param.inputs.append({"content": txt, "component_id": q["component_id"]})
|
||||
outs.append(pd.DataFrame([{"content": txt}]))
|
||||
continue
|
||||
|
||||
outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
|
||||
self._param.inputs.append({"component_id": q["component_id"],
|
||||
"content": "\n".join(
|
||||
@ -542,7 +555,7 @@ class ComponentBase(ABC):
|
||||
eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
|
||||
continue
|
||||
|
||||
eles.append({"name": self._canvas.get_compnent_name(cpn_id), "key": cpn_id})
|
||||
eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
|
||||
else:
|
||||
eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
|
||||
return eles
|
||||
@ -566,4 +579,8 @@ class ComponentBase(ABC):
|
||||
return self._canvas.get_component(cpn_id)["obj"].component_name.lower()
|
||||
|
||||
def debug(self, **kwargs):
|
||||
return self._run([], **kwargs)
|
||||
return self._run([], **kwargs)
|
||||
|
||||
def get_parent(self):
|
||||
pid = self._canvas.get_component(self._id)["parent_id"]
|
||||
return self._canvas.get_component(pid)["obj"]
|
||||
|
||||
@ -39,13 +39,13 @@ class CategorizeParam(GenerateParam):
|
||||
if not v.get("to"):
|
||||
raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")
|
||||
|
||||
def get_prompt(self):
|
||||
def get_prompt(self, chat_hist):
|
||||
cate_lines = []
|
||||
for c, desc in self.category_description.items():
|
||||
for line in desc.get("examples", "").split("\n"):
|
||||
if not line:
|
||||
continue
|
||||
cate_lines.append("Question: {}\tCategory: {}".format(line, c))
|
||||
cate_lines.append("USER: {}\nCategory: {}".format(line, c))
|
||||
descriptions = []
|
||||
for c, desc in self.category_description.items():
|
||||
if desc.get("description"):
|
||||
@ -62,11 +62,15 @@ class CategorizeParam(GenerateParam):
|
||||
{}
|
||||
You could learn from the above examples.
|
||||
Just mention the category names, no need for any additional words.
|
||||
|
||||
---- Real Data ----
|
||||
{}
|
||||
""".format(
|
||||
len(self.category_description.keys()),
|
||||
"/".join(list(self.category_description.keys())),
|
||||
"\n".join(descriptions),
|
||||
"- ".join(cate_lines)
|
||||
"- ".join(cate_lines),
|
||||
chat_hist
|
||||
)
|
||||
return self.prompt
|
||||
|
||||
@ -76,9 +80,9 @@ class Categorize(Generate, ABC):
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
input = self.get_input()
|
||||
input = "Question: " + (list(input["content"])[-1] if "content" in input else "") + "\tCategory: "
|
||||
input = " - ".join(input["content"]) if "content" in input else ""
|
||||
chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
|
||||
ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
|
||||
ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
|
||||
self._param.gen_conf())
|
||||
logging.debug(f"input: {input}, answer: {str(ans)}")
|
||||
for c in self._param.category_description.keys():
|
||||
@ -90,5 +94,5 @@ class Categorize(Generate, ABC):
|
||||
def debug(self, **kwargs):
|
||||
df = self._run([], **kwargs)
|
||||
cpn_id = df.iloc[0, 0]
|
||||
return Categorize.be_output(self._canvas.get_compnent_name(cpn_id))
|
||||
return Categorize.be_output(self._canvas.get_component_name(cpn_id))
|
||||
|
||||
|
||||
@ -1,36 +1,36 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class ConcentratorParam(ComponentParamBase):
|
||||
"""
|
||||
Define the Concentrator component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def check(self):
|
||||
return True
|
||||
|
||||
|
||||
class Concentrator(ComponentBase, ABC):
|
||||
component_name = "Concentrator"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class ConcentratorParam(ComponentParamBase):
|
||||
"""
|
||||
Define the Concentrator component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def check(self):
|
||||
return True
|
||||
|
||||
|
||||
class Concentrator(ComponentBase, ABC):
|
||||
component_name = "Concentrator"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
return Concentrator.be_output("")
|
||||
@ -41,7 +41,7 @@ class Crawler(ComponentBase, ABC):
|
||||
ans = self.get_input()
|
||||
ans = " - ".join(ans["content"]) if "content" in ans else ""
|
||||
if not is_valid_url(ans):
|
||||
return Crawler.be_output("")
|
||||
return Crawler.be_output("URL not valid")
|
||||
try:
|
||||
result = asyncio.run(self.get_web(ans))
|
||||
|
||||
|
||||
@ -15,14 +15,17 @@
|
||||
#
|
||||
from abc import ABC
|
||||
import re
|
||||
from copy import deepcopy
|
||||
|
||||
import pandas as pd
|
||||
import pymysql
|
||||
import psycopg2
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
from agent.component import GenerateParam, Generate
|
||||
import pyodbc
|
||||
import logging
|
||||
|
||||
class ExeSQLParam(ComponentParamBase):
|
||||
|
||||
class ExeSQLParam(GenerateParam):
|
||||
"""
|
||||
Define the ExeSQL component parameters.
|
||||
"""
|
||||
@ -39,6 +42,7 @@ class ExeSQLParam(ComponentParamBase):
|
||||
self.top_n = 30
|
||||
|
||||
def check(self):
|
||||
super().check()
|
||||
self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
|
||||
self.check_empty(self.database, "Database name")
|
||||
self.check_empty(self.username, "database username")
|
||||
@ -48,43 +52,33 @@ class ExeSQLParam(ComponentParamBase):
|
||||
self.check_positive_integer(self.top_n, "Number of records")
|
||||
if self.database == "rag_flow":
|
||||
if self.host == "ragflow-mysql":
|
||||
raise ValueError("The host is not accessible.")
|
||||
raise ValueError("For the security reason, it dose not support database named rag_flow.")
|
||||
if self.password == "infini_rag_flow":
|
||||
raise ValueError("The host is not accessible.")
|
||||
raise ValueError("For the security reason, it dose not support database named rag_flow.")
|
||||
|
||||
|
||||
class ExeSQL(ComponentBase, ABC):
|
||||
class ExeSQL(Generate, ABC):
|
||||
component_name = "ExeSQL"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
if not hasattr(self, "_loop"):
|
||||
setattr(self, "_loop", 0)
|
||||
if self._loop >= self._param.loop:
|
||||
self._loop = 0
|
||||
raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
|
||||
self._loop += 1
|
||||
|
||||
ans = self.get_input()
|
||||
|
||||
|
||||
ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
|
||||
if self._param.db_type == 'mssql':
|
||||
# improve the information extraction, most llm return results in markdown format ```sql query ```
|
||||
match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
|
||||
if match:
|
||||
ans = match.group(1) # Query content
|
||||
print(ans)
|
||||
else:
|
||||
print("no markdown")
|
||||
ans = re.sub(r'^.*?SELECT ', 'SELECT ', (ans), flags=re.IGNORECASE)
|
||||
def _refactor(self, ans):
|
||||
ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
|
||||
match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
|
||||
if match:
|
||||
ans = match.group(1) # Query content
|
||||
return ans
|
||||
else:
|
||||
ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
|
||||
print("no markdown")
|
||||
ans = re.sub(r'^.*?SELECT ', 'SELECT ', (ans), flags=re.IGNORECASE)
|
||||
ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
|
||||
ans = re.sub(r';[^;]*$', r';', ans)
|
||||
if not ans:
|
||||
raise Exception("SQL statement not found!")
|
||||
return ans
|
||||
|
||||
logging.info("db_type: ",self._param.db_type)
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
|
||||
ans = self._refactor(ans)
|
||||
if self._param.db_type in ["mysql", "mariadb"]:
|
||||
db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
|
||||
port=self._param.port, password=self._param.password)
|
||||
@ -93,36 +87,68 @@ class ExeSQL(ComponentBase, ABC):
|
||||
port=self._param.port, password=self._param.password)
|
||||
elif self._param.db_type == 'mssql':
|
||||
conn_str = (
|
||||
r'DRIVER={ODBC Driver 17 for SQL Server};'
|
||||
r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
|
||||
r'DATABASE=' + self._param.database + ';'
|
||||
r'UID=' + self._param.username + ';'
|
||||
r'PWD=' + self._param.password
|
||||
r'DRIVER={ODBC Driver 17 for SQL Server};'
|
||||
r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
|
||||
r'DATABASE=' + self._param.database + ';'
|
||||
r'UID=' + self._param.username + ';'
|
||||
r'PWD=' + self._param.password
|
||||
)
|
||||
db = pyodbc.connect(conn_str)
|
||||
try:
|
||||
cursor = db.cursor()
|
||||
except Exception as e:
|
||||
raise Exception("Database Connection Failed! \n" + str(e))
|
||||
if not hasattr(self, "_loop"):
|
||||
setattr(self, "_loop", 0)
|
||||
self._loop += 1
|
||||
input_list = re.split(r';', ans.replace(r"\n", " "))
|
||||
sql_res = []
|
||||
for single_sql in re.split(r';', ans.replace(r"\n", " ")):
|
||||
if not single_sql:
|
||||
continue
|
||||
try:
|
||||
logging.info("single_sql: ",single_sql)
|
||||
cursor.execute(single_sql)
|
||||
if cursor.rowcount == 0:
|
||||
sql_res.append({"content": "\nTotal: 0\n No record in the database!"})
|
||||
continue
|
||||
single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
|
||||
single_res.columns = [i[0] for i in cursor.description]
|
||||
sql_res.append({"content": "\nTotal: " + str(cursor.rowcount) + "\n" + single_res.to_markdown()})
|
||||
except Exception as e:
|
||||
sql_res.append({"content": "**Error**:" + str(e) + "\nError SQL Statement:" + single_sql})
|
||||
pass
|
||||
for i in range(len(input_list)):
|
||||
single_sql = input_list[i]
|
||||
while self._loop <= self._param.loop:
|
||||
self._loop += 1
|
||||
if not single_sql:
|
||||
break
|
||||
try:
|
||||
cursor.execute(single_sql)
|
||||
if cursor.rowcount == 0:
|
||||
sql_res.append({"content": "No record in the database!"})
|
||||
break
|
||||
if self._param.db_type == 'mssql':
|
||||
single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
|
||||
columns=[desc[0] for desc in cursor.description])
|
||||
else:
|
||||
single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
|
||||
single_res.columns = [i[0] for i in cursor.description]
|
||||
sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
|
||||
break
|
||||
except Exception as e:
|
||||
single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
|
||||
single_sql = self._refactor(single_sql)
|
||||
if self._loop > self._param.loop:
|
||||
sql_res.append({"content": "Can't query the correct data via SQL statement."})
|
||||
db.close()
|
||||
|
||||
if not sql_res:
|
||||
return ExeSQL.be_output("")
|
||||
|
||||
return pd.DataFrame(sql_res)
|
||||
|
||||
def _regenerate_sql(self, failed_sql, error_message, **kwargs):
|
||||
prompt = f'''
|
||||
## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
|
||||
## The original SQL statement is as follows:{failed_sql}.
|
||||
## The contents of the SQL query error report are as follows:{error_message}.
|
||||
## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
|
||||
'''
|
||||
self._param.prompt = prompt
|
||||
kwargs_ = deepcopy(kwargs)
|
||||
kwargs_["stream"] = False
|
||||
response = Generate._run(self, [], **kwargs_)
|
||||
try:
|
||||
regenerated_sql = response.loc[0, "content"]
|
||||
return regenerated_sql
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to regenerate SQL: {e}")
|
||||
return None
|
||||
|
||||
def debug(self, **kwargs):
|
||||
return self._run([], **kwargs)
|
||||
|
||||
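The rewritten ExeSQL above extracts the statement from an LLM answer and retries on failure. A standalone sketch of just the extraction step, mirroring the regexes in _refactor (function name and sample text are illustrative):

import re

def extract_sql(llm_answer: str) -> str:
    # Drop any <think>...</think> reasoning block the model may emit.
    llm_answer = re.sub(r"<think>.*</think>", "", llm_answer, flags=re.DOTALL)
    # Prefer the fenced ```sql ... ``` body when the model answers in markdown.
    match = re.search(r"```sql\s*(.*?)\s*```", llm_answer, re.DOTALL)
    if match:
        return match.group(1)
    # Otherwise keep everything from the first SELECT onward and trim trailing text.
    llm_answer = re.sub(r'^.*?SELECT ', 'SELECT ', llm_answer, flags=re.IGNORECASE)
    llm_answer = re.sub(r';[^;]*$', r';', llm_answer)
    return llm_answer

print(extract_sql("Sure! ```sql SELECT id, name FROM users LIMIT 10; ```"))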
@ -18,10 +18,10 @@ from functools import partial
|
||||
import pandas as pd
|
||||
from api.db import LLMType
|
||||
from api.db.services.conversation_service import structure_answer
|
||||
from api.db.services.dialog_service import message_fit_in
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api import settings
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
from rag.prompts import message_fit_in
|
||||
|
||||
|
||||
class GenerateParam(ComponentParamBase):
|
||||
@ -69,10 +69,8 @@ class Generate(ComponentBase):
|
||||
component_name = "Generate"
|
||||
|
||||
def get_dependent_components(self):
|
||||
cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
|
||||
if para.get("component_id") \
|
||||
and para["component_id"].lower().find("answer") < 0 \
|
||||
and para["component_id"].lower().find("begin") < 0])
|
||||
inputs = self.get_input_elements()
|
||||
cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
|
||||
return list(cpnts)
|
||||
|
||||
def set_cite(self, retrieval_res, answer):
|
||||
@ -110,10 +108,26 @@ class Generate(ComponentBase):
|
||||
return res
|
||||
|
||||
def get_input_elements(self):
|
||||
if self._param.parameters:
|
||||
return [{"key": "user", "name": "User"}, *self._param.parameters]
|
||||
|
||||
return [{"key": "user", "name": "User"}]
|
||||
key_set = set([])
|
||||
res = [{"key": "user", "name": "Input your question here:"}]
|
||||
for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
|
||||
cpn_id = r.group(1)
|
||||
if cpn_id in key_set:
|
||||
continue
|
||||
if cpn_id.lower().find("begin@") == 0:
|
||||
cpn_id, key = cpn_id.split("@")
|
||||
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
|
||||
if p["key"] != key:
|
||||
continue
|
||||
res.append({"key": r.group(1), "name": p["name"]})
|
||||
key_set.add(r.group(1))
|
||||
continue
|
||||
cpn_nm = self._canvas.get_component_name(cpn_id)
|
||||
if not cpn_nm:
|
||||
continue
|
||||
res.append({"key": cpn_id, "name": cpn_nm})
|
||||
key_set.add(cpn_id)
|
||||
return res
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
|
||||
@ -121,22 +135,20 @@ class Generate(ComponentBase):
|
||||
|
||||
retrieval_res = []
|
||||
self._param.inputs = []
|
||||
for para in self._param.parameters:
|
||||
if not para.get("component_id"):
|
||||
continue
|
||||
component_id = para["component_id"].split("@")[0]
|
||||
if para["component_id"].lower().find("@") >= 0:
|
||||
cpn_id, key = para["component_id"].split("@")
|
||||
for para in self.get_input_elements()[1:]:
|
||||
if para["key"].lower().find("begin@") == 0:
|
||||
cpn_id, key = para["key"].split("@")
|
||||
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
|
||||
if p["key"] == key:
|
||||
kwargs[para["key"]] = p.get("value", "")
|
||||
self._param.inputs.append(
|
||||
{"component_id": para["component_id"], "content": kwargs[para["key"]]})
|
||||
{"component_id": para["key"], "content": kwargs[para["key"]]})
|
||||
break
|
||||
else:
|
||||
assert False, f"Can't find parameter '{key}' for {cpn_id}"
|
||||
continue
|
||||
|
||||
component_id = para["key"]
|
||||
cpn = self._canvas.get_component(component_id)["obj"]
|
||||
if cpn.component_name.lower() == "answer":
|
||||
hist = self._canvas.get_history(1)
|
||||
@ -152,8 +164,8 @@ class Generate(ComponentBase):
|
||||
else:
|
||||
if cpn.component_name.lower() == "retrieval":
|
||||
retrieval_res.append(out)
|
||||
kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
|
||||
self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
|
||||
kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
|
||||
self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})
|
||||
|
||||
if retrieval_res:
|
||||
retrieval_res = pd.concat(retrieval_res, ignore_index=True)
|
||||
@ -175,17 +187,18 @@ class Generate(ComponentBase):
|
||||
return partial(self.stream_output, chat_mdl, prompt, retrieval_res)
|
||||
|
||||
if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
|
||||
res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
|
||||
retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
|
||||
empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
|
||||
res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
|
||||
return pd.DataFrame([res])
|
||||
|
||||
msg = self._canvas.get_history(self._param.message_history_window_size)
|
||||
if len(msg) < 1:
|
||||
msg.append({"role": "user", "content": ""})
|
||||
msg.append({"role": "user", "content": "Output: "})
|
||||
_, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
|
||||
if len(msg) < 2:
|
||||
msg.append({"role": "user", "content": ""})
|
||||
msg.append({"role": "user", "content": "Output: "})
|
||||
ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
|
||||
ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
|
||||
|
||||
if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
|
||||
res = self.set_cite(retrieval_res, ans)
|
||||
@ -196,18 +209,20 @@ class Generate(ComponentBase):
|
||||
def stream_output(self, chat_mdl, prompt, retrieval_res):
|
||||
res = None
|
||||
if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
|
||||
res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
|
||||
retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
|
||||
empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
|
||||
res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
|
||||
yield res
|
||||
self.set_output(res)
|
||||
return
|
||||
|
||||
msg = self._canvas.get_history(self._param.message_history_window_size)
|
||||
if msg and msg[0]['role'] == 'assistant':
|
||||
msg.pop(0)
|
||||
if len(msg) < 1:
|
||||
msg.append({"role": "user", "content": ""})
|
||||
msg.append({"role": "user", "content": "Output: "})
|
||||
_, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
|
||||
if len(msg) < 2:
|
||||
msg.append({"role": "user", "content": ""})
|
||||
msg.append({"role": "user", "content": "Output: "})
|
||||
answer = ""
|
||||
for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
|
||||
res = {"content": ans, "reference": []}
|
||||
@ -230,5 +245,6 @@ class Generate(ComponentBase):
|
||||
for n, v in kwargs.items():
|
||||
prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
|
||||
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": kwargs.get("user", "")}], self._param.gen_conf())
|
||||
u = kwargs.get("user")
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
|
||||
return pd.DataFrame([ans])
|
||||
|
||||
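The new get_input_elements above discovers inputs by scanning the prompt for {component} and {begin@field} placeholders. A small sketch of that scan, using the same regex; the prompt text and component ids are made up for illustration:

import re

PROMPT = "Answer using {retrieval:kb_0} and the user's {begin@language} preference."

keys = []
seen = set()
for m in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", PROMPT, flags=re.IGNORECASE):
    ref = m.group(1)
    if ref in seen:
        continue
    seen.add(ref)
    # "begin@xxx" refers to a field of the Begin form; anything else is a component id.
    kind = "begin-field" if ref.lower().startswith("begin@") else "component"
    keys.append((ref, kind))

print(keys)  # [('retrieval:kb_0', 'component'), ('begin@language', 'begin-field')]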
@ -35,12 +35,14 @@ class InvokeParam(ComponentParamBase):
|
||||
self.url = ""
|
||||
self.timeout = 60
|
||||
self.clean_html = False
|
||||
self.datatype = "json" # New parameter to determine data posting type
|
||||
|
||||
def check(self):
|
||||
self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
|
||||
self.check_empty(self.url, "End point URL")
|
||||
self.check_positive_integer(self.timeout, "Timeout time in second")
|
||||
self.check_boolean(self.clean_html, "Clean HTML")
|
||||
self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata']) # Check for valid datapost value
|
||||
|
||||
|
||||
class Invoke(ComponentBase, ABC):
|
||||
@ -50,14 +52,24 @@ class Invoke(ComponentBase, ABC):
|
||||
args = {}
|
||||
for para in self._param.variables:
|
||||
if para.get("component_id"):
|
||||
cpn = self._canvas.get_component(para["component_id"])["obj"]
|
||||
if cpn.component_name.lower() == "answer":
|
||||
args[para["key"]] = self._canvas.get_history(1)[0]["content"]
|
||||
continue
|
||||
_, out = cpn.output(allow_partial=False)
|
||||
args[para["key"]] = "\n".join(out["content"])
|
||||
if '@' in para["component_id"]:
|
||||
component = para["component_id"].split('@')[0]
|
||||
field = para["component_id"].split('@')[1]
|
||||
cpn = self._canvas.get_component(component)["obj"]
|
||||
for param in cpn._param.query:
|
||||
if param["key"] == field:
|
||||
if "value" in param:
|
||||
args[para["key"]] = param["value"]
|
||||
else:
|
||||
cpn = self._canvas.get_component(para["component_id"])["obj"]
|
||||
if cpn.component_name.lower() == "answer":
|
||||
args[para["key"]] = self._canvas.get_history(1)[0]["content"]
|
||||
continue
|
||||
_, out = cpn.output(allow_partial=False)
|
||||
if not out.empty:
|
||||
args[para["key"]] = "\n".join(out["content"])
|
||||
else:
|
||||
args[para["key"]] = "\n".join(para["value"])
|
||||
args[para["key"]] = para["value"]
|
||||
|
||||
url = self._param.url.strip()
|
||||
if url.find("http") != 0:
|
||||
@ -84,22 +96,36 @@ class Invoke(ComponentBase, ABC):
|
||||
return Invoke.be_output(response.text)
|
||||
|
||||
if method == 'put':
|
||||
response = requests.put(url=url,
|
||||
data=args,
|
||||
headers=headers,
|
||||
proxies=proxies,
|
||||
timeout=self._param.timeout)
|
||||
if self._param.datatype.lower() == 'json':
|
||||
response = requests.put(url=url,
|
||||
json=args,
|
||||
headers=headers,
|
||||
proxies=proxies,
|
||||
timeout=self._param.timeout)
|
||||
else:
|
||||
response = requests.put(url=url,
|
||||
data=args,
|
||||
headers=headers,
|
||||
proxies=proxies,
|
||||
timeout=self._param.timeout)
|
||||
if self._param.clean_html:
|
||||
sections = HtmlParser()(None, response.content)
|
||||
return Invoke.be_output("\n".join(sections))
|
||||
return Invoke.be_output(response.text)
|
||||
|
||||
if method == 'post':
|
||||
response = requests.post(url=url,
|
||||
json=args,
|
||||
headers=headers,
|
||||
proxies=proxies,
|
||||
timeout=self._param.timeout)
|
||||
if self._param.datatype.lower() == 'json':
|
||||
response = requests.post(url=url,
|
||||
json=args,
|
||||
headers=headers,
|
||||
proxies=proxies,
|
||||
timeout=self._param.timeout)
|
||||
else:
|
||||
response = requests.post(url=url,
|
||||
data=args,
|
||||
headers=headers,
|
||||
proxies=proxies,
|
||||
timeout=self._param.timeout)
|
||||
if self._param.clean_html:
|
||||
sections = HtmlParser()(None, response.content)
|
||||
return Invoke.be_output("\n".join(sections))
|
||||
|
||||
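The datatype parameter added to Invoke above switches between posting a JSON body and form-encoded fields. A minimal sketch of that switch with requests; the URL and payload are placeholders:

import requests

def send(url: str, args: dict, datatype: str = "json", timeout: int = 60):
    if datatype.lower() == "json":
        # Serialized as a JSON body, Content-Type: application/json
        return requests.post(url=url, json=args, timeout=timeout)
    # Sent as application/x-www-form-urlencoded form fields
    return requests.post(url=url, data=args, timeout=timeout)

# send("https://example.com/api", {"q": "hello"}, datatype="formdata")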
agent/component/iteration.py (new file, 45 lines)
@ -0,0 +1,45 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class IterationParam(ComponentParamBase):
|
||||
"""
|
||||
Define the Iteration component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.delimiter = ","
|
||||
|
||||
def check(self):
|
||||
self.check_empty(self.delimiter, "Delimiter")
|
||||
|
||||
|
||||
class Iteration(ComponentBase, ABC):
|
||||
component_name = "Iteration"
|
||||
|
||||
def get_start(self):
|
||||
for cid in self._canvas.components.keys():
|
||||
if self._canvas.get_component(cid)["obj"].component_name.lower() != "iterationitem":
|
||||
continue
|
||||
if self._canvas.get_component(cid)["parent_id"] == self._id:
|
||||
return self._canvas.get_component(cid)
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
return self.output(allow_partial=False)[1]
|
||||
|
||||
agent/component/iterationitem.py (new file, 49 lines)
@ -0,0 +1,49 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class IterationItemParam(ComponentParamBase):
|
||||
"""
|
||||
Define the IterationItem component parameters.
|
||||
"""
|
||||
def check(self):
|
||||
return True
|
||||
|
||||
|
||||
class IterationItem(ComponentBase, ABC):
|
||||
component_name = "IterationItem"
|
||||
|
||||
def __init__(self, canvas, id, param: ComponentParamBase):
|
||||
super().__init__(canvas, id, param)
|
||||
self._idx = 0
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
parent = self.get_parent()
|
||||
ans = parent.get_input()
|
||||
ans = parent._param.delimiter.join(ans["content"]) if "content" in ans else ""
|
||||
ans = [a.strip() for a in ans.split(parent._param.delimiter)]
|
||||
df = pd.DataFrame([{"content": ans[self._idx]}])
|
||||
self._idx += 1
|
||||
if self._idx >= len(ans):
|
||||
self._idx = -1
|
||||
return df
|
||||
|
||||
def end(self):
|
||||
return self._idx == -1
|
||||
|
||||
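Taken together, Iteration splits its input on a delimiter and IterationItem hands out one element per pass, ending once _idx runs past the last element. A plain-Python sketch of that walk, illustrative only:

def iterate(raw: str, delimiter: str = ","):
    items = [a.strip() for a in raw.split(delimiter)]
    idx = 0
    while idx < len(items):
        yield items[idx]      # each pass hands one item to the sub-canvas
        idx += 1
    # once idx runs past the end, IterationItem.end() reports True and the loop exits

print(list(iterate("red, green, blue")))  # ['red', 'green', 'blue']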
@ -1,130 +1,130 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
import requests
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class Jin10Param(ComponentParamBase):
|
||||
"""
|
||||
Define the Jin10 component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.type = "flash"
|
||||
self.secret_key = "xxx"
|
||||
self.flash_type = '1'
|
||||
self.calendar_type = 'cj'
|
||||
self.calendar_datatype = 'data'
|
||||
self.symbols_type = 'GOODS'
|
||||
self.symbols_datatype = 'symbols'
|
||||
self.contain = ""
|
||||
self.filter = ""
|
||||
|
||||
def check(self):
|
||||
self.check_valid_value(self.type, "Type", ['flash', 'calendar', 'symbols', 'news'])
|
||||
self.check_valid_value(self.flash_type, "Flash Type", ['1', '2', '3', '4', '5'])
|
||||
self.check_valid_value(self.calendar_type, "Calendar Type", ['cj', 'qh', 'hk', 'us'])
|
||||
self.check_valid_value(self.calendar_datatype, "Calendar DataType", ['data', 'event', 'holiday'])
|
||||
self.check_valid_value(self.symbols_type, "Symbols Type", ['GOODS', 'FOREX', 'FUTURE', 'CRYPTO'])
|
||||
self.check_valid_value(self.symbols_datatype, 'Symbols DataType', ['symbols', 'quotes'])
|
||||
|
||||
|
||||
class Jin10(ComponentBase, ABC):
|
||||
component_name = "Jin10"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = " - ".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return Jin10.be_output("")
|
||||
|
||||
jin10_res = []
|
||||
headers = {'secret-key': self._param.secret_key}
|
||||
try:
|
||||
if self._param.type == "flash":
|
||||
params = {
|
||||
'category': self._param.flash_type,
|
||||
'contain': self._param.contain,
|
||||
'filter': self._param.filter
|
||||
}
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/flash?category=' + self._param.flash_type,
|
||||
headers=headers, data=json.dumps(params))
|
||||
response = response.json()
|
||||
for i in response['data']:
|
||||
jin10_res.append({"content": i['data']['content']})
|
||||
if self._param.type == "calendar":
|
||||
params = {
|
||||
'category': self._param.calendar_type
|
||||
}
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/calendar/' + self._param.calendar_datatype + '?category=' + self._param.calendar_type,
|
||||
headers=headers, data=json.dumps(params))
|
||||
|
||||
response = response.json()
|
||||
jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
|
||||
if self._param.type == "symbols":
|
||||
params = {
|
||||
'type': self._param.symbols_type
|
||||
}
|
||||
if self._param.symbols_datatype == "quotes":
|
||||
params['codes'] = 'BTCUSD'
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
|
||||
headers=headers, data=json.dumps(params))
|
||||
response = response.json()
|
||||
if self._param.symbols_datatype == "symbols":
|
||||
for i in response['data']:
|
||||
i['Commodity Code'] = i['c']
|
||||
i['Stock Exchange'] = i['e']
|
||||
i['Commodity Name'] = i['n']
|
||||
i['Commodity Type'] = i['t']
|
||||
del i['c'], i['e'], i['n'], i['t']
|
||||
if self._param.symbols_datatype == "quotes":
|
||||
for i in response['data']:
|
||||
i['Selling Price'] = i['a']
|
||||
i['Buying Price'] = i['b']
|
||||
i['Commodity Code'] = i['c']
|
||||
i['Stock Exchange'] = i['e']
|
||||
i['Highest Price'] = i['h']
|
||||
i['Yesterday’s Closing Price'] = i['hc']
|
||||
i['Lowest Price'] = i['l']
|
||||
i['Opening Price'] = i['o']
|
||||
i['Latest Price'] = i['p']
|
||||
i['Market Quote Time'] = i['t']
|
||||
del i['a'], i['b'], i['c'], i['e'], i['h'], i['hc'], i['l'], i['o'], i['p'], i['t']
|
||||
jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
|
||||
if self._param.type == "news":
|
||||
params = {
|
||||
'contain': self._param.contain,
|
||||
'filter': self._param.filter
|
||||
}
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/news',
|
||||
headers=headers, data=json.dumps(params))
|
||||
response = response.json()
|
||||
jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
|
||||
except Exception as e:
|
||||
return Jin10.be_output("**ERROR**: " + str(e))
|
||||
|
||||
if not jin10_res:
|
||||
return Jin10.be_output("")
|
||||
|
||||
return pd.DataFrame(jin10_res)
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
import requests
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class Jin10Param(ComponentParamBase):
|
||||
"""
|
||||
Define the Jin10 component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.type = "flash"
|
||||
self.secret_key = "xxx"
|
||||
self.flash_type = '1'
|
||||
self.calendar_type = 'cj'
|
||||
self.calendar_datatype = 'data'
|
||||
self.symbols_type = 'GOODS'
|
||||
self.symbols_datatype = 'symbols'
|
||||
self.contain = ""
|
||||
self.filter = ""
|
||||
|
||||
def check(self):
|
||||
self.check_valid_value(self.type, "Type", ['flash', 'calendar', 'symbols', 'news'])
|
||||
self.check_valid_value(self.flash_type, "Flash Type", ['1', '2', '3', '4', '5'])
|
||||
self.check_valid_value(self.calendar_type, "Calendar Type", ['cj', 'qh', 'hk', 'us'])
|
||||
self.check_valid_value(self.calendar_datatype, "Calendar DataType", ['data', 'event', 'holiday'])
|
||||
self.check_valid_value(self.symbols_type, "Symbols Type", ['GOODS', 'FOREX', 'FUTURE', 'CRYPTO'])
|
||||
self.check_valid_value(self.symbols_datatype, 'Symbols DataType', ['symbols', 'quotes'])
|
||||
|
||||
|
||||
class Jin10(ComponentBase, ABC):
|
||||
component_name = "Jin10"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = " - ".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return Jin10.be_output("")
|
||||
|
||||
jin10_res = []
|
||||
headers = {'secret-key': self._param.secret_key}
|
||||
try:
|
||||
if self._param.type == "flash":
|
||||
params = {
|
||||
'category': self._param.flash_type,
|
||||
'contain': self._param.contain,
|
||||
'filter': self._param.filter
|
||||
}
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/flash?category=' + self._param.flash_type,
|
||||
headers=headers, data=json.dumps(params))
|
||||
response = response.json()
|
||||
for i in response['data']:
|
||||
jin10_res.append({"content": i['data']['content']})
|
||||
if self._param.type == "calendar":
|
||||
params = {
|
||||
'category': self._param.calendar_type
|
||||
}
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/calendar/' + self._param.calendar_datatype + '?category=' + self._param.calendar_type,
|
||||
headers=headers, data=json.dumps(params))
|
||||
|
||||
response = response.json()
|
||||
jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
|
||||
if self._param.type == "symbols":
|
||||
params = {
|
||||
'type': self._param.symbols_type
|
||||
}
|
||||
if self._param.symbols_datatype == "quotes":
|
||||
params['codes'] = 'BTCUSD'
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
|
||||
headers=headers, data=json.dumps(params))
|
||||
response = response.json()
|
||||
if self._param.symbols_datatype == "symbols":
|
||||
for i in response['data']:
|
||||
i['Commodity Code'] = i['c']
|
||||
i['Stock Exchange'] = i['e']
|
||||
i['Commodity Name'] = i['n']
|
||||
i['Commodity Type'] = i['t']
|
||||
del i['c'], i['e'], i['n'], i['t']
|
||||
if self._param.symbols_datatype == "quotes":
|
||||
for i in response['data']:
|
||||
i['Selling Price'] = i['a']
|
||||
i['Buying Price'] = i['b']
|
||||
i['Commodity Code'] = i['c']
|
||||
i['Stock Exchange'] = i['e']
|
||||
i['Highest Price'] = i['h']
|
||||
i['Yesterday’s Closing Price'] = i['hc']
|
||||
i['Lowest Price'] = i['l']
|
||||
i['Opening Price'] = i['o']
|
||||
i['Latest Price'] = i['p']
|
||||
i['Market Quote Time'] = i['t']
|
||||
del i['a'], i['b'], i['c'], i['e'], i['h'], i['hc'], i['l'], i['o'], i['p'], i['t']
|
||||
jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
|
||||
if self._param.type == "news":
|
||||
params = {
|
||||
'contain': self._param.contain,
|
||||
'filter': self._param.filter
|
||||
}
|
||||
response = requests.get(
|
||||
url='https://open-data-api.jin10.com/data-api/news',
|
||||
headers=headers, data=json.dumps(params))
|
||||
response = response.json()
|
||||
jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
|
||||
except Exception as e:
|
||||
return Jin10.be_output("**ERROR**: " + str(e))
|
||||
|
||||
if not jin10_res:
|
||||
return Jin10.be_output("")
|
||||
|
||||
return pd.DataFrame(jin10_res)
|
||||
|
||||
@ -23,6 +23,7 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api import settings
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
from rag.app.tag import label_question
|
||||
|
||||
|
||||
class RetrievalParam(ComponentParamBase):
|
||||
@ -42,7 +43,7 @@ class RetrievalParam(ComponentParamBase):
|
||||
|
||||
def check(self):
|
||||
self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
|
||||
self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
|
||||
self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
|
||||
self.check_positive_number(self.top_n, "[Retrieval] Top N")
|
||||
|
||||
|
||||
@ -52,7 +53,9 @@ class Retrieval(ComponentBase, ABC):
|
||||
def _run(self, history, **kwargs):
|
||||
query = self.get_input()
|
||||
query = str(query["content"][0]) if "content" in query else ""
|
||||
|
||||
lines = query.split('\n')
|
||||
user_queries = [line.split("USER:", 1)[1] for line in lines if line.startswith("USER:")]
|
||||
query = user_queries[-1] if user_queries else ""
|
||||
kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
|
||||
if not kbs:
|
||||
return Retrieval.be_output("")
|
||||
@ -70,7 +73,8 @@ class Retrieval(ComponentBase, ABC):
|
||||
kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
|
||||
1, self._param.top_n,
|
||||
self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
|
||||
aggs=False, rerank_mdl=rerank_mdl)
|
||||
aggs=False, rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(query, kbs))
|
||||
|
||||
if not kbinfos["chunks"]:
|
||||
df = Retrieval.be_output("")
|
||||
|
||||
@ -13,101 +13,82 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
from abc import ABC
|
||||
from api.db import LLMType
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from agent.component import GenerateParam, Generate
|
||||
from rag.prompts import full_question
|
||||
|
||||
|
||||
class RewriteQuestionParam(GenerateParam):
|
||||
|
||||
"""
|
||||
Define the QuestionRewrite component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.temperature = 0.9
|
||||
self.prompt = ""
|
||||
self.loop = 1
|
||||
self.language = ""
|
||||
|
||||
def check(self):
|
||||
super().check()
|
||||
|
||||
def get_prompt(self, conv):
|
||||
self.prompt = """
|
||||
You are an expert at query expansion to generate a paraphrasing of a question.
|
||||
I can't retrieval relevant information from the knowledge base by using user's question directly.
|
||||
You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
|
||||
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
|
||||
changing the way of expression, translating the original question into another language (English/Chinese), etc.
|
||||
And return 5 versions of question and one is from translation.
|
||||
Just list the question. No other words are needed.
|
||||
"""
|
||||
return f"""
|
||||
Role: A helpful assistant
|
||||
Task: Generate a full user question that would follow the conversation.
|
||||
Requirements & Restrictions:
|
||||
- Text generated MUST be in the same language as the original user's question.
|
||||
- If the user's latest question is already complete, don't do anything, just return the original question.
|
||||
- DON'T generate anything except a refined question.
|
||||
|
||||
######################
|
||||
-Examples-
|
||||
######################
|
||||
# Example 1
|
||||
## Conversation
|
||||
USER: What is the name of Donald Trump's father?
|
||||
ASSISTANT: Fred Trump.
|
||||
USER: And his mother?
|
||||
###############
|
||||
Output: What's the name of Donald Trump's mother?
|
||||
------------
|
||||
# Example 2
|
||||
## Conversation
|
||||
USER: What is the name of Donald Trump's father?
|
||||
ASSISTANT: Fred Trump.
|
||||
USER: And his mother?
|
||||
ASSISTANT: Mary Trump.
|
||||
User: What's her full name?
|
||||
###############
|
||||
Output: What's the full name of Donald Trump's mother Mary Trump?
|
||||
######################
|
||||
# Real Data
|
||||
## Conversation
|
||||
{conv}
|
||||
###############
|
||||
"""
|
||||
return self.prompt
|
||||
|
||||
|
||||
class RewriteQuestion(Generate, ABC):
|
||||
component_name = "RewriteQuestion"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
if not hasattr(self, "_loop"):
|
||||
setattr(self, "_loop", 0)
|
||||
if self._loop >= self._param.loop:
|
||||
self._loop = 0
|
||||
raise Exception("Sorry! Nothing relevant found.")
|
||||
self._loop += 1
|
||||
|
||||
hist = self._canvas.get_history(4)
|
||||
conv = []
|
||||
for m in hist:
|
||||
if m["role"] not in ["user", "assistant"]:
|
||||
continue
|
||||
conv.append("{}: {}".format(m["role"].upper(), m["content"]))
|
||||
conv = "\n".join(conv)
|
||||
|
||||
chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
|
||||
ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
|
||||
self._param.gen_conf())
|
||||
hist = self._canvas.get_history(self._param.message_history_window_size)
|
||||
query = self.get_input()
|
||||
query = str(query["content"][0]) if "content" in query else ""
|
||||
messages = [h for h in hist if h["role"]!="system"]
|
||||
if messages[-1]["role"] != "user":
|
||||
messages.append({"role": "user", "content": query})
|
||||
ans = full_question(self._canvas.get_tenant_id(), self._param.llm_id, messages, self.gen_lang(self._param.language))
|
||||
self._canvas.history.pop()
|
||||
self._canvas.history.append(("user", ans))
|
||||
|
||||
logging.debug(ans)
|
||||
return RewriteQuestion.be_output(ans)
|
||||
|
||||
|
||||
|
||||
@staticmethod
|
||||
def gen_lang(language):
|
||||
# convert code lang to language word for the prompt
|
||||
language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
|
||||
'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
|
||||
'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
|
||||
'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
|
||||
'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
|
||||
'zh-tw': 'Chinese', 'co': 'Corsican',
|
||||
'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
|
||||
'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
|
||||
'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
|
||||
'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
|
||||
'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
|
||||
'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
|
||||
'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
|
||||
'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
|
||||
'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
|
||||
'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
|
||||
'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
|
||||
'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
|
||||
'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
|
||||
'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
|
||||
'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
|
||||
'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
|
||||
'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
|
||||
'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
|
||||
'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
|
||||
'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
|
||||
'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
|
||||
'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
|
||||
'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
|
||||
'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
|
||||
'su': 'Sundanese',
|
||||
'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
|
||||
'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
|
||||
'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
|
||||
'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
|
||||
'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
|
||||
'yo': 'Yoruba', 'zu': 'Zulu'}
|
||||
if language in language_dict:
|
||||
return language_dict[language]
|
||||
else:
|
||||
return ""
|
||||
|
||||
@ -13,8 +13,10 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import re
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
from jinja2 import Template as Jinja2Template
|
||||
|
||||
|
||||
class TemplateParam(ComponentParamBase):
|
||||
@ -36,32 +38,49 @@ class Template(ComponentBase):
|
||||
component_name = "Template"
|
||||
|
||||
def get_dependent_components(self):
|
||||
cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
|
||||
if para.get("component_id") \
|
||||
and para["component_id"].lower().find("answer") < 0 \
|
||||
and para["component_id"].lower().find("begin") < 0])
|
||||
inputs = self.get_input_elements()
|
||||
cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
|
||||
return list(cpnts)
|
||||
|
||||
def get_input_elements(self):
|
||||
key_set = set([])
|
||||
res = []
|
||||
for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
|
||||
cpn_id = r.group(1)
|
||||
if cpn_id in key_set:
|
||||
continue
|
||||
if cpn_id.lower().find("begin@") == 0:
|
||||
cpn_id, key = cpn_id.split("@")
|
||||
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
|
||||
if p["key"] != key:
|
||||
continue
|
||||
res.append({"key": r.group(1), "name": p["name"]})
|
||||
key_set.add(r.group(1))
|
||||
continue
|
||||
cpn_nm = self._canvas.get_component_name(cpn_id)
|
||||
if not cpn_nm:
|
||||
continue
|
||||
res.append({"key": cpn_id, "name": cpn_nm})
|
||||
key_set.add(cpn_id)
|
||||
return res
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
content = self._param.content
|
||||
|
||||
self._param.inputs = []
|
||||
for para in self._param.parameters:
|
||||
if not para.get("component_id"):
|
||||
continue
|
||||
component_id = para["component_id"].split("@")[0]
|
||||
if para["component_id"].lower().find("@") >= 0:
|
||||
cpn_id, key = para["component_id"].split("@")
|
||||
for para in self.get_input_elements():
|
||||
if para["key"].lower().find("begin@") == 0:
|
||||
cpn_id, key = para["key"].split("@")
|
||||
for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
|
||||
if p["key"] == key:
|
||||
kwargs[para["key"]] = p.get("value", "")
|
||||
self._param.inputs.append(
|
||||
{"component_id": para["component_id"], "content": kwargs[para["key"]]})
|
||||
value = p.get("value", "")
|
||||
self.make_kwargs(para, kwargs, value)
|
||||
break
|
||||
else:
|
||||
assert False, f"Can't find parameter '{key}' for {cpn_id}"
|
||||
continue
|
||||
|
||||
component_id = para["key"]
|
||||
cpn = self._canvas.get_component(component_id)["obj"]
|
||||
if cpn.component_name.lower() == "answer":
|
||||
hist = self._canvas.get_history(1)
|
||||
@ -69,18 +88,49 @@ class Template(ComponentBase):
|
||||
hist = hist[0]["content"]
|
||||
else:
|
||||
hist = ""
|
||||
kwargs[para["key"]] = hist
|
||||
self.make_kwargs(para, kwargs, hist)
|
||||
continue
|
||||
|
||||
_, out = cpn.output(allow_partial=False)
|
||||
if "content" not in out.columns:
|
||||
kwargs[para["key"]] = ""
|
||||
else:
|
||||
kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
|
||||
self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
|
||||
|
||||
result = ""
|
||||
if "content" in out.columns:
|
||||
result = "\n".join(
|
||||
[o if isinstance(o, str) else str(o) for o in out["content"]]
|
||||
)
|
||||
|
||||
self.make_kwargs(para, kwargs, result)
|
||||
|
||||
template = Jinja2Template(content)
|
||||
|
||||
try:
|
||||
content = template.render(kwargs)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
for n, v in kwargs.items():
|
||||
content = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), content)
|
||||
try:
|
||||
v = json.dumps(v, ensure_ascii=False)
|
||||
except Exception:
|
||||
pass
|
||||
content = re.sub(
|
||||
r"\{%s\}" % re.escape(n), v, content
|
||||
)
|
||||
content = re.sub(
|
||||
r"(\\\"|\")", "", content
|
||||
)
|
||||
content = re.sub(
|
||||
r"(#+)", r" \1 ", content
|
||||
)
|
||||
|
||||
return Template.be_output(content)
|
||||
|
||||
def make_kwargs(self, para, kwargs, value):
|
||||
self._param.inputs.append(
|
||||
{"component_id": para["key"], "content": value}
|
||||
)
|
||||
try:
|
||||
value = json.loads(value)
|
||||
except Exception:
|
||||
pass
|
||||
kwargs[para["key"]] = value
|
||||
|
||||
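The updated Template component above first lets Jinja2 render the content and then falls back to literal {component_id} substitution with JSON-encoded values, before stripping stray quotes and padding heading markers. A minimal standalone sketch of that substitution order, outside the canvas machinery; the placeholder name and sample content are made-up examples, and a callable replacement is used so backslashes in values cannot be misread as regex escapes:

import json
import re

from jinja2 import Template as Jinja2Template

def render_template(content: str, variables: dict) -> str:
    # First pass: let Jinja2 resolve any {{ ... }} expressions.
    try:
        content = Jinja2Template(content).render(variables)
    except Exception:
        pass
    # Second pass: literal {key} placeholders are replaced with JSON-encoded values,
    # mirroring the fallback loop in Template._run above.
    for name, value in variables.items():
        try:
            value = json.dumps(value, ensure_ascii=False)
        except Exception:
            value = str(value)
        content = re.sub(r"\{%s\}" % re.escape(name), lambda _m, v=value: v, content)
    # The component then drops stray quotes and pads markdown heading markers.
    content = re.sub(r"(\\\"|\")", "", content)
    content = re.sub(r"(#+)", r" \1 ", content)
    return content

print(render_template("## Summary\n{retrieval:0}", {"retrieval:0": ["chunk A", "chunk B"]}))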
@ -1,72 +1,72 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
import time
|
||||
import requests
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class TuShareParam(ComponentParamBase):
|
||||
"""
|
||||
Define the TuShare component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.token = "xxx"
|
||||
self.src = "eastmoney"
|
||||
self.start_date = "2024-01-01 09:00:00"
|
||||
self.end_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
|
||||
self.keyword = ""
|
||||
|
||||
def check(self):
|
||||
self.check_valid_value(self.src, "Quick News Source",
|
||||
["sina", "wallstreetcn", "10jqka", "eastmoney", "yuncaijing", "fenghuang", "jinrongjie"])
|
||||
|
||||
|
||||
class TuShare(ComponentBase, ABC):
|
||||
component_name = "TuShare"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = ",".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return TuShare.be_output("")
|
||||
|
||||
try:
|
||||
tus_res = []
|
||||
params = {
|
||||
"api_name": "news",
|
||||
"token": self._param.token,
|
||||
"params": {"src": self._param.src, "start_date": self._param.start_date,
|
||||
"end_date": self._param.end_date}
|
||||
}
|
||||
response = requests.post(url="http://api.tushare.pro", data=json.dumps(params).encode('utf-8'))
|
||||
response = response.json()
|
||||
if response['code'] != 0:
|
||||
return TuShare.be_output(response['msg'])
|
||||
df = pd.DataFrame(response['data']['items'])
|
||||
df.columns = response['data']['fields']
|
||||
tus_res.append({"content": (df[df['content'].str.contains(self._param.keyword, case=False)]).to_markdown()})
|
||||
except Exception as e:
|
||||
return TuShare.be_output("**ERROR**: " + str(e))
|
||||
|
||||
if not tus_res:
|
||||
return TuShare.be_output("")
|
||||
|
||||
return pd.DataFrame(tus_res)
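For reference, the raw call the TuShare component wraps looks roughly like this; the token is a placeholder, and the response shape ('code', 'data.items', 'data.fields') is taken directly from the handling in _run above:

import json

import pandas as pd
import requests

def fetch_tushare_news(token: str, src: str = "eastmoney",
                       start_date: str = "2024-01-01 09:00:00",
                       end_date: str = "2024-01-02 09:00:00") -> pd.DataFrame:
    # Same request body the component builds before posting to api.tushare.pro.
    payload = {
        "api_name": "news",
        "token": token,
        "params": {"src": src, "start_date": start_date, "end_date": end_date},
    }
    resp = requests.post("http://api.tushare.pro", data=json.dumps(payload).encode("utf-8")).json()
    if resp["code"] != 0:
        raise RuntimeError(resp["msg"])
    # The items and field names are reassembled into a DataFrame, as above.
    df = pd.DataFrame(resp["data"]["items"])
    df.columns = resp["data"]["fields"]
    return df

# df = fetch_tushare_news("your-tushare-token", src="sina")
# print(df[df["content"].str.contains("AI", case=False)].to_markdown())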
@ -1,80 +1,80 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
import pywencai
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class WenCaiParam(ComponentParamBase):
|
||||
"""
|
||||
Define the WenCai component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.top_n = 10
|
||||
self.query_type = "stock"
|
||||
|
||||
def check(self):
|
||||
self.check_positive_integer(self.top_n, "Top N")
|
||||
self.check_valid_value(self.query_type, "Query type",
|
||||
['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
|
||||
'futures', 'lccp',
|
||||
'foreign_exchange'])
|
||||
|
||||
|
||||
class WenCai(ComponentBase, ABC):
|
||||
component_name = "WenCai"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = ",".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return WenCai.be_output("")
|
||||
|
||||
try:
|
||||
wencai_res = []
|
||||
res = pywencai.get(query=ans, query_type=self._param.query_type, perpage=self._param.top_n)
|
||||
if isinstance(res, pd.DataFrame):
|
||||
wencai_res.append({"content": res.to_markdown()})
|
||||
if isinstance(res, dict):
|
||||
for item in res.items():
|
||||
if isinstance(item[1], list):
|
||||
wencai_res.append({"content": item[0] + "\n" + pd.DataFrame(item[1]).to_markdown()})
|
||||
continue
|
||||
if isinstance(item[1], str):
|
||||
wencai_res.append({"content": item[0] + "\n" + item[1]})
|
||||
continue
|
||||
if isinstance(item[1], dict):
|
||||
if "meta" in item[1].keys():
|
||||
continue
|
||||
wencai_res.append({"content": pd.DataFrame.from_dict(item[1], orient='index').to_markdown()})
|
||||
continue
|
||||
if isinstance(item[1], pd.DataFrame):
|
||||
if "image_url" in item[1].columns:
|
||||
continue
|
||||
wencai_res.append({"content": item[1].to_markdown()})
|
||||
continue
|
||||
|
||||
wencai_res.append({"content": item[0] + "\n" + str(item[1])})
|
||||
except Exception as e:
|
||||
return WenCai.be_output("**ERROR**: " + str(e))
|
||||
|
||||
if not wencai_res:
|
||||
return WenCai.be_output("")
|
||||
|
||||
return pd.DataFrame(wencai_res)
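The branching in WenCai._run only normalizes pywencai's mixed return types into markdown rows. A condensed sketch of the same idea; it keeps the DataFrame/list/dict/str cases but skips the meta and image_url filtering the component applies, and the query text is illustrative:

import pandas as pd
import pywencai

def wencai_to_markdown(query: str, query_type: str = "stock", top_n: int = 10) -> list:
    rows = []
    res = pywencai.get(query=query, query_type=query_type, perpage=top_n)
    if isinstance(res, pd.DataFrame):
        return [{"content": res.to_markdown()}]
    if isinstance(res, dict):
        for key, val in res.items():
            if isinstance(val, pd.DataFrame):
                rows.append({"content": val.to_markdown()})
            elif isinstance(val, list):
                rows.append({"content": key + "\n" + pd.DataFrame(val).to_markdown()})
            elif isinstance(val, dict):
                # Scalar-valued dicts need orient="index", as in the component.
                rows.append({"content": pd.DataFrame.from_dict(val, orient="index").to_markdown()})
            else:
                rows.append({"content": key + "\n" + str(val)})
    return rows

# rows = wencai_to_markdown("semiconductor stocks with rising revenue", top_n=5)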
@ -1,84 +1,84 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
from abc import ABC
|
||||
import pandas as pd
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
import yfinance as yf
|
||||
|
||||
|
||||
class YahooFinanceParam(ComponentParamBase):
|
||||
"""
|
||||
Define the YahooFinance component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.info = True
|
||||
self.history = False
|
||||
self.count = False
|
||||
self.financials = False
|
||||
self.income_stmt = False
|
||||
self.balance_sheet = False
|
||||
self.cash_flow_statement = False
|
||||
self.news = True
|
||||
|
||||
def check(self):
|
||||
self.check_boolean(self.info, "get all stock info")
|
||||
self.check_boolean(self.history, "get historical market data")
|
||||
self.check_boolean(self.count, "show share count")
|
||||
self.check_boolean(self.financials, "show financials")
|
||||
self.check_boolean(self.income_stmt, "income statement")
|
||||
self.check_boolean(self.balance_sheet, "balance sheet")
|
||||
self.check_boolean(self.cash_flow_statement, "cash flow statement")
|
||||
self.check_boolean(self.news, "show news")
|
||||
|
||||
|
||||
class YahooFinance(ComponentBase, ABC):
|
||||
component_name = "YahooFinance"
|
||||
|
||||
def _run(self, history, **kwargs):
|
||||
ans = self.get_input()
|
||||
ans = "".join(ans["content"]) if "content" in ans else ""
|
||||
if not ans:
|
||||
return YahooFinance.be_output("")
|
||||
|
||||
yohoo_res = []
|
||||
try:
|
||||
msft = yf.Ticker(ans)
|
||||
if self._param.info:
|
||||
yohoo_res.append({"content": "info:\n" + pd.Series(msft.info).to_markdown() + "\n"})
|
||||
if self._param.history:
|
||||
yohoo_res.append({"content": "history:\n" + msft.history().to_markdown() + "\n"})
|
||||
if self._param.financials:
|
||||
yohoo_res.append({"content": "calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n"})
|
||||
if self._param.balance_sheet:
|
||||
yohoo_res.append({"content": "balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n"})
|
||||
yohoo_res.append(
|
||||
{"content": "quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n"})
|
||||
if self._param.cash_flow_statement:
|
||||
yohoo_res.append({"content": "cash flow statement:\n" + msft.cashflow.to_markdown() + "\n"})
|
||||
yohoo_res.append(
|
||||
{"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
|
||||
if self._param.news:
|
||||
yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
|
||||
except Exception:
|
||||
logging.exception("YahooFinance got exception")
|
||||
|
||||
if not yohoo_res:
|
||||
return YahooFinance.be_output("")
|
||||
|
||||
return pd.DataFrame(yohoo_res)
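Each boolean on YahooFinanceParam simply toggles one yfinance accessor and appends its markdown dump. A trimmed sketch of that mapping, using only the accessors the component itself touches; the ticker symbol is an example:

import pandas as pd
import yfinance as yf

def ticker_report(symbol: str, want_news: bool = True) -> str:
    blocks = []
    t = yf.Ticker(symbol)
    # "info" flag: basic company facts.
    blocks.append("info:\n" + pd.Series(t.info).to_markdown())
    # "history" flag: recent OHLCV rows.
    blocks.append("history:\n" + t.history().to_markdown())
    # "balance_sheet" flag: annual statement (the component also adds the quarterly one).
    blocks.append("balance sheet:\n" + t.balance_sheet.to_markdown())
    if want_news:
        blocks.append("news:\n" + pd.DataFrame(t.news).to_markdown())
    return "\n\n".join(blocks)

# print(ticker_report("MSFT"))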
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright 2019 The FATE Authors. All Rights Reserved.
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
(Several file diffs suppressed by the web view: too large, or one or more lines too long.)
agent/templates/research_report.json (new file, 1107 lines): diff suppressed.
@ -1,113 +1,113 @@
{
|
||||
"components": {
|
||||
"begin": {
|
||||
"obj":{
|
||||
"component_name": "Begin",
|
||||
"params": {
|
||||
"prologue": "Hi there!"
|
||||
}
|
||||
},
|
||||
"downstream": ["answer:0"],
|
||||
"upstream": []
|
||||
},
|
||||
"answer:0": {
|
||||
"obj": {
|
||||
"component_name": "Answer",
|
||||
"params": {}
|
||||
},
|
||||
"downstream": ["categorize:0"],
|
||||
"upstream": ["begin"]
|
||||
},
|
||||
"categorize:0": {
|
||||
"obj": {
|
||||
"component_name": "Categorize",
|
||||
"params": {
|
||||
"llm_id": "deepseek-chat",
|
||||
"category_description": {
|
||||
"product_related": {
|
||||
"description": "The question is about the product usage, appearance and how it works.",
|
||||
"examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?",
|
||||
"to": "concentrator:0"
|
||||
},
|
||||
"others": {
|
||||
"description": "The question is not about the product usage, appearance and how it works.",
|
||||
"examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
|
||||
"to": "concentrator:1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"downstream": ["concentrator:0","concentrator:1"],
|
||||
"upstream": ["answer:0"]
|
||||
},
|
||||
"concentrator:0": {
|
||||
"obj": {
|
||||
"component_name": "Concentrator",
|
||||
"params": {}
|
||||
},
|
||||
"downstream": ["message:0"],
|
||||
"upstream": ["categorize:0"]
|
||||
},
|
||||
"concentrator:1": {
|
||||
"obj": {
|
||||
"component_name": "Concentrator",
|
||||
"params": {}
|
||||
},
|
||||
"downstream": ["message:1_0","message:1_1","message:1_2"],
|
||||
"upstream": ["categorize:0"]
|
||||
},
|
||||
"message:0": {
|
||||
"obj": {
|
||||
"component_name": "Message",
|
||||
"params": {
|
||||
"messages": [
|
||||
"Message 0_0!!!!!!!"
|
||||
]
|
||||
}
|
||||
},
|
||||
"downstream": ["answer:0"],
|
||||
"upstream": ["concentrator:0"]
|
||||
},
|
||||
"message:1_0": {
|
||||
"obj": {
|
||||
"component_name": "Message",
|
||||
"params": {
|
||||
"messages": [
|
||||
"Message 1_0!!!!!!!"
|
||||
]
|
||||
}
|
||||
},
|
||||
"downstream": ["answer:0"],
|
||||
"upstream": ["concentrator:1"]
|
||||
},
|
||||
"message:1_1": {
|
||||
"obj": {
|
||||
"component_name": "Message",
|
||||
"params": {
|
||||
"messages": [
|
||||
"Message 1_1!!!!!!!"
|
||||
]
|
||||
}
|
||||
},
|
||||
"downstream": ["answer:0"],
|
||||
"upstream": ["concentrator:1"]
|
||||
},
|
||||
"message:1_2": {
|
||||
"obj": {
|
||||
"component_name": "Message",
|
||||
"params": {
|
||||
"messages": [
|
||||
"Message 1_2!!!!!!!"
|
||||
]
|
||||
}
|
||||
},
|
||||
"downstream": ["answer:0"],
|
||||
"upstream": ["concentrator:1"]
|
||||
}
|
||||
},
|
||||
"history": [],
|
||||
"messages": [],
|
||||
"path": [],
|
||||
"reference": [],
|
||||
"answer": []
|
||||
}
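The template above is plain graph data: every component names its downstream and upstream neighbours, and the runtime walks those edges. A small sketch of reading that wiring; the dict literal is abbreviated, in practice you would json.load the full template:

import json

dsl = json.loads("""
{
  "components": {
    "begin":          {"downstream": ["answer:0"],                        "upstream": []},
    "answer:0":       {"downstream": ["categorize:0"],                    "upstream": ["begin"]},
    "categorize:0":   {"downstream": ["concentrator:0", "concentrator:1"], "upstream": ["answer:0"]},
    "concentrator:0": {"downstream": ["message:0"],                       "upstream": ["categorize:0"]},
    "message:0":      {"downstream": ["answer:0"],                        "upstream": ["concentrator:0"]}
  }
}
""")

def downstream_of(cpn_id: str) -> list:
    # Follow the "downstream" edges declared in the template.
    return dsl["components"][cpn_id]["downstream"]

print(downstream_of("categorize:0"))  # ['concentrator:0', 'concentrator:1']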
agentic_reasoning/__init__.py (new file, 1 line)
@ -0,0 +1 @@
|
||||
from .deep_research import DeepResearcher as DeepResearcher
agentic_reasoning/deep_research.py (new file, 167 lines)
@ -0,0 +1,167 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import re
|
||||
from functools import partial
|
||||
from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT, MAX_SEARCH_LIMIT, \
|
||||
END_SEARCH_QUERY, REASON_PROMPT, RELEVANT_EXTRACTION_PROMPT
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from rag.nlp import extract_between
|
||||
from rag.prompts import kb_prompt
|
||||
from rag.utils.tavily_conn import Tavily
|
||||
|
||||
|
||||
class DeepResearcher:
|
||||
def __init__(self,
|
||||
chat_mdl: LLMBundle,
|
||||
prompt_config: dict,
|
||||
kb_retrieve: partial = None,
|
||||
kg_retrieve: partial = None
|
||||
):
|
||||
self.chat_mdl = chat_mdl
|
||||
self.prompt_config = prompt_config
|
||||
self._kb_retrieve = kb_retrieve
|
||||
self._kg_retrieve = kg_retrieve
|
||||
|
||||
def thinking(self, chunk_info: dict, question: str):
|
||||
def rm_query_tags(line):
|
||||
pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
|
||||
return re.sub(pattern, "", line)
|
||||
|
||||
def rm_result_tags(line):
|
||||
pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
|
||||
return re.sub(pattern, "", line)
|
||||
|
||||
executed_search_queries = []
|
||||
msg_hisotry = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
|
||||
all_reasoning_steps = []
|
||||
think = "<think>"
|
||||
for ii in range(MAX_SEARCH_LIMIT + 1):
|
||||
if ii == MAX_SEARCH_LIMIT - 1:
|
||||
summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
|
||||
yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
|
||||
all_reasoning_steps.append(summary_think)
|
||||
msg_hisotry.append({"role": "assistant", "content": summary_think})
|
||||
break
|
||||
|
||||
query_think = ""
|
||||
if msg_hisotry[-1]["role"] != "user":
|
||||
msg_hisotry.append({"role": "user", "content": "Continues reasoning with the new information.\n"})
|
||||
else:
|
||||
msg_hisotry[-1]["content"] += "\n\nContinues reasoning with the new information.\n"
|
||||
for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_hisotry, {"temperature": 0.7}):
|
||||
ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
|
||||
if not ans:
|
||||
continue
|
||||
query_think = ans
|
||||
yield {"answer": think + rm_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}
|
||||
|
||||
think += rm_query_tags(query_think)
|
||||
all_reasoning_steps.append(query_think)
|
||||
queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
|
||||
if not queries:
|
||||
if ii > 0:
|
||||
break
|
||||
queries = [question]
|
||||
|
||||
for search_query in queries:
|
||||
logging.info(f"[THINK]Query: {ii}. {search_query}")
|
||||
msg_hisotry.append({"role": "assistant", "content": search_query})
|
||||
think += f"\n\n> {ii +1}. {search_query}\n\n"
|
||||
yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}
|
||||
|
||||
summary_think = ""
|
||||
# The search query has been searched in previous steps.
|
||||
if search_query in executed_search_queries:
|
||||
summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
||||
yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
|
||||
all_reasoning_steps.append(summary_think)
|
||||
msg_hisotry.append({"role": "user", "content": summary_think})
|
||||
think += summary_think
|
||||
continue
|
||||
|
||||
truncated_prev_reasoning = ""
|
||||
for i, step in enumerate(all_reasoning_steps):
|
||||
truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"
|
||||
|
||||
prev_steps = truncated_prev_reasoning.split('\n\n')
|
||||
if len(prev_steps) <= 5:
|
||||
truncated_prev_reasoning = '\n\n'.join(prev_steps)
|
||||
else:
|
||||
truncated_prev_reasoning = ''
|
||||
for i, step in enumerate(prev_steps):
|
||||
if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
|
||||
truncated_prev_reasoning += step + '\n\n'
|
||||
else:
|
||||
if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
|
||||
truncated_prev_reasoning += '...\n\n'
|
||||
truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')
|
||||
|
||||
# Retrieval procedure:
|
||||
# 1. KB search
|
||||
# 2. Web search (optional)
|
||||
# 3. KG search (optional)
|
||||
kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}
|
||||
|
||||
if self.prompt_config.get("tavily_api_key"):
|
||||
tav = Tavily(self.prompt_config["tavily_api_key"])
|
||||
tav_res = tav.retrieve_chunks(search_query)
|
||||
kbinfos["chunks"].extend(tav_res["chunks"])
|
||||
kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
|
||||
if self.prompt_config.get("use_kg") and self._kg_retrieve:
|
||||
ck = self._kg_retrieve(question=search_query)
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
# Merge chunk info for citations
|
||||
if not chunk_info["chunks"]:
|
||||
for k in chunk_info.keys():
|
||||
chunk_info[k] = kbinfos[k]
|
||||
else:
|
||||
cids = [c["chunk_id"] for c in chunk_info["chunks"]]
|
||||
for c in kbinfos["chunks"]:
|
||||
if c["chunk_id"] in cids:
|
||||
continue
|
||||
chunk_info["chunks"].append(c)
|
||||
dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
|
||||
for d in kbinfos["doc_aggs"]:
|
||||
if d["doc_id"] in dids:
|
||||
continue
|
||||
chunk_info["doc_aggs"].append(d)
|
||||
|
||||
think += "\n\n"
|
||||
for ans in self.chat_mdl.chat_streamly(
|
||||
RELEVANT_EXTRACTION_PROMPT.format(
|
||||
prev_reasoning=truncated_prev_reasoning,
|
||||
search_query=search_query,
|
||||
document="\n".join(kb_prompt(kbinfos, 4096))
|
||||
),
|
||||
[{"role": "user",
|
||||
"content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
|
||||
{"temperature": 0.7}):
|
||||
ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
|
||||
if not ans:
|
||||
continue
|
||||
summary_think = ans
|
||||
yield {"answer": think + rm_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}
|
||||
|
||||
all_reasoning_steps.append(summary_think)
|
||||
msg_hisotry.append(
|
||||
{"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
|
||||
think += rm_result_tags(summary_think)
|
||||
logging.info(f"[THINK]Summary: {ii}. {summary_think}")
|
||||
|
||||
yield think + "</think>"
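DeepResearcher.thinking() is a generator: intermediate yields are dicts carrying the streamed "<think>" text and citation bookkeeping, and the final yield is the complete think string. A hedged sketch of how a caller might drive it; chat_mdl and kb_search are stand-ins for whatever the dialog service already has, and kb_search only needs to accept question=... and return {"chunks": [...], "doc_aggs": [...]}:

from agentic_reasoning import DeepResearcher

def run_deep_research(chat_mdl, question: str, kb_search):
    # kb_search stands in for the usual retrieval partial; thinking() calls it as
    # kb_retrieve(question=search_query).
    researcher = DeepResearcher(chat_mdl,
                                {"tavily_api_key": None, "use_kg": False},
                                kb_retrieve=kb_search)
    chunk_info = {"chunks": [], "doc_aggs": []}
    answer = ""
    for delta in researcher.thinking(chunk_info, question):
        # Dict yields carry the partial answer; the last yield is the plain string.
        answer = delta["answer"] if isinstance(delta, dict) else delta
    return answer, chunk_info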
agentic_reasoning/prompts.py (new file, 112 lines)
@ -0,0 +1,112 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
|
||||
END_SEARCH_QUERY = "<|end_search_query|>"
|
||||
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
|
||||
END_SEARCH_RESULT = "<|end_search_result|>"
|
||||
MAX_SEARCH_LIMIT = 6
|
||||
|
||||
REASON_PROMPT = (
|
||||
"You are a reasoning assistant with the ability to perform dataset searches to help "
|
||||
"you answer the user's question accurately. You have special tools:\n\n"
|
||||
f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
|
||||
f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
|
||||
f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
|
||||
"Once you have all the information you need, continue your reasoning.\n\n"
|
||||
"-- Example 1 --\n" ########################################
|
||||
"Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
|
||||
"Assistant:\n"
|
||||
f" {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
|
||||
"User:\n"
|
||||
f" {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
|
||||
"Continues reasoning with the new information.\n"
|
||||
"Assistant:\n"
|
||||
f" {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
|
||||
"User:\n"
|
||||
f" {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
|
||||
"Continues reasoning with the new information...\n\n"
|
||||
"Assistant:\n"
|
||||
f" {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
|
||||
"User:\n"
|
||||
f" {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
|
||||
"Continues reasoning with the new information...\n\n"
|
||||
"Assistant:\n"
|
||||
f" {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
|
||||
"User:\n"
|
||||
f" {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
|
||||
"Continues reasoning with the new information...\n\n"
|
||||
"Assistant:\nIt's enough to answer the question\n"
|
||||
|
||||
"-- Example 2 --\n" #########################################
|
||||
"Question: \"When was the founder of craigslist born?\"\n"
|
||||
"Assistant:\n"
|
||||
f" {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
|
||||
"User:\n"
|
||||
f" {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
|
||||
"Continues reasoning with the new information.\n"
|
||||
"Assistant:\n"
|
||||
f" {BEGIN_SEARCH_QUERY} When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
|
||||
"User:\n"
|
||||
f" {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
|
||||
"Continues reasoning with the new information...\n\n"
|
||||
"Assistant:\nIt's enough to answer the question\n"
|
||||
"**Remember**:\n"
|
||||
f"- You have a dataset to search, so you just provide a proper search query.\n"
|
||||
f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
|
||||
"- The language of query MUST be as the same as 'Question' or 'search result'.\n"
|
||||
"- When done searching, continue your reasoning.\n\n"
|
||||
'Please answer the following question. You should think step by step to solve it.\n\n'
|
||||
)
|
||||
|
||||
RELEVANT_EXTRACTION_PROMPT = """**Task Instruction:**
|
||||
|
||||
You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.
|
||||
|
||||
**Guidelines:**
|
||||
|
||||
1. **Analyze the Searched Web Pages:**
|
||||
- Carefully review the content of each searched web page.
|
||||
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.
|
||||
|
||||
2. **Extract Relevant Information:**
|
||||
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
|
||||
- Ensure that the extracted information is accurate and relevant.
|
||||
|
||||
3. **Output Format:**
|
||||
- **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
|
||||
- The language of query **MUST BE** as the same as 'Search Query' or 'Web Pages'.\n"
|
||||
**Final Information**
|
||||
|
||||
[Helpful information]
|
||||
|
||||
- **If the web pages do not provide any helpful information for current search query:** Output the following text.
|
||||
|
||||
**Final Information**
|
||||
|
||||
No helpful information found.
|
||||
|
||||
**Inputs:**
|
||||
- **Previous Reasoning Steps:**
|
||||
{prev_reasoning}
|
||||
|
||||
- **Current Search Query:**
|
||||
{search_query}
|
||||
|
||||
- **Searched Web Pages:**
|
||||
{document}
|
||||
|
||||
"""
|
||||
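The BEGIN_/END_ markers above are what deep_research.py later slices out of the model's reply via rag.nlp.extract_between. This is not that helper, just a rough standalone illustration of the marker protocol the prompts establish:

import re

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"

def queries_between(text: str) -> list:
    # Pull every span the model wrapped in the search-query markers.
    pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
    return [m.strip() for m in re.findall(pattern, text, flags=re.DOTALL)]

reply = ("I need more facts. "
         f"{BEGIN_SEARCH_QUERY}Who directed Casino Royale?{END_SEARCH_QUERY}")
print(queries_between(reply))  # ['Who directed Casino Royale?']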
@ -1,2 +1,18 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from beartype.claw import beartype_this_package
|
||||
beartype_this_package()
|
||||
|
||||
@ -119,8 +119,9 @@ def register_page(page_path):
|
||||
sys.modules[module_name] = page
|
||||
spec.loader.exec_module(page)
|
||||
page_name = getattr(page, "page_name", page_name)
|
||||
sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
|
||||
url_prefix = (
|
||||
f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
|
||||
f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
|
||||
)
|
||||
|
||||
app.register_blueprint(page.manager, url_prefix=url_prefix)
|
||||
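The register_page change swaps the hard-coded "/sdk/" substring test for a separator that matches the host OS, so SDK blueprints are also detected on Windows paths. A sketch of the check in isolation; API_VERSION and the sample paths are illustrative:

import sys

API_VERSION = "v1"

def url_prefix_for(path: str, page_name: str) -> str:
    # On Windows the module path uses backslashes, so "/sdk/" never matched before this fix.
    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
    return f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"

print(url_prefix_for("/ragflow/api/apps/sdk/doc.py", "doc"))      # /api/v1 on Linux/macOS
print(url_prefix_for("/ragflow/api/apps/chunk_app.py", "chunk"))  # /v1/chunk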
@ -152,8 +153,8 @@ def load_user(web_request):
|
||||
return user[0]
|
||||
else:
|
||||
return None
|
||||
except Exception:
|
||||
logging.exception("load_user got exception")
|
||||
except Exception as e:
|
||||
logging.warning(f"load_user got exception {e}")
|
||||
return None
|
||||
else:
|
||||
return None
|
||||
|
||||
@ -25,7 +25,7 @@ from api.db import FileType, LLMType, ParserType, FileSource
|
||||
from api.db.db_models import APIToken, Task, File
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.api_service import APITokenService, API4ConversationService
|
||||
from api.db.services.dialog_service import DialogService, chat, keyword_extraction
|
||||
from api.db.services.dialog_service import DialogService, chat
|
||||
from api.db.services.document_service import DocumentService, doc_upload_and_parse
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
@ -38,6 +38,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge
|
||||
generate_confirmation_token
|
||||
|
||||
from api.utils.file_utils import filename_type, thumbnail
|
||||
from rag.app.tag import label_question
|
||||
from rag.prompts import keyword_extraction
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
from api.db.services.canvas_service import UserCanvasService
|
||||
@ -141,7 +143,7 @@ def set_conversation():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
if objs[0].source == "agent":
|
||||
e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
|
||||
@ -182,7 +184,7 @@ def completion():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req = request.json
|
||||
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
@ -348,7 +350,7 @@ def get(conversation_id):
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
e, conv = API4ConversationService.get_by_id(conversation_id)
|
||||
@ -357,7 +359,7 @@ def get(conversation_id):
|
||||
|
||||
conv = conv.to_dict()
|
||||
if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
|
||||
return get_json_result(data=False, message='Token is not valid for this conversation_id!"',
|
||||
return get_json_result(data=False, message='Authentication error: API key is invalid for this conversation_id!"',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
for referenct_i in conv['reference']:
|
||||
@ -379,7 +381,7 @@ def upload():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
kb_name = request.form.get("kb_name").strip()
|
||||
tenant_id = objs[0].tenant_id
|
||||
@ -491,7 +493,7 @@ def upload_parse():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
@ -514,7 +516,7 @@ def list_chunks():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
|
||||
@ -554,7 +556,7 @@ def list_kb_docs():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
tenant_id = objs[0].tenant_id
|
||||
@ -594,7 +596,7 @@ def docinfos():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req = request.json
|
||||
doc_ids = req["doc_ids"]
|
||||
docs = DocumentService.get_by_ids(doc_ids)
|
||||
@ -608,7 +610,7 @@ def document_rm():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
tenant_id = objs[0].tenant_id
|
||||
req = request.json
|
||||
@ -670,7 +672,7 @@ def completion_faq():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
@ -809,7 +811,7 @@ def retrieval():
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
kb_ids = req.get("kb_id", [])
|
||||
@ -840,7 +842,8 @@ def retrieval():
|
||||
question += keyword_extraction(chat_mdl, question)
|
||||
ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
|
||||
similarity_threshold, vector_similarity_weight, top,
|
||||
doc_ids, rerank_mdl=rerank_mdl)
|
||||
doc_ids, rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(question, kbs))
|
||||
for c in ranks["chunks"]:
|
||||
c.pop("vector", None)
|
||||
return get_json_result(data=ranks)
|
||||
|
||||
@ -65,7 +65,7 @@ def save():
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
if "id" not in req:
|
||||
if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
|
||||
return get_data_error_result(f"{req['title'].strip()} already exists.")
|
||||
return get_data_error_result(message=f"{req['title'].strip()} already exists.")
|
||||
req["id"] = get_uuid()
|
||||
if not UserCanvasService.save(**req):
|
||||
return get_data_error_result(message="Fail to save canvas.")
|
||||
@ -94,7 +94,7 @@ def getsse(canvas_id):
|
||||
token = token[1]
|
||||
objs = APIToken.query(beta=token)
|
||||
if not objs:
|
||||
return get_data_error_result(message='Token is not valid!"')
|
||||
return get_data_error_result(message='Authentication error: API key is invalid!"')
|
||||
e, c = UserCanvasService.get_by_id(canvas_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
@ -146,12 +146,16 @@ def run():
|
||||
|
||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
||||
canvas.history.append(("assistant", final_ans["content"]))
|
||||
if not canvas.path[-1]:
|
||||
canvas.path.pop(-1)
|
||||
if final_ans.get("reference"):
|
||||
canvas.reference.append(final_ans["reference"])
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
UserCanvasService.update_by_id(req["id"], cvs.to_dict())
|
||||
except Exception as e:
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
if not canvas.path[-1]:
|
||||
canvas.path.pop(-1)
|
||||
UserCanvasService.update_by_id(req["id"], cvs.to_dict())
|
||||
traceback.print_exc()
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
||||
|
||||
@ -19,9 +19,11 @@ import json
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from api.db.services.dialog_service import keyword_extraction
|
||||
from rag.app.qa import rmPrefix, beAdoc
|
||||
from rag.app.tag import label_question
|
||||
from rag.nlp import search, rag_tokenizer
|
||||
from rag.prompts import keyword_extraction
|
||||
from rag.settings import PAGERANK_FLD
|
||||
from rag.utils import rmSpace
|
||||
from api.db import LLMType, ParserType
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
@ -92,12 +94,14 @@ def get():
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
tenant_id = tenants[0].tenant_id
|
||||
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
|
||||
chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
|
||||
for tenant in tenants:
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
|
||||
chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
|
||||
if chunk:
|
||||
break
|
||||
if chunk is None:
|
||||
return server_error_response(Exception("Chunk not found"))
|
||||
|
||||
k = []
|
||||
for n in chunk.keys():
|
||||
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
||||
@ -115,8 +119,7 @@ def get():
|
||||
|
||||
@manager.route('/set', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "chunk_id", "content_with_weight",
|
||||
"important_kwd", "question_kwd")
|
||||
@validate_request("doc_id", "chunk_id", "content_with_weight")
|
||||
def set():
|
||||
req = request.json
|
||||
d = {
|
||||
@ -124,10 +127,16 @@ def set():
|
||||
"content_with_weight": req["content_with_weight"]}
|
||||
d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
|
||||
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
|
||||
d["important_kwd"] = req["important_kwd"]
|
||||
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
|
||||
d["question_kwd"] = req["question_kwd"]
|
||||
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
|
||||
if "important_kwd" in req:
|
||||
d["important_kwd"] = req["important_kwd"]
|
||||
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
|
||||
if "question_kwd" in req:
|
||||
d["question_kwd"] = req["question_kwd"]
|
||||
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
|
||||
if "tag_kwd" in req:
|
||||
d["tag_kwd"] = req["tag_kwd"]
|
||||
if "tag_feas" in req:
|
||||
d["tag_feas"] = req["tag_feas"]
|
||||
if "available_int" in req:
|
||||
d["available_int"] = req["available_int"]
|
||||
|
||||
@ -148,14 +157,11 @@ def set():
|
||||
t for t in re.split(
|
||||
r"[\n\t]",
|
||||
req["content_with_weight"]) if len(t) > 1]
|
||||
if len(arr) != 2:
|
||||
return get_data_error_result(
|
||||
message="Q&A must be separated by TAB/ENTER key.")
|
||||
q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
|
||||
d = beAdoc(d, arr[0], arr[1], not any(
|
||||
q, a = rmPrefix(arr[0]), rmPrefix("\n".join(arr[1:]))
|
||||
d = beAdoc(d, q, a, not any(
|
||||
[rag_tokenizer.is_chinese(t) for t in q + a]))
|
||||
|
||||
v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
|
||||
v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
|
||||
v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
|
||||
d["q_%d_vec" % len(v)] = v.tolist()
|
||||
settings.docStoreConn.update({"id": req["chunk_id"]}, d, search.index_name(tenant_id), doc.kb_id)
|
||||
@@ -236,7 +242,7 @@ def create():
|
||||
if not e:
|
||||
return get_data_error_result(message="Knowledgebase not found!")
|
||||
if kb.pagerank:
|
||||
d["pagerank_fea"] = kb.pagerank
|
||||
d[PAGERANK_FLD] = kb.pagerank
|
||||
|
||||
embd_id = DocumentService.get_embd_id(req["doc_id"])
|
||||
embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)
|
||||
@@ -267,6 +273,7 @@ def retrieval_test():
|
||||
doc_ids = req.get("doc_ids", [])
|
||||
similarity_threshold = float(req.get("similarity_threshold", 0.0))
|
||||
vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
|
||||
use_kg = req.get("use_kg", False)
|
||||
top = int(req.get("top_k", 1024))
|
||||
tenant_ids = []
|
||||
|
||||
@@ -297,12 +304,24 @@ def retrieval_test():
|
||||
chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
|
||||
question += keyword_extraction(chat_mdl, question)
|
||||
|
||||
retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
|
||||
ranks = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
|
||||
labels = label_question(question, [kb])
|
||||
ranks = settings.retrievaler.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
|
||||
similarity_threshold, vector_similarity_weight, top,
|
||||
doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
|
||||
doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"),
|
||||
rank_feature=labels
|
||||
)
|
||||
if use_kg:
|
||||
ck = settings.kg_retrievaler.retrieval(question,
|
||||
tenant_ids,
|
||||
kb_ids,
|
||||
embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
for c in ranks["chunks"]:
|
||||
c.pop("vector", None)
|
||||
ranks["labels"] = labels
|
||||
|
||||
return get_json_result(data=ranks)
|
||||
except Exception as e:
|
||||
|
||||
@@ -17,6 +17,7 @@ import json
|
||||
import re
|
||||
import traceback
|
||||
from copy import deepcopy
|
||||
import trio
|
||||
from api.db.db_models import APIToken
|
||||
|
||||
from api.db.services.conversation_service import ConversationService, structure_answer
|
||||
@@ -27,11 +28,13 @@ from flask_login import login_required, current_user
|
||||
from api.db import LLMType
|
||||
from api.db.services.dialog_service import DialogService, chat, ask
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
|
||||
from api.db.services.llm_service import LLMBundle, TenantService
|
||||
from api import settings
|
||||
from api.utils.api_utils import get_json_result
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from graphrag.mind_map_extractor import MindMapExtractor
|
||||
from graphrag.general.mind_map_extractor import MindMapExtractor
|
||||
from rag.app.tag import label_question
|
||||
|
||||
|
||||
@manager.route('/set', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@@ -65,10 +68,6 @@ def set_conversation():
|
||||
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
|
||||
}
|
||||
ConversationService.save(**conv)
|
||||
e, conv = ConversationService.get_by_id(conv["id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Fail to new a conversation!")
|
||||
conv = conv.to_dict()
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@@ -126,7 +125,7 @@ def getsse(dialog_id):
|
||||
token = token[1]
|
||||
objs = APIToken.query(beta=token)
|
||||
if not objs:
|
||||
return get_data_error_result(message='Token is not valid!"')
|
||||
return get_data_error_result(message='Authentication error: API key is invalid!"')
|
||||
try:
|
||||
e, conv = DialogService.get_by_id(dialog_id)
|
||||
if not e:
|
||||
@@ -380,13 +379,16 @@ def mindmap():
|
||||
if not e:
|
||||
return get_data_error_result(message="Knowledgebase not found!")
|
||||
|
||||
embd_mdl = TenantLLMService.model_instance(
|
||||
kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
|
||||
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
|
||||
chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
|
||||
ranks = settings.retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
|
||||
0.3, 0.3, aggs=False)
|
||||
question = req["question"]
|
||||
ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12,
|
||||
0.3, 0.3, aggs=False,
|
||||
rank_feature=label_question(question, [kb])
|
||||
)
|
||||
mindmap = MindMapExtractor(chat_mdl)
|
||||
mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
|
||||
mind_map = trio.run(mindmap, [c["content_with_weight"] for c in ranks["chunks"]])
|
||||
mind_map = mind_map.output
|
||||
if "error" in mind_map:
|
||||
return server_error_response(Exception(mind_map["error"]))
|
||||
return get_json_result(data=mind_map)
|
||||
|
||||
@@ -18,6 +18,7 @@ from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.llm_service import TenantLLMService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api import settings
|
||||
@@ -57,11 +58,6 @@ def set_dialog():
|
||||
|
||||
if not prompt_config["system"]:
|
||||
prompt_config["system"] = default_prompt["system"]
|
||||
# if len(prompt_config["parameters"]) < 1:
|
||||
# prompt_config["parameters"] = default_prompt["parameters"]
|
||||
# for p in prompt_config["parameters"]:
|
||||
# if p["key"] == "knowledge":break
|
||||
# else: prompt_config["parameters"].append(default_prompt["parameters"][0])
|
||||
|
||||
for p in prompt_config["parameters"]:
|
||||
if p["optional"]:
|
||||
@@ -74,22 +70,19 @@ def set_dialog():
|
||||
e, tenant = TenantService.get_by_id(current_user.id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids"))
|
||||
embd_count = len(set([kb.embd_id for kb in kbs]))
|
||||
if embd_count != 1:
|
||||
kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = len(set(embd_ids))
|
||||
if embd_count > 1:
|
||||
return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}"')
|
||||
|
||||
llm_id = req.get("llm_id", tenant.llm_id)
|
||||
if not dialog_id:
|
||||
if not req.get("kb_ids"):
|
||||
return get_data_error_result(
|
||||
message="Fail! Please select knowledgebase!")
|
||||
|
||||
dia = {
|
||||
"id": get_uuid(),
|
||||
"tenant_id": current_user.id,
|
||||
"name": name,
|
||||
"kb_ids": req["kb_ids"],
|
||||
"kb_ids": req.get("kb_ids", []),
|
||||
"description": description,
|
||||
"llm_id": llm_id,
|
||||
"llm_setting": llm_setting,
|
||||
@@ -103,10 +96,7 @@ def set_dialog():
|
||||
}
|
||||
if not DialogService.save(**dia):
|
||||
return get_data_error_result(message="Fail to new a dialog!")
|
||||
e, dia = DialogService.get_by_id(dia["id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Fail to new a dialog!")
|
||||
return get_json_result(data=dia.to_json())
|
||||
return get_json_result(data=dia)
|
||||
else:
|
||||
del req["dialog_id"]
|
||||
if "kb_names" in req:
|
||||
@@ -117,6 +107,7 @@ def set_dialog():
|
||||
if not e:
|
||||
return get_data_error_result(message="Fail to update a dialog!")
|
||||
dia = dia.to_dict()
|
||||
dia.update(req)
|
||||
dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
|
||||
return get_json_result(data=dia)
|
||||
except Exception as e:
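For readers skimming the hunk above: the dialog update now compares dataset embedding models with the vendor suffix stripped, so the same model registered under different factories no longer trips the "different embedding models" error. A rough, self-contained sketch of that comparison follows; the split-on-"@" behaviour of `TenantLLMService.split_model_name_and_factory` is an assumption inferred from how model ids are composed elsewhere in this diff (`llm_name + "@" + fid`), not taken from its source.

```python
# Sketch only: a local stand-in for TenantLLMService.split_model_name_and_factory,
# assuming embd_id strings look like "<model_name>@<factory>".
def split_model_name_and_factory(embd_id: str):
    name, _, factory = embd_id.partition("@")
    return name, factory or None

def datasets_share_embedding_model(embd_ids: list[str]) -> bool:
    # Compare bare model names only, ignoring which vendor serves them.
    bare_names = {split_model_name_and_factory(e)[0] for e in embd_ids}
    return len(bare_names) <= 1

# Same model from two factories is still considered consistent.
print(datasets_share_embedding_model(
    ["BAAI/bge-large-zh-v1.5@BAAI", "BAAI/bge-large-zh-v1.5@Xinference"]))  # True
print(datasets_share_embedding_model(
    ["BAAI/bge-large-zh-v1.5@BAAI", "text-embedding-3-small@OpenAI"]))      # False
```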
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License
|
||||
#
|
||||
import json
|
||||
import os.path
|
||||
import pathlib
|
||||
import re
|
||||
@@ -70,11 +71,13 @@ def upload():
|
||||
if not e:
|
||||
raise LookupError("Can't find this knowledgebase!")
|
||||
|
||||
err, _ = FileService.upload_document(kb, file_objs, current_user.id)
|
||||
err, files = FileService.upload_document(kb, file_objs, current_user.id)
|
||||
files = [f[0] for f in files] # remove the blob
|
||||
|
||||
if err:
|
||||
return get_json_result(
|
||||
data=False, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
|
||||
return get_json_result(data=True)
|
||||
data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
|
||||
return get_json_result(data=files)
|
||||
|
||||
|
||||
@manager.route('/web_crawl', methods=['POST']) # noqa: F821
|
||||
@@ -593,3 +596,38 @@ def parse():
|
||||
txt = FileService.parse_docs(file_objs, current_user.id)
|
||||
|
||||
return get_json_result(data=txt)
|
||||
|
||||
|
||||
@manager.route('/set_meta', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "meta")
|
||||
def set_meta():
|
||||
req = request.json
|
||||
if not DocumentService.accessible(req["doc_id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
try:
|
||||
meta = json.loads(req["meta"])
|
||||
except Exception as e:
|
||||
return get_json_result(
|
||||
data=False, message=f'Json syntax error: {e}', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if not isinstance(meta, dict):
|
||||
return get_json_result(
|
||||
data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
if not DocumentService.update_by_id(
|
||||
req["doc_id"], {"meta_fields": meta}):
|
||||
return get_data_error_result(
|
||||
message="Database error (meta updates)!")
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
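A hedged client-side sketch of exercising the new `/set_meta` route added above. The handler expects `doc_id` plus `meta` given as a JSON-encoded string describing an object (it `json.loads()` the value and rejects anything that is not a dict). The base URL, route prefix, session cookie, and document id below are placeholders, not values from this diff.

```python
# Sketch only: server address, route prefix, auth cookie and doc id are assumptions.
import json
import requests

payload = {
    "doc_id": "<document-id>",
    # `meta` must be a JSON string, not a nested object.
    "meta": json.dumps({"author": "alice", "year": 2024}),
}
resp = requests.post(
    "http://localhost:9380/v1/document/set_meta",    # assumed route prefix/port
    json=payload,
    cookies={"session": "<login session cookie>"},    # endpoint is @login_required
    timeout=30,
)
print(resp.json())
```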
|
||||
|
||||
@@ -13,6 +13,9 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import os
|
||||
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
@@ -30,6 +33,7 @@ from api.utils.api_utils import get_json_result
|
||||
from api import settings
|
||||
from rag.nlp import search
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
from rag.settings import PAGERANK_FLD
|
||||
|
||||
|
||||
@manager.route('/create', methods=['post']) # noqa: F821
|
||||
@@ -92,6 +96,13 @@ def update():
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
|
||||
if req.get("parser_id", "") == "tag" and os.environ.get('DOC_ENGINE', "elasticsearch") == "infinity":
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='The chunk method Tag has not been supported by Infinity yet.',
|
||||
code=settings.RetCode.OPERATING_ERROR
|
||||
)
|
||||
|
||||
if req["name"].lower() != kb.name.lower() \
|
||||
and len(
|
||||
KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
|
||||
@@ -104,19 +115,21 @@ def update():
|
||||
|
||||
if kb.pagerank != req.get("pagerank", 0):
|
||||
if req.get("pagerank", 0) > 0:
|
||||
settings.docStoreConn.update({"kb_id": kb.id}, {"pagerank_fea": req["pagerank"]},
|
||||
settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]},
|
||||
search.index_name(kb.tenant_id), kb.id)
|
||||
else:
|
||||
# Elasticsearch requires pagerank_fea be non-zero!
|
||||
settings.docStoreConn.update({"exist": "pagerank_fea"}, {"remove": "pagerank_fea"},
|
||||
# Elasticsearch requires PAGERANK_FLD be non-zero!
|
||||
settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD},
|
||||
search.index_name(kb.tenant_id), kb.id)
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(kb.id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Database error (Knowledgebase rename)!")
|
||||
kb = kb.to_dict()
|
||||
kb.update(req)
|
||||
|
||||
return get_json_result(data=kb.to_json())
|
||||
return get_json_result(data=kb)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
@@ -150,12 +163,14 @@ def list_kbs():
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 150))
|
||||
parser_id = request.args.get("parser_id")
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
try:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
kbs, total = KnowledgebaseService.get_by_tenant_ids(
|
||||
[m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc, keywords)
|
||||
[m["tenant_id"] for m in tenants], current_user.id, page_number,
|
||||
items_per_page, orderby, desc, keywords, parser_id)
|
||||
return get_json_result(data={"kbs": kbs, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@@ -185,7 +200,8 @@ def rm():
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
if f2d:
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
|
||||
@@ -198,3 +214,112 @@ def rm():
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/<kb_id>/tags', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_tags(kb_id):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
tags = settings.retrievaler.all_tags(current_user.id, [kb_id])
|
||||
return get_json_result(data=tags)
|
||||
|
||||
|
||||
@manager.route('/tags', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_tags_from_kbs():
|
||||
kb_ids = request.args.get("kb_ids", "").split(",")
|
||||
for kb_id in kb_ids:
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
tags = settings.retrievaler.all_tags(current_user.id, kb_ids)
|
||||
return get_json_result(data=tags)
|
||||
|
||||
|
||||
@manager.route('/<kb_id>/rm_tags', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
def rm_tags(kb_id):
|
||||
req = request.json
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
|
||||
for t in req["tags"]:
|
||||
settings.docStoreConn.update({"tag_kwd": t, "kb_id": [kb_id]},
|
||||
{"remove": {"tag_kwd": t}},
|
||||
search.index_name(kb.tenant_id),
|
||||
kb_id)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/<kb_id>/rename_tag', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
def rename_tags(kb_id):
|
||||
req = request.json
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
|
||||
settings.docStoreConn.update({"tag_kwd": req["from_tag"], "kb_id": [kb_id]},
|
||||
{"remove": {"tag_kwd": req["from_tag"].strip()}, "add": {"tag_kwd": req["to_tag"]}},
|
||||
search.index_name(kb.tenant_id),
|
||||
kb_id)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/<kb_id>/knowledge_graph', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def knowledge_graph(kb_id):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
_, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
req = {
|
||||
"kb_id": [kb_id],
|
||||
"knowledge_graph_kwd": ["graph"]
|
||||
}
|
||||
|
||||
obj = {"graph": {}, "mind_map": {}}
|
||||
if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
|
||||
return get_json_result(data=obj)
|
||||
sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
|
||||
if not len(sres.ids):
|
||||
return get_json_result(data=obj)
|
||||
|
||||
for id in sres.ids[:1]:
|
||||
ty = sres.field[id]["knowledge_graph_kwd"]
|
||||
try:
|
||||
content_json = json.loads(sres.field[id]["content_with_weight"])
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
obj[ty] = content_json
|
||||
|
||||
if "nodes" in obj["graph"]:
|
||||
obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
|
||||
if "edges" in obj["graph"]:
|
||||
node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
|
||||
filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
|
||||
obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
|
||||
return get_json_result(data=obj)
|
||||
@@ -15,7 +15,7 @@
|
||||
#
|
||||
import logging
|
||||
import json
|
||||
|
||||
import os
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
|
||||
@@ -24,8 +24,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
|
||||
from api.db import StatusEnum, LLMType
|
||||
from api.db.db_models import TenantLLM
|
||||
from api.utils.api_utils import get_json_result
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
|
||||
import requests
|
||||
|
||||
|
||||
@manager.route('/factories', methods=['GET']) # noqa: F821
|
||||
@@ -135,6 +135,8 @@ def set_api_key():
|
||||
def add_llm():
|
||||
req = request.json
|
||||
factory = req["llm_factory"]
|
||||
api_key = req.get("api_key", "")
|
||||
llm_name = req["llm_name"]
|
||||
|
||||
def apikey_json(keys):
|
||||
nonlocal req
|
||||
@@ -143,7 +145,6 @@ def add_llm():
|
||||
if factory == "VolcEngine":
|
||||
# For VolcEngine, due to its special authentication method
|
||||
# Assemble ark_api_key endpoint_id into api_key
|
||||
llm_name = req["llm_name"]
|
||||
api_key = apikey_json(["ark_api_key", "endpoint_id"])
|
||||
|
||||
elif factory == "Tencent Hunyuan":
|
||||
@@ -152,52 +153,43 @@ def add_llm():
|
||||
|
||||
elif factory == "Tencent Cloud":
|
||||
req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
|
||||
return set_api_key()
|
||||
|
||||
elif factory == "Bedrock":
|
||||
# For Bedrock, due to its special authentication method
|
||||
# Assemble bedrock_ak, bedrock_sk, bedrock_region
|
||||
llm_name = req["llm_name"]
|
||||
api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])
|
||||
|
||||
elif factory == "LocalAI":
|
||||
llm_name = req["llm_name"] + "___LocalAI"
|
||||
api_key = "xxxxxxxxxxxxxxx"
|
||||
llm_name += "___LocalAI"
|
||||
|
||||
elif factory == "HuggingFace":
|
||||
llm_name = req["llm_name"] + "___HuggingFace"
|
||||
api_key = "xxxxxxxxxxxxxxx"
|
||||
llm_name += "___HuggingFace"
|
||||
|
||||
elif factory == "OpenAI-API-Compatible":
|
||||
llm_name = req["llm_name"] + "___OpenAI-API"
|
||||
api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
|
||||
llm_name += "___OpenAI-API"
|
||||
|
||||
elif factory == "VLLM":
|
||||
llm_name += "___VLLM"
|
||||
|
||||
elif factory == "XunFei Spark":
|
||||
llm_name = req["llm_name"]
|
||||
if req["model_type"] == "chat":
|
||||
api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
|
||||
api_key = req.get("spark_api_password", "")
|
||||
elif req["model_type"] == "tts":
|
||||
api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])
|
||||
|
||||
elif factory == "BaiduYiyan":
|
||||
llm_name = req["llm_name"]
|
||||
api_key = apikey_json(["yiyan_ak", "yiyan_sk"])
|
||||
|
||||
elif factory == "Fish Audio":
|
||||
llm_name = req["llm_name"]
|
||||
api_key = apikey_json(["fish_audio_ak", "fish_audio_refid"])
|
||||
|
||||
elif factory == "Google Cloud":
|
||||
llm_name = req["llm_name"]
|
||||
api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])
|
||||
|
||||
elif factory == "Azure-OpenAI":
|
||||
llm_name = req["llm_name"]
|
||||
api_key = apikey_json(["api_key", "api_version"])
|
||||
|
||||
else:
|
||||
llm_name = req["llm_name"]
|
||||
api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
|
||||
|
||||
llm = {
|
||||
"tenant_id": current_user.id,
|
||||
"llm_factory": factory,
|
||||
@@ -209,72 +201,69 @@ def add_llm():
|
||||
}
|
||||
|
||||
msg = ""
|
||||
mdl_nm = llm["llm_name"].split("___")[0]
|
||||
if llm["model_type"] == LLMType.EMBEDDING.value:
|
||||
mdl = EmbeddingModel[factory](
|
||||
key=llm['api_key'],
|
||||
model_name=llm["llm_name"],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"])
|
||||
try:
|
||||
arr, tc = mdl.encode(["Test if the api key is available"])
|
||||
if len(arr[0]) == 0:
|
||||
raise Exception("Fail")
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
|
||||
msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
|
||||
elif llm["model_type"] == LLMType.CHAT.value:
|
||||
mdl = ChatModel[factory](
|
||||
key=llm['api_key'],
|
||||
model_name=llm["llm_name"],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
|
||||
"temperature": 0.9})
|
||||
if not tc:
|
||||
if not tc and m.find("**ERROR**:") >= 0:
|
||||
raise Exception(m)
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm['llm_name']})." + str(
|
||||
msg += f"\nFail to access model({mdl_nm})." + str(
|
||||
e)
|
||||
elif llm["model_type"] == LLMType.RERANK:
|
||||
mdl = RerankModel[factory](
|
||||
key=llm["api_key"],
|
||||
model_name=llm["llm_name"],
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
mdl = RerankModel[factory](
|
||||
key=llm["api_key"],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
|
||||
if len(arr) == 0:
|
||||
raise Exception("Not known.")
|
||||
except KeyError:
|
||||
msg += f"{factory} dose not support this model({mdl_nm})"
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm['llm_name']})." + str(
|
||||
msg += f"\nFail to access model({mdl_nm})." + str(
|
||||
e)
|
||||
elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
|
||||
mdl = CvModel[factory](
|
||||
key=llm["api_key"],
|
||||
model_name=llm["llm_name"],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
img_url = (
|
||||
"https://www.8848seo.cn/zb_users/upload/2022/07/20220705101240_99378.jpg"
|
||||
)
|
||||
res = requests.get(img_url)
|
||||
if res.status_code == 200:
|
||||
m, tc = mdl.describe(res.content)
|
||||
if not tc:
|
||||
with open(os.path.join(get_project_base_directory(), "web/src/assets/yay.jpg"), "rb") as f:
|
||||
m, tc = mdl.describe(f.read())
|
||||
if not m and not tc:
|
||||
raise Exception(m)
|
||||
else:
|
||||
pass
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm['llm_name']})." + str(e)
|
||||
msg += f"\nFail to access model({mdl_nm})." + str(e)
|
||||
elif llm["model_type"] == LLMType.TTS:
|
||||
mdl = TTSModel[factory](
|
||||
key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
|
||||
key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
for resp in mdl.tts("Hello~ Ragflower!"):
|
||||
pass
|
||||
except RuntimeError as e:
|
||||
msg += f"\nFail to access model({llm['llm_name']})." + str(e)
|
||||
msg += f"\nFail to access model({mdl_nm})." + str(e)
|
||||
else:
|
||||
# TODO: check other type of models
|
||||
pass
|
||||
@@ -335,7 +324,7 @@ def my_llms():
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_app():
|
||||
self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
|
||||
self_deployed = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
|
||||
weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
|
||||
model_type = request.args.get("model_type")
|
||||
try:
|
||||
@@ -345,7 +334,7 @@ def list_app():
|
||||
llms = [m.to_dict()
|
||||
for m in llms if m.status == StatusEnum.VALID.value and m.fid not in weighted]
|
||||
for m in llms:
|
||||
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied
|
||||
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deployed
|
||||
|
||||
llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
|
||||
for o in objs:
|
||||
@@ -365,4 +354,4 @@ def list_app():
|
||||
|
||||
return get_json_result(data=res)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
return server_error_response(e)
|
||||
|
||||
@@ -13,45 +13,46 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
|
||||
from flask import request
|
||||
from api import settings
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import TenantLLMService
|
||||
from api.db.services.llm_service import TenantLLMService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_error_data_result, token_required
|
||||
from api.utils.api_utils import get_result
|
||||
|
||||
|
||||
|
||||
@manager.route('/chats', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def create(tenant_id):
|
||||
req=request.json
|
||||
ids= req.get("dataset_ids")
|
||||
if not ids:
|
||||
return get_error_data_result(message="`dataset_ids` is required")
|
||||
req = request.json
|
||||
ids = [i for i in req.get("dataset_ids", []) if i]
|
||||
for kb_id in ids:
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id,user_id=tenant_id)
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {kb_id}")
|
||||
kbs = KnowledgebaseService.query(id=kb_id)
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
kbs = KnowledgebaseService.get_by_ids(ids)
|
||||
embd_count = list(set([kb.embd_id for kb in kbs]))
|
||||
if len(embd_count) != 1:
|
||||
return get_result(message='Datasets use different embedding models."',code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) > 1:
|
||||
return get_result(message='Datasets use different embedding models."',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req["kb_ids"] = ids
|
||||
# llm
|
||||
llm = req.get("llm")
|
||||
if llm:
|
||||
if "model_name" in llm:
|
||||
req["llm_id"] = llm.pop("model_name")
|
||||
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
req["llm_setting"] = req.pop("llm")
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
@@ -65,7 +66,7 @@ def create(tenant_id):
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
|
||||
if prompt:
|
||||
for new_key, old_key in key_mapping.items():
|
||||
if old_key in prompt:
|
||||
@@ -82,8 +83,10 @@ def create(tenant_id):
|
||||
req["top_k"] = req.get("top_k", 1024)
|
||||
req["rerank_id"] = req.get("rerank_id", "")
|
||||
if req.get("rerank_id"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3","maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
|
||||
llm_name=req.get("rerank_id"),
|
||||
model_type="rerank"):
|
||||
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
|
||||
if not req.get("llm_id"):
|
||||
req["llm_id"] = tenant.llm_id
|
||||
@@ -106,11 +109,11 @@ def create(tenant_id):
|
||||
{"key": "knowledge", "optional": False}
|
||||
],
|
||||
"empty_response": "Sorry! No relevant content was found in the knowledge base!",
|
||||
"quote":True,
|
||||
"tts":False,
|
||||
"refine_multiturn":True
|
||||
"quote": True,
|
||||
"tts": False,
|
||||
"refine_multiturn": True
|
||||
}
|
||||
key_list_2 = ["system", "prologue", "parameters", "empty_response","quote","tts","refine_multiturn"]
|
||||
key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
|
||||
if "prompt_config" not in req:
|
||||
req['prompt_config'] = {}
|
||||
for key in key_list_2:
|
||||
@@ -138,7 +141,7 @@ def create(tenant_id):
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": res["vector_similarity_weight"],
|
||||
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
res["prompt"].update(new_dict)
|
||||
@@ -151,15 +154,16 @@ def create(tenant_id):
|
||||
res["avatar"] = res.pop("icon")
|
||||
return get_result(data=res)
|
||||
|
||||
|
||||
@manager.route('/chats/<chat_id>', methods=['PUT']) # noqa: F821
|
||||
@token_required
|
||||
def update(tenant_id,chat_id):
|
||||
def update(tenant_id, chat_id):
|
||||
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(message='You do not own the chat')
|
||||
req =request.json
|
||||
req = request.json
|
||||
ids = req.get("dataset_ids")
|
||||
if "show_quotation" in req:
|
||||
req["do_refer"]=req.pop("show_quotation")
|
||||
req["do_refer"] = req.pop("show_quotation")
|
||||
if "dataset_ids" in req:
|
||||
if not ids:
|
||||
return get_error_data_result("`dataset_ids` can't be empty")
|
||||
@@ -173,8 +177,9 @@ def update(tenant_id,chat_id):
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
kbs = KnowledgebaseService.get_by_ids(ids)
|
||||
embd_count=list(set([kb.embd_id for kb in kbs]))
|
||||
if len(embd_count) != 1 :
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) != 1:
|
||||
return get_result(
|
||||
message='Datasets use different embedding models."',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
@@ -183,7 +188,7 @@ def update(tenant_id,chat_id):
|
||||
if llm:
|
||||
if "model_name" in llm:
|
||||
req["llm_id"] = llm.pop("model_name")
|
||||
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
req["llm_setting"] = req.pop("llm")
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
@@ -197,7 +202,7 @@ def update(tenant_id,chat_id):
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
|
||||
if prompt:
|
||||
for new_key, old_key in key_mapping.items():
|
||||
if old_key in prompt:
|
||||
@@ -209,8 +214,10 @@ def update(tenant_id,chat_id):
|
||||
e, res = DialogService.get_by_id(chat_id)
|
||||
res = res.to_json()
|
||||
if req.get("rerank_id"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3","maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
|
||||
llm_name=req.get("rerank_id"),
|
||||
model_type="rerank"):
|
||||
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
|
||||
if "name" in req:
|
||||
if not req.get("name"):
|
||||
@@ -245,16 +252,16 @@ def update(tenant_id,chat_id):
|
||||
def delete(tenant_id):
|
||||
req = request.json
|
||||
if not req:
|
||||
ids=None
|
||||
ids = None
|
||||
else:
|
||||
ids=req.get("ids")
|
||||
ids = req.get("ids")
|
||||
if not ids:
|
||||
id_list = []
|
||||
dias=DialogService.query(tenant_id=tenant_id,status=StatusEnum.VALID.value)
|
||||
dias = DialogService.query(tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
for dia in dias:
|
||||
id_list.append(dia.id)
|
||||
else:
|
||||
id_list=ids
|
||||
id_list = ids
|
||||
for id in id_list:
|
||||
if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(message=f"You don't own the chat {id}")
|
||||
@@ -262,14 +269,16 @@ def delete(tenant_id):
|
||||
DialogService.update_by_id(id, temp_dict)
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route('/chats', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def list_chat(tenant_id):
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
chat = DialogService.query(id=id,name=name,status=StatusEnum.VALID.value,tenant_id=tenant_id)
|
||||
if not chat:
|
||||
return get_error_data_result(message="The chat doesn't exist")
|
||||
if id or name:
|
||||
chat = DialogService.query(id=id, name=name, status=StatusEnum.VALID.value, tenant_id=tenant_id)
|
||||
if not chat:
|
||||
return get_error_data_result(message="The chat doesn't exist")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
@@ -277,27 +286,27 @@ def list_chat(tenant_id):
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
chats = DialogService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,name)
|
||||
chats = DialogService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, name)
|
||||
if not chats:
|
||||
return get_result(data=[])
|
||||
list_assts = []
|
||||
renamed_dict = {}
|
||||
key_mapping = {"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight",
|
||||
"do_refer":"show_quotation"}
|
||||
"do_refer": "show_quotation"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
for res in chats:
|
||||
renamed_dict = {}
|
||||
for key, value in res["prompt_config"].items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": res["vector_similarity_weight"],
|
||||
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
res["prompt"].update(new_dict)
|
||||
@@ -308,11 +317,12 @@ def list_chat(tenant_id):
|
||||
kb_list = []
|
||||
for kb_id in res["kb_ids"]:
|
||||
kb = KnowledgebaseService.query(id=kb_id)
|
||||
if not kb :
|
||||
return get_error_data_result(message=f"Don't exist the kb {kb_id}")
|
||||
if not kb:
|
||||
logging.warning(f"The kb {kb_id} does not exist.")
|
||||
continue
|
||||
kb_list.append(kb[0].to_json())
|
||||
del res["kb_ids"]
|
||||
res["datasets"] = kb_list
|
||||
res["avatar"] = res.pop("icon")
|
||||
list_assts.append(res)
|
||||
return get_result(data=list_assts)
|
||||
return get_result(data=list_assts)
|
||||
|
||||
@@ -30,7 +30,7 @@ from api.utils.api_utils import (
|
||||
token_required,
|
||||
get_error_data_result,
|
||||
valid,
|
||||
get_parser_config,
|
||||
get_parser_config, valid_parser_config,
|
||||
)
|
||||
|
||||
|
||||
@@ -66,14 +66,11 @@ def create(tenant_id):
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Dataset permission.
|
||||
language:
|
||||
type: string
|
||||
enum: ['Chinese', 'English']
|
||||
description: Language of the dataset.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
|
||||
"presentation", "picture", "one", "knowledge_graph", "email"]
|
||||
"presentation", "picture", "one", "knowledge_graph", "email", "tag"
|
||||
]
|
||||
description: Chunking method.
|
||||
parser_config:
|
||||
type: object
|
||||
@@ -90,11 +87,10 @@ def create(tenant_id):
|
||||
req = request.json
|
||||
e, t = TenantService.get_by_id(tenant_id)
|
||||
permission = req.get("permission")
|
||||
language = req.get("language")
|
||||
chunk_method = req.get("chunk_method")
|
||||
parser_config = req.get("parser_config")
|
||||
valid_parser_config(parser_config)
|
||||
valid_permission = ["me", "team"]
|
||||
valid_language = ["Chinese", "English"]
|
||||
valid_chunk_method = [
|
||||
"naive",
|
||||
"manual",
|
||||
@@ -108,12 +104,11 @@ def create(tenant_id):
|
||||
"one",
|
||||
"knowledge_graph",
|
||||
"email",
|
||||
"tag"
|
||||
]
|
||||
check_validation = valid(
|
||||
permission,
|
||||
valid_permission,
|
||||
language,
|
||||
valid_language,
|
||||
chunk_method,
|
||||
valid_chunk_method,
|
||||
)
|
||||
@@ -132,13 +127,18 @@ def create(tenant_id):
|
||||
req["name"] = req["name"].strip()
|
||||
if req["name"] == "":
|
||||
return get_error_data_result(message="`name` is not empty string!")
|
||||
if len(req["name"]) >= 128:
|
||||
return get_error_data_result(
|
||||
message="Dataset name should not be longer than 128 characters."
|
||||
)
|
||||
if KnowledgebaseService.query(
|
||||
name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
|
||||
):
|
||||
return get_error_data_result(
|
||||
message="Duplicated dataset name in creating dataset."
|
||||
)
|
||||
req["tenant_id"] = req["created_by"] = tenant_id
|
||||
req["tenant_id"] = tenant_id
|
||||
req["created_by"] = tenant_id
|
||||
if not req.get("embedding_model"):
|
||||
req["embedding_model"] = t.embd_id
|
||||
else:
|
||||
@@ -180,6 +180,10 @@ def create(tenant_id):
|
||||
if old_key in req
|
||||
}
|
||||
req.update(mapped_keys)
|
||||
flds = list(req.keys())
|
||||
for f in flds:
|
||||
if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
|
||||
del req[f]
|
||||
if not KnowledgebaseService.save(**req):
|
||||
return get_error_data_result(message="Create dataset error.(Database error)")
|
||||
renamed_data = {}
|
||||
@@ -224,6 +228,8 @@ def delete(tenant_id):
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
errors = []
|
||||
success_count = 0
|
||||
req = request.json
|
||||
if not req:
|
||||
ids = None
|
||||
@@ -239,12 +245,12 @@ def delete(tenant_id):
|
||||
for id in id_list:
|
||||
kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(message=f"You don't own the dataset {id}")
|
||||
errors.append(f"You don't own the dataset {id}")
|
||||
continue
|
||||
for doc in DocumentService.query(kb_id=id):
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_error_data_result(
|
||||
message="Remove document error.(Database error)"
|
||||
)
|
||||
errors.append(f"Remove document error for dataset {id}")
|
||||
continue
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[
|
||||
@@ -256,7 +262,17 @@ def delete(tenant_id):
|
||||
FileService.filter_delete(
|
||||
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
|
||||
if not KnowledgebaseService.delete_by_id(id):
|
||||
return get_error_data_result(message="Delete dataset error.(Database error)")
|
||||
errors.append(f"Delete dataset error for {id}")
|
||||
continue
|
||||
success_count += 1
|
||||
if errors:
|
||||
if success_count > 0:
|
||||
return get_result(
|
||||
data={"success_count": success_count, "errors": errors},
|
||||
message=f"Partially deleted {success_count} datasets with {len(errors)} errors"
|
||||
)
|
||||
else:
|
||||
return get_error_data_result(message="; ".join(errors))
|
||||
return get_result(code=settings.RetCode.SUCCESS)
|
||||
|
||||
|
||||
@@ -295,14 +311,11 @@ def update(tenant_id, dataset_id):
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Updated permission.
|
||||
language:
|
||||
type: string
|
||||
enum: ['Chinese', 'English']
|
||||
description: Updated language.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
|
||||
"presentation", "picture", "one", "knowledge_graph", "email"]
|
||||
"presentation", "picture", "one", "knowledge_graph", "email", "tag"
|
||||
]
|
||||
description: Updated chunking method.
|
||||
parser_config:
|
||||
type: object
|
||||
@@ -321,11 +334,10 @@ def update(tenant_id, dataset_id):
|
||||
if any(key in req for key in invalid_keys):
|
||||
return get_error_data_result(message="The input parameters are invalid.")
|
||||
permission = req.get("permission")
|
||||
language = req.get("language")
|
||||
chunk_method = req.get("chunk_method")
|
||||
parser_config = req.get("parser_config")
|
||||
valid_parser_config(parser_config)
|
||||
valid_permission = ["me", "team"]
|
||||
valid_language = ["Chinese", "English"]
|
||||
valid_chunk_method = [
|
||||
"naive",
|
||||
"manual",
|
||||
@@ -339,12 +351,11 @@ def update(tenant_id, dataset_id):
|
||||
"one",
|
||||
"knowledge_graph",
|
||||
"email",
|
||||
"tag"
|
||||
]
|
||||
check_validation = valid(
|
||||
permission,
|
||||
valid_permission,
|
||||
language,
|
||||
valid_language,
|
||||
chunk_method,
|
||||
valid_chunk_method,
|
||||
)
|
||||
@@ -412,6 +423,10 @@ def update(tenant_id, dataset_id):
|
||||
req["embd_id"] = req.pop("embedding_model")
|
||||
if "name" in req:
|
||||
req["name"] = req["name"].strip()
|
||||
if len(req["name"]) >= 128:
|
||||
return get_error_data_result(
|
||||
message="Dataset name should not be longer than 128 characters."
|
||||
)
|
||||
if (
|
||||
req["name"].lower() != kb.name.lower()
|
||||
and len(
|
||||
@@ -431,7 +446,7 @@ def update(tenant_id, dataset_id):
|
||||
|
||||
@manager.route("/datasets", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list(tenant_id):
|
||||
def list_datasets(tenant_id):
|
||||
"""
|
||||
List datasets.
|
||||
---
|
||||
@@ -500,7 +515,9 @@ def list(tenant_id):
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
if request.args.get("desc", "false").lower() not in ["true", "false"]:
|
||||
return get_error_data_result("desc should be true or false")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
@@ -15,11 +15,12 @@
|
||||
#
|
||||
from flask import request, jsonify
|
||||
|
||||
from api.db import LLMType, ParserType
|
||||
from api.db import LLMType
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api import settings
|
||||
from api.utils.api_utils import validate_request, build_error_result, apikey_required
|
||||
from rag.app.tag import label_question
|
||||
|
||||
|
||||
@manager.route('/dify/retrieval', methods=['POST']) # noqa: F821
|
||||
@@ -29,6 +30,7 @@ def retrieval(tenant_id):
|
||||
req = request.json
|
||||
question = req["query"]
|
||||
kb_id = req["knowledge_id"]
|
||||
use_kg = req.get("use_kg", False)
|
||||
retrieval_setting = req.get("retrieval_setting", {})
|
||||
similarity_threshold = float(retrieval_setting.get("score_threshold", 0.0))
|
||||
top = int(retrieval_setting.get("top_k", 1024))
|
||||
@@ -44,8 +46,7 @@ def retrieval(tenant_id):
|
||||
|
||||
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
|
||||
|
||||
retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
|
||||
ranks = retr.retrieval(
|
||||
ranks = settings.retrievaler.retrieval(
|
||||
question,
|
||||
embd_mdl,
|
||||
kb.tenant_id,
|
||||
@@ -54,13 +55,24 @@ def retrieval(tenant_id):
|
||||
page_size=top,
|
||||
similarity_threshold=similarity_threshold,
|
||||
vector_similarity_weight=0.3,
|
||||
top=top
|
||||
top=top,
|
||||
rank_feature=label_question(question, [kb])
|
||||
)
|
||||
|
||||
if use_kg:
|
||||
ck = settings.kg_retrievaler.retrieval(question,
|
||||
[tenant_id],
|
||||
[kb_id],
|
||||
embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
records = []
|
||||
for c in ranks["chunks"]:
|
||||
c.pop("vector", None)
|
||||
records.append({
|
||||
"content": c["content_ltks"],
|
||||
"content": c["content_with_weight"],
|
||||
"score": c["similarity"],
|
||||
"title": c["docnm_kwd"],
|
||||
"metadata": {}
|
||||
|
||||
@@ -16,11 +16,10 @@
|
||||
import pathlib
|
||||
import datetime
|
||||
|
||||
from api.db.services.dialog_service import keyword_extraction
|
||||
from rag.app.qa import rmPrefix, beAdoc
|
||||
from rag.nlp import rag_tokenizer
|
||||
from api.db import LLMType, ParserType
|
||||
from api.db.services.llm_service import TenantLLMService
|
||||
from api.db.services.llm_service import TenantLLMService, LLMBundle
|
||||
from api import settings
|
||||
import xxhash
|
||||
import re
|
||||
@@ -39,12 +38,35 @@ from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import construct_json_result, get_parser_config
|
||||
from rag.nlp import search
|
||||
from rag.prompts import keyword_extraction
|
||||
from rag.app.tag import label_question
|
||||
from rag.utils import rmSpace
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
from pydantic import BaseModel, Field, validator
|
||||
|
||||
MAXIMUM_OF_UPLOADING_FILES = 256
|
||||
|
||||
|
||||
class Chunk(BaseModel):
|
||||
id: str = ""
|
||||
content: str = ""
|
||||
document_id: str = ""
|
||||
docnm_kwd: str = ""
|
||||
important_keywords: list = Field(default_factory=list)
|
||||
questions: list = Field(default_factory=list)
|
||||
question_tks: str = ""
|
||||
image_id: str = ""
|
||||
available: bool = True
|
||||
positions: list[list[int]] = Field(default_factory=list)
|
||||
|
||||
@validator('positions')
|
||||
def validate_positions(cls, value):
|
||||
for sublist in value:
|
||||
if len(sublist) != 5:
|
||||
raise ValueError("Each sublist in positions must have a length of 5")
|
||||
return value
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def upload(dataset_id, tenant_id):
|
||||
@@ -218,6 +240,11 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if req["progress"] != doc.progress:
|
||||
return get_error_data_result(message="Can't change `progress`.")
|
||||
|
||||
if "meta_fields" in req:
|
||||
if not isinstance(req["meta_fields"], dict):
|
||||
return get_error_data_result(message="meta_fields must be a dictionary")
|
||||
DocumentService.update_meta_fields(document_id, req["meta_fields"])
|
||||
|
||||
if "name" in req and req["name"] != doc.name:
|
||||
if (
|
||||
pathlib.Path(req["name"].lower()).suffix
|
||||
@@ -239,6 +266,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if informs:
|
||||
e, file = FileService.get_by_id(informs[0].file_id)
|
||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
||||
|
||||
if "parser_config" in req:
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if "chunk_method" in req:
|
||||
@@ -255,6 +283,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
"one",
|
||||
"knowledge_graph",
|
||||
"email",
|
||||
"tag"
|
||||
}
|
||||
if req.get("chunk_method") not in valid_chunk_method:
|
||||
return get_error_data_result(
|
||||
@@ -450,10 +479,12 @@ def list_docs(dataset_id, tenant_id):
|
||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
if not DocumentService.query(id=id, kb_id=dataset_id):
|
||||
|
||||
if id and not DocumentService.query(id=id, kb_id=dataset_id):
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if not DocumentService.query(name=name, kb_id=dataset_id):
|
||||
if name and not DocumentService.query(name=name, kb_id=dataset_id):
|
||||
return get_error_data_result(message=f"You don't own the document {name}.")
|
||||
|
||||
page = int(request.args.get("page", 1))
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_size = int(request.args.get("page_size", 30))
|
||||
@@ -701,13 +732,13 @@ def stop_parsing(tenant_id, dataset_id):
|
||||
doc = DocumentService.query(id=id, kb_id=dataset_id)
|
||||
if not doc:
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if int(doc[0].progress) == 1 or int(doc[0].progress) == 0:
|
||||
if int(doc[0].progress) == 1 or doc[0].progress == 0:
|
||||
return get_error_data_result(
|
||||
"Can't stop parsing document with progress at 0 or 1"
|
||||
)
|
||||
info = {"run": "2", "progress": 0, "chunk_num": 0}
|
||||
DocumentService.update_by_id(id, info)
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
|
||||
settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
|
||||
return get_result()
|
||||
|
||||
|
||||
@@ -826,72 +857,55 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
renamed_doc["run"] = run_mapping.get(str(value))
|
||||
|
||||
res = {"total": 0, "chunks": [], "doc": renamed_doc}
|
||||
origin_chunks = []
|
||||
if settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
|
||||
if req.get("id"):
|
||||
chunk = settings.docStoreConn.get(req.get("id"), search.index_name(tenant_id), [dataset_id])
|
||||
k = []
|
||||
for n in chunk.keys():
|
||||
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
||||
k.append(n)
|
||||
for n in k:
|
||||
del chunk[n]
|
||||
if not chunk:
|
||||
return get_error_data_result(f"Chunk `{req.get('id')}` not found.")
|
||||
res['total'] = 1
|
||||
final_chunk = {
|
||||
"id":chunk.get("id",chunk.get("chunk_id")),
|
||||
"content":chunk["content_with_weight"],
|
||||
"document_id":chunk.get("doc_id",chunk.get("document_id")),
|
||||
"docnm_kwd":chunk["docnm_kwd"],
|
||||
"important_keywords":chunk.get("important_kwd",[]),
|
||||
"questions":chunk.get("question_kwd",[]),
|
||||
"dataset_id":chunk.get("kb_id",chunk.get("dataset_id")),
|
||||
"image_id":chunk["img_id"],
|
||||
"available":bool(chunk.get("available_int",1)),
|
||||
"positions":chunk.get("position_int",[]),
|
||||
}
|
||||
res["chunks"].append(final_chunk)
|
||||
_ = Chunk(**final_chunk)
|
||||
|
||||
elif settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
|
||||
sres = settings.retrievaler.search(query, search.index_name(tenant_id), [dataset_id], emb_mdl=None,
|
||||
highlight=True)
|
||||
res["total"] = sres.total
|
||||
sign = 0
|
||||
for id in sres.ids:
|
||||
d = {
|
||||
"id": id,
|
||||
"content_with_weight": (
|
||||
"content": (
|
||||
rmSpace(sres.highlight[id])
|
||||
if question and id in sres.highlight
|
||||
else sres.field[id].get("content_with_weight", "")
|
||||
),
|
||||
"doc_id": sres.field[id]["doc_id"],
|
||||
"document_id": sres.field[id]["doc_id"],
|
||||
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
||||
"important_kwd": sres.field[id].get("important_kwd", []),
|
||||
"question_kwd": sres.field[id].get("question_kwd", []),
|
||||
"img_id": sres.field[id].get("img_id", ""),
|
||||
"available_int": sres.field[id].get("available_int", 1),
|
||||
"positions": sres.field[id].get("position_int", []),
|
||||
"important_keywords": sres.field[id].get("important_kwd", []),
|
||||
"questions": sres.field[id].get("question_kwd", []),
|
||||
"dataset_id": sres.field[id].get("kb_id", sres.field[id].get("dataset_id")),
|
||||
"image_id": sres.field[id].get("img_id", ""),
|
||||
"available": bool(sres.field[id].get("available_int", 1)),
|
||||
"positions": sres.field[id].get("position_int",[]),
|
||||
}
|
||||
if len(d["positions"]) % 5 == 0:
|
||||
poss = []
|
||||
for i in range(0, len(d["positions"]), 5):
|
||||
poss.append(
|
||||
[
|
||||
float(d["positions"][i]),
|
||||
float(d["positions"][i + 1]),
|
||||
float(d["positions"][i + 2]),
|
||||
float(d["positions"][i + 3]),
|
||||
float(d["positions"][i + 4]),
|
||||
]
|
||||
)
|
||||
d["positions"] = poss
|
||||
|
||||
origin_chunks.append(d)
|
||||
if req.get("id"):
|
||||
if req.get("id") == id:
|
||||
origin_chunks.clear()
|
||||
origin_chunks.append(d)
|
||||
sign = 1
|
||||
break
|
||||
if req.get("id"):
|
||||
if sign == 0:
|
||||
return get_error_data_result(f"Can't find this chunk {req.get('id')}")
|
||||
|
||||
for chunk in origin_chunks:
key_mapping = {
"id": "id",
"content_with_weight": "content",
"doc_id": "document_id",
"important_kwd": "important_keywords",
"question_kwd": "questions",
"img_id": "image_id",
"available_int": "available",
}
renamed_chunk = {}
for key, value in chunk.items():
new_key = key_mapping.get(key, key)
renamed_chunk[new_key] = value
if renamed_chunk["available"] == 0:
renamed_chunk["available"] = False
if renamed_chunk["available"] == 1:
renamed_chunk["available"] = True
res["chunks"].append(renamed_chunk)
res["chunks"].append(d)
_ = Chunk(**d)  # validate the chunk
return get_result(data=res)

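# A minimal, standalone sketch of the two per-chunk transformations applied above:
# internal storage field names are renamed to the public API names, and the flat list
# of position integers (stored in groups of five) is reshaped into 5-element lists.
# `rename_chunk_fields` and `unflatten_positions` are illustrative helpers, not
# functions from the codebase, and the meaning of the five numbers is assumed here.

KEY_MAPPING = {
    "content_with_weight": "content",
    "doc_id": "document_id",
    "important_kwd": "important_keywords",
    "question_kwd": "questions",
    "img_id": "image_id",
    "available_int": "available",
    "kb_id": "dataset_id",
}

def unflatten_positions(flat):
    # [page, x0, x1, top, bottom, page, x0, ...] -> [[page, x0, x1, top, bottom], ...]
    if len(flat) % 5 != 0:
        return flat
    return [[float(v) for v in flat[i:i + 5]] for i in range(0, len(flat), 5)]

def rename_chunk_fields(chunk):
    out = {KEY_MAPPING.get(k, k): v for k, v in chunk.items()}
    out["available"] = bool(out.get("available", 1))
    out["positions"] = unflatten_positions(out.get("positions", []))
    return out

# Example:
# rename_chunk_fields({"doc_id": "d1", "available_int": 0, "positions": [1, 10, 90, 5, 20]})
# -> {"document_id": "d1", "available": False, "positions": [[1.0, 10.0, 90.0, 5.0, 20.0]]}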
@ -1031,6 +1045,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
|
||||
if key in key_mapping:
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_chunk[new_key] = value
|
||||
_ = Chunk(**renamed_chunk) # validate the chunk
|
||||
return get_result(data={"chunk": renamed_chunk})
|
||||
# return get_result(data={"chunk_id": chunk_id})
|
||||
|
||||
@ -1291,15 +1306,15 @@ def retrieval_test(tenant_id):
kb_ids = req["dataset_ids"]
if not isinstance(kb_ids, list):
return get_error_data_result("`dataset_ids` should be a list")
kbs = KnowledgebaseService.get_by_ids(kb_ids)
for id in kb_ids:
if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
return get_error_data_result(f"You don't own the dataset {id}.")
embd_nms = list(set([kb.embd_id for kb in kbs]))
kbs = KnowledgebaseService.get_by_ids(kb_ids)
embd_nms = list(set([TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]))  # remove vendor suffix for comparison
if len(embd_nms) != 1:
return get_result(
message='Datasets use different embedding models.',
code=settings.RetCode.AUTHENTICATION_ERROR,
code=settings.RetCode.DATA_ERROR,
)
if "question" not in req:
return get_error_data_result("`question` is required.")
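# Hedged sketch of the embedding-model check above. Dataset embedding IDs may carry a
# vendor suffix (e.g. "text-embedding-3-small@OpenAI"); stripping it before comparing
# avoids rejecting datasets that use the same model registered under different factories.
# `strip_factory` is an illustrative stand-in for TenantLLMService.split_model_name_and_factory.

def strip_factory(embd_id: str) -> str:
    # "model@Factory" -> "model"; IDs without a suffix pass through unchanged.
    return embd_id.rsplit("@", 1)[0]

def datasets_share_embedding(kbs) -> bool:
    return len({strip_factory(kb.embd_id) for kb in kbs}) == 1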
@ -1307,6 +1322,7 @@ def retrieval_test(tenant_id):
|
||||
size = int(req.get("page_size", 30))
|
||||
question = req["question"]
|
||||
doc_ids = req.get("document_ids", [])
|
||||
use_kg = req.get("use_kg", False)
|
||||
if not isinstance(doc_ids, list):
|
||||
return get_error_data_result("`documents` should be a list")
|
||||
doc_ids_list = KnowledgebaseService.list_documents_by_ids(kb_ids)
|
||||
@ -1326,22 +1342,17 @@ def retrieval_test(tenant_id):
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
|
||||
if not e:
|
||||
return get_error_data_result(message="Dataset not found!")
|
||||
embd_mdl = TenantLLMService.model_instance(
|
||||
kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id
|
||||
)
|
||||
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
|
||||
|
||||
rerank_mdl = None
|
||||
if req.get("rerank_id"):
|
||||
rerank_mdl = TenantLLMService.model_instance(
|
||||
kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"]
|
||||
)
|
||||
rerank_mdl = LLMBundle(kb.tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])
|
||||
|
||||
if req.get("keyword", False):
|
||||
chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
|
||||
chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
|
||||
question += keyword_extraction(chat_mdl, question)
|
||||
|
||||
retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
|
||||
ranks = retr.retrieval(
|
||||
ranks = settings.retrievaler.retrieval(
|
||||
question,
|
||||
embd_mdl,
|
||||
kb.tenant_id,
|
||||
@ -1354,7 +1365,17 @@ def retrieval_test(tenant_id):
|
||||
doc_ids,
|
||||
rerank_mdl=rerank_mdl,
|
||||
highlight=highlight,
|
||||
rank_feature=label_question(question, kbs)
|
||||
)
|
||||
if use_kg:
|
||||
ck = settings.kg_retrievaler.retrieval(question,
|
||||
[k.tenant_id for k in kbs],
|
||||
kb_ids,
|
||||
embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
for c in ranks["chunks"]:
|
||||
c.pop("vector", None)
|
||||
|
||||
@ -1368,6 +1389,7 @@ def retrieval_test(tenant_id):
|
||||
"important_kwd": "important_keywords",
|
||||
"question_kwd": "questions",
|
||||
"docnm_kwd": "document_keyword",
|
||||
"kb_id":"dataset_id"
|
||||
}
|
||||
rename_chunk = {}
|
||||
for key, value in chunk.items():
|
||||
|
||||
@ -15,13 +15,13 @@
|
||||
#
|
||||
import re
|
||||
import json
|
||||
from api.db import LLMType
|
||||
from flask import request, Response
|
||||
import time
|
||||
|
||||
from api.db import LLMType
|
||||
from api.db.services.conversation_service import ConversationService, iframe_completion
|
||||
from api.db.services.conversation_service import completion as rag_completion
|
||||
from api.db.services.canvas_service import completion as agent_completion
|
||||
from api.db.services.dialog_service import ask
|
||||
from api.db.services.dialog_service import ask, chat
|
||||
from agent.canvas import Canvas
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import APIToken
|
||||
@ -30,10 +30,12 @@ from api.db.services.canvas_service import UserCanvasService
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_error_data_result
|
||||
from api.utils.api_utils import get_error_data_result, validate_request
|
||||
from api.utils.api_utils import get_result, token_required
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.file_service import FileService
|
||||
|
||||
from flask import jsonify, request, Response
|
||||
|
||||
@manager.route('/chats/<chat_id>/sessions', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
@ -47,7 +49,8 @@ def create(tenant_id, chat_id):
|
||||
"id": get_uuid(),
|
||||
"dialog_id": req["dialog_id"],
|
||||
"name": req.get("name", "New session"),
|
||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
|
||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
|
||||
"user_id": req.get("user_id", "")
|
||||
}
|
||||
if not conv.get("name"):
|
||||
return get_error_data_result(message="`name` can not be empty.")
|
||||
@ -65,23 +68,66 @@ def create(tenant_id, chat_id):
|
||||
@manager.route('/agents/<agent_id>/sessions', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def create_agent_session(tenant_id, agent_id):
|
||||
req = request.json
|
||||
if not request.is_json:
|
||||
req = request.form
|
||||
files = request.files
|
||||
user_id = request.args.get('user_id', '')
|
||||
|
||||
e, cvs = UserCanvasService.get_by_id(agent_id)
|
||||
if not e:
|
||||
return get_error_data_result("Agent not found.")
|
||||
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_error_data_result("You cannot access the agent.")
|
||||
|
||||
if not isinstance(cvs.dsl, str):
|
||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||
|
||||
canvas = Canvas(cvs.dsl, tenant_id)
|
||||
if canvas.get_preset_param():
|
||||
return get_error_data_result("The agent can't create a session directly")
|
||||
canvas.reset()
|
||||
query = canvas.get_preset_param()
|
||||
if query:
|
||||
for ele in query:
|
||||
if not ele["optional"]:
|
||||
if ele["type"] == "file":
|
||||
if files is None or not files.get(ele["key"]):
|
||||
return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
|
||||
upload_file = files.get(ele["key"])
|
||||
file_content = FileService.parse_docs([upload_file], user_id)
|
||||
file_name = upload_file.filename
|
||||
ele["value"] = file_name + "\n" + file_content
|
||||
else:
|
||||
if req is None or not req.get(ele["key"]):
|
||||
return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
|
||||
ele["value"] = req[ele["key"]]
|
||||
else:
|
||||
if ele["type"] == "file":
|
||||
if files is not None and files.get(ele["key"]):
|
||||
upload_file = files.get(ele["key"])
|
||||
file_content = FileService.parse_docs([upload_file], user_id)
|
||||
file_name = upload_file.filename
|
||||
ele["value"] = file_name + "\n" + file_content
|
||||
else:
|
||||
if "value" in ele:
|
||||
ele.pop("value")
|
||||
else:
|
||||
if req is not None and req.get(ele["key"]):
|
||||
ele["value"] = req[ele['key']]
|
||||
else:
|
||||
if "value" in ele:
|
||||
ele.pop("value")
|
||||
else:
|
||||
for ans in canvas.run(stream=False):
|
||||
pass
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
conv = {
|
||||
"id": get_uuid(),
|
||||
"dialog_id": cvs.id,
|
||||
"user_id": tenant_id,
|
||||
"user_id": user_id,
|
||||
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
|
||||
"source": "agent",
|
||||
"dsl": json.loads(cvs.dsl)
|
||||
"dsl": cvs.dsl
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
conv["agent_id"] = conv.pop("dialog_id")
|
||||
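# Hedged usage sketch for the session-creation route above: plain "begin" parameters go
# in a JSON body, while a preset parameter of type "file" is sent as multipart/form-data
# with the remaining keys in the form body. Host, API key and the parameter names
# ("file_key", "city") are placeholders, not values defined by the project.
import requests

BASE = "http://ragflow_address/api/v1"
HEADERS = {"Authorization": "Bearer <RAGFLOW_API_KEY>"}

# JSON-only parameters:
requests.post(f"{BASE}/agents/<agent_id>/sessions?user_id=u1",
              headers=HEADERS, json={"city": "Berlin"})

# With a required file parameter:
with open("report.pdf", "rb") as f:
    requests.post(f"{BASE}/agents/<agent_id>/sessions?user_id=u1",
                  headers=HEADERS,
                  files={"file_key": f},
                  data={"city": "Berlin"})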
@ -114,12 +160,14 @@ def update(tenant_id, chat_id, session_id):
|
||||
@token_required
|
||||
def chat_completion(tenant_id, chat_id):
|
||||
req = request.json
|
||||
if not req or not req.get("session_id"):
|
||||
req = {"question":""}
|
||||
if not DialogService.query(tenant_id=tenant_id,id=chat_id,status=StatusEnum.VALID.value):
|
||||
if not req:
|
||||
req = {"question": ""}
|
||||
if not req.get("session_id"):
|
||||
req["question"]=""
|
||||
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(f"You don't own the chat {chat_id}")
|
||||
if req.get("session_id"):
|
||||
if not ConversationService.query(id=req["session_id"],dialog_id=chat_id):
|
||||
if not ConversationService.query(id=req["session_id"], dialog_id=chat_id):
|
||||
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
||||
if req.get("stream", True):
|
||||
resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
|
||||
@ -137,31 +185,212 @@ def chat_completion(tenant_id, chat_id):
|
||||
return get_result(data=answer)
|
||||
|
||||
|
||||
@manager.route('chats_openai/<chat_id>/chat/completions', methods=['POST'])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def chat_completion_openai_like(tenant_id, chat_id):
    """
    OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.

    This function allows users to interact with a model and receive responses based on a series of historical messages.
    If `stream` is set to True (the default), the response is streamed in chunks, mimicking the OpenAI-style API.
    If `stream` is explicitly set to False, the response is returned as a single complete answer.

    Example usage:

    curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $RAGFLOW_API_KEY" \
        -d '{
            "model": "model",
            "messages": [{"role": "user", "content": "Say this is a test!"}],
            "stream": true
        }'

    Alternatively, you can use Python's `OpenAI` client:

    from openai import OpenAI

    model = "model"
    client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")

    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who are you?"},
            {"role": "assistant", "content": "I am an AI assistant named..."},
            {"role": "user", "content": "Can you tell me how to install neovim"},
        ],
        stream=True
    )

    stream = True
    if stream:
        for chunk in completion:
            print(chunk)
    else:
        print(completion.choices[0].message.content)
    """
req = request.json
|
||||
|
||||
messages = req.get("messages", [])
|
||||
# To prevent empty [] input
|
||||
if len(messages) < 1:
|
||||
return get_error_data_result("You have to provide messages.")
|
||||
if messages[-1]["role"] != "user":
|
||||
return get_error_data_result("The last content of this conversation is not from user.")
|
||||
|
||||
prompt = messages[-1]["content"]
|
||||
# Treat context tokens as reasoning tokens
|
||||
context_token_used = sum(len(message["content"]) for message in messages)
|
||||
|
||||
dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
|
||||
if not dia:
|
||||
return get_error_data_result(f"You don't own the chat {chat_id}")
|
||||
dia = dia[0]
|
||||
|
||||
# Filter system and non-sense assistant messages
|
||||
msg = None
|
||||
msg = [m for m in messages if m["role"] != "system" and (m["role"] != "assistant" or msg)]
|
||||
|
||||
if req.get("stream", True):
|
||||
# The value for the usage field on all chunks except for the last one will be null.
|
||||
# The usage field on the last chunk contains token usage statistics for the entire request.
|
||||
# The choices field on the last chunk will always be an empty array [].
|
||||
def streamed_response_generator(chat_id, dia, msg):
|
||||
token_used = 0
|
||||
response = {
|
||||
"id": f"chatcmpl-{chat_id}",
|
||||
"choices": [
|
||||
{
|
||||
"delta": {
|
||||
"content": "",
|
||||
"role": "assistant",
|
||||
"function_call": None,
|
||||
"tool_calls": None
|
||||
},
|
||||
"finish_reason": None,
|
||||
"index": 0,
|
||||
"logprobs": None
|
||||
}
|
||||
],
|
||||
"created": int(time.time()),
|
||||
"model": "model",
|
||||
"object": "chat.completion.chunk",
|
||||
"system_fingerprint": "",
|
||||
"usage": None
|
||||
}
|
||||
|
||||
try:
|
||||
for ans in chat(dia, msg, True):
|
||||
answer = ans["answer"]
|
||||
incremental = answer[token_used:]
|
||||
token_used += len(incremental)
|
||||
response["choices"][0]["delta"]["content"] = incremental
|
||||
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
|
||||
except Exception as e:
|
||||
response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
|
||||
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
|
||||
|
||||
# The last chunk
|
||||
response["choices"][0]["delta"]["content"] = None
|
||||
response["choices"][0]["finish_reason"] = "stop"
|
||||
response["usage"] = {
|
||||
"prompt_tokens": len(prompt),
|
||||
"completion_tokens": token_used,
|
||||
"total_tokens": len(prompt) + token_used
|
||||
}
|
||||
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
|
||||
yield "data:[DONE]\n\n"
|
||||
|
||||
|
||||
resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
else:
|
||||
answer = None
|
||||
for ans in chat(dia, msg, False):
|
||||
# focus answer content only
|
||||
answer = ans
|
||||
break
|
||||
content = answer["answer"]
|
||||
|
||||
response = {
|
||||
"id": f"chatcmpl-{chat_id}",
|
||||
"object": "chat.completion",
|
||||
"created": int(time.time()),
|
||||
"model": req.get("model", ""),
|
||||
"usage": {
|
||||
"prompt_tokens": len(prompt),
|
||||
"completion_tokens": len(content),
|
||||
"total_tokens": len(prompt) + len(content),
|
||||
"completion_tokens_details": {
|
||||
"reasoning_tokens": context_token_used,
|
||||
"accepted_prediction_tokens": len(content),
|
||||
"rejected_prediction_tokens": 0 # 0 for simplicity
|
||||
}
|
||||
},
|
||||
"choices": [
|
||||
{
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": content
|
||||
},
|
||||
"logprobs": None,
|
||||
"finish_reason": "stop",
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
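# Hedged sketch of consuming the streamed variant of the endpoint above: each SSE event
# is a line starting with "data:" whose payload is an OpenAI-style chat.completion.chunk;
# the final data chunk carries the "usage" totals and is followed by "data:[DONE]".
# Host and API key are placeholders.
import json
import requests

url = "http://ragflow_address/api/v1/chats_openai/<chat_id>/chat/completions"
payload = {"model": "model", "messages": [{"role": "user", "content": "Say this is a test!"}], "stream": True}
headers = {"Authorization": "Bearer <RAGFLOW_API_KEY>"}

with requests.post(url, json=payload, headers=headers, stream=True) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        if not raw or not raw.startswith("data:"):
            continue
        body = raw[len("data:"):]
        if body.strip() == "[DONE]":
            break
        chunk = json.loads(body)
        delta = chunk["choices"][0]["delta"].get("content") if chunk["choices"] else None
        if delta:
            print(delta, end="", flush=True)
        if chunk.get("usage"):
            print("\n", chunk["usage"])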
@manager.route('/agents/<agent_id>/completions', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def agent_completions(tenant_id, agent_id):
|
||||
req = request.json
|
||||
cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
|
||||
if not cvs:
|
||||
return get_error_data_result(f"You don't own the agent {agent_id}")
|
||||
if req.get("session_id"):
|
||||
conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
|
||||
if not conv:
|
||||
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
||||
else:
|
||||
req["question"]=""
|
||||
if req.get("stream", True):
|
||||
resp = Response(agent_completion(tenant_id, agent_id, **req), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
try:
|
||||
for answer in agent_completion(tenant_id, agent_id, **req):
|
||||
return get_result(data=answer)
|
||||
except Exception as e:
|
||||
return get_error_data_result(str(e))
|
||||
req = request.json
|
||||
cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
|
||||
if not cvs:
|
||||
return get_error_data_result(f"You don't own the agent {agent_id}")
|
||||
if req.get("session_id"):
|
||||
dsl = cvs[0].dsl
|
||||
if not isinstance(dsl, str):
|
||||
dsl = json.dumps(dsl)
|
||||
#canvas = Canvas(dsl, tenant_id)
|
||||
#if canvas.get_preset_param():
|
||||
# req["question"] = ""
|
||||
conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
|
||||
if not conv:
|
||||
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
||||
# If an update to UserCanvas is detected, update the API4Conversation.dsl
|
||||
sync_dsl = req.get("sync_dsl", False)
|
||||
if sync_dsl is True and cvs[0].update_time > conv[0].update_time:
|
||||
current_dsl = conv[0].dsl
|
||||
new_dsl = json.loads(dsl)
|
||||
state_fields = ["history", "messages", "path", "reference"]
|
||||
states = {field: current_dsl.get(field, []) for field in state_fields}
|
||||
current_dsl.update(new_dsl)
|
||||
current_dsl.update(states)
|
||||
API4ConversationService.update_by_id(req["session_id"], {
|
||||
"dsl": current_dsl
|
||||
})
|
||||
else:
|
||||
req["question"] = ""
|
||||
if req.get("stream", True):
|
||||
resp = Response(agent_completion(tenant_id, agent_id, **req), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
try:
|
||||
for answer in agent_completion(tenant_id, agent_id, **req):
|
||||
return get_result(data=answer)
|
||||
except Exception as e:
|
||||
return get_error_data_result(str(e))
|
||||
|
||||
|
||||
@manager.route('/chats/<chat_id>/sessions', methods=['GET']) # noqa: F821
|
||||
@ -174,11 +403,12 @@ def list_session(tenant_id, chat_id):
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
user_id = request.args.get("user_id")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name)
|
||||
convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name, user_id)
|
||||
if not convs:
|
||||
return get_result(data=[])
|
||||
for conv in convs:
|
||||
@ -199,17 +429,15 @@ def list_session(tenant_id, chat_id):
|
||||
chunks = conv["reference"][chunk_num]["chunks"]
|
||||
for chunk in chunks:
|
||||
new_chunk = {
|
||||
"id": chunk["chunk_id"],
|
||||
"content": chunk["content_with_weight"],
|
||||
"document_id": chunk["doc_id"],
|
||||
"document_name": chunk["docnm_kwd"],
|
||||
"dataset_id": chunk["kb_id"],
|
||||
"image_id": chunk.get("image_id", ""),
|
||||
"similarity": chunk["similarity"],
|
||||
"vector_similarity": chunk["vector_similarity"],
|
||||
"term_similarity": chunk["term_similarity"],
|
||||
"positions": chunk["positions"],
|
||||
"id": chunk.get("chunk_id", chunk.get("id")),
|
||||
"content": chunk.get("content_with_weight", chunk.get("content")),
|
||||
"document_id": chunk.get("doc_id", chunk.get("document_id")),
|
||||
"document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
|
||||
"dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
|
||||
"image_id": chunk.get("image_id", chunk.get("img_id")),
|
||||
"positions": chunk.get("positions", chunk.get("position_int")),
|
||||
}
|
||||
|
||||
chunk_list.append(new_chunk)
|
||||
chunk_num += 1
|
||||
messages[message_num]["reference"] = chunk_list
|
||||
@ -224,8 +452,7 @@ def list_agent_session(tenant_id, agent_id):
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_error_data_result(message=f"You don't own the agent {agent_id}.")
|
||||
id = request.args.get("id")
|
||||
if not API4ConversationService.query(id=id, user_id=tenant_id):
|
||||
return get_error_data_result(f"You don't own the session {id}")
|
||||
user_id = request.args.get("user_id")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "update_time")
|
||||
@ -233,7 +460,10 @@ def list_agent_session(tenant_id, agent_id):
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id)
|
||||
# dsl defaults to True in all cases except for False and false
|
||||
include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
|
||||
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id,
|
||||
user_id, include_dsl)
|
||||
if not convs:
|
||||
return get_result(data=[])
|
||||
for conv in convs:
|
||||
@ -254,16 +484,13 @@ def list_agent_session(tenant_id, agent_id):
|
||||
chunks = conv["reference"][chunk_num]["chunks"]
|
||||
for chunk in chunks:
|
||||
new_chunk = {
|
||||
"id": chunk["chunk_id"],
|
||||
"content": chunk["content"],
|
||||
"document_id": chunk["doc_id"],
|
||||
"document_name": chunk["docnm_kwd"],
|
||||
"dataset_id": chunk["kb_id"],
|
||||
"image_id": chunk.get("image_id", ""),
|
||||
"similarity": chunk["similarity"],
|
||||
"vector_similarity": chunk["vector_similarity"],
|
||||
"term_similarity": chunk["term_similarity"],
|
||||
"positions": chunk["positions"],
|
||||
"id": chunk.get("chunk_id", chunk.get("id")),
|
||||
"content": chunk.get("content_with_weight", chunk.get("content")),
|
||||
"document_id": chunk.get("doc_id", chunk.get("document_id")),
|
||||
"document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
|
||||
"dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
|
||||
"image_id": chunk.get("image_id", chunk.get("img_id")),
|
||||
"positions": chunk.get("positions", chunk.get("position_int")),
|
||||
}
|
||||
chunk_list.append(new_chunk)
|
||||
chunk_num += 1
|
||||
@ -299,6 +526,38 @@ def delete(tenant_id, chat_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route('/agents/<agent_id>/sessions', methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete_agent_session(tenant_id, agent_id):
|
||||
req = request.json
|
||||
cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
|
||||
if not cvs:
|
||||
return get_error_data_result(f"You don't own the agent {agent_id}")
|
||||
|
||||
convs = API4ConversationService.query(dialog_id=agent_id)
|
||||
if not convs:
|
||||
return get_error_data_result(f"Agent {agent_id} has no sessions")
|
||||
|
||||
if not req:
|
||||
ids = None
|
||||
else:
|
||||
ids = req.get("ids")
|
||||
|
||||
if not ids:
|
||||
conv_list = []
|
||||
for conv in convs:
|
||||
conv_list.append(conv.id)
|
||||
else:
|
||||
conv_list = ids
|
||||
|
||||
for session_id in conv_list:
|
||||
conv = API4ConversationService.query(id=session_id, dialog_id=agent_id)
|
||||
if not conv:
|
||||
return get_error_data_result(f"The agent doesn't own the session ${session_id}")
|
||||
API4ConversationService.delete_by_id(session_id)
|
||||
return get_result()
|
||||
|
||||
|
||||
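# Hedged usage sketch for the DELETE route above: omitting "ids" (or sending an empty
# body) removes every session of the agent, while "ids" restricts deletion to the given
# session IDs. Host and API key are placeholders.
import requests

BASE = "http://ragflow_address/api/v1"
HEADERS = {"Authorization": "Bearer <RAGFLOW_API_KEY>"}

# Delete two specific sessions.
requests.delete(f"{BASE}/agents/<agent_id>/sessions",
                headers=HEADERS, json={"ids": ["session_id_1", "session_id_2"]})

# Delete all sessions of the agent.
requests.delete(f"{BASE}/agents/<agent_id>/sessions", headers=HEADERS, json={})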
@manager.route('/sessions/ask', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def ask_about(tenant_id):
|
||||
@ -387,7 +646,7 @@ def chatbot_completions(dialog_id):
|
||||
token = token[1]
|
||||
objs = APIToken.query(beta=token)
|
||||
if not objs:
|
||||
return get_error_data_result(message='Token is not valid!"')
|
||||
return get_error_data_result(message='Authentication error: API key is invalid!"')
|
||||
|
||||
if "quote" not in req:
|
||||
req["quote"] = False
|
||||
@ -414,7 +673,7 @@ def agent_bot_completions(agent_id):
|
||||
token = token[1]
|
||||
objs = APIToken.query(beta=token)
|
||||
if not objs:
|
||||
return get_error_data_result(message='Token is not valid!"')
|
||||
return get_error_data_result(message='Authentication error: API key is invalid!"')
|
||||
|
||||
if "quote" not in req:
|
||||
req["quote"] = False
|
||||
@ -429,5 +688,3 @@ def agent_bot_completions(agent_id):
|
||||
|
||||
for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
|
||||
return get_result(data=answer)
|
||||
|
||||
|
||||
|
||||
@ -201,7 +201,7 @@ def new_token():
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
tenant_id = tenants[0].tenant_id
|
||||
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
|
||||
obj = {
|
||||
"tenant_id": tenant_id,
|
||||
"token": generate_confirmation_token(tenant_id),
|
||||
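# The change above selects the tenant whose role is 'owner' instead of the first row.
# A hedged variant of the same selection: next() with a fallback avoids an IndexError
# if no owner row happens to exist. Illustrative only, not the project's code.
tenant_id = next(
    (tenant.tenant_id for tenant in tenants if tenant.role == "owner"),
    tenants[0].tenant_id,  # fall back to the first tenant rather than failing
)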
@ -256,7 +256,7 @@ def token_list():
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
tenant_id = tenants[0].tenant_id
|
||||
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
|
||||
objs = APITokenService.query(tenant_id=tenant_id)
|
||||
objs = [o.to_dict() for o in objs]
|
||||
for o in objs:
|
||||
|
||||
@ -566,7 +566,7 @@ def user_add():
|
||||
email_address = req["email"]
|
||||
|
||||
# Validate the email address
|
||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$", email_address):
|
||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", email_address):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message=f"Invalid email address: {email_address}!",
|
||||
|
||||
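# Quick check of the relaxed TLD rule above: the old pattern capped the top-level domain
# at five characters, the new one only requires two or more.
import re

OLD = r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$"
NEW = r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$"

addr = "user@example.technology"   # ".technology" is a 10-character TLD
print(bool(re.match(OLD, addr)))   # False - rejected by the old pattern
print(bool(re.match(NEW, addr)))   # True  - accepted after the change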
@ -89,6 +89,7 @@ class ParserType(StrEnum):
|
||||
AUDIO = "audio"
|
||||
EMAIL = "email"
|
||||
KG = "knowledge_graph"
|
||||
TAG = "tag"
|
||||
|
||||
|
||||
class FileSource(StrEnum):
|
||||
|
||||
@ -31,7 +31,6 @@ from peewee import (
|
||||
)
|
||||
from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase
|
||||
|
||||
|
||||
from api.db import SerializedType, ParserType
|
||||
from api import settings
|
||||
from api import utils
|
||||
@ -761,6 +760,7 @@ class Document(DataBaseModel):
|
||||
default="")
|
||||
process_begin_at = DateTimeField(null=True, index=True)
|
||||
process_duation = FloatField(default=0)
|
||||
meta_fields = JSONField(null=True, default={})
|
||||
|
||||
run = CharField(
|
||||
max_length=1,
|
||||
@ -843,8 +843,8 @@ class Task(DataBaseModel):
|
||||
id = CharField(max_length=32, primary_key=True)
|
||||
doc_id = CharField(max_length=32, null=False, index=True)
|
||||
from_page = IntegerField(default=0)
|
||||
|
||||
to_page = IntegerField(default=100000000)
|
||||
task_type = CharField(max_length=32, null=False, default="")
|
||||
|
||||
begin_at = DateTimeField(null=True, index=True)
|
||||
process_duation = FloatField(default=0)
|
||||
@ -926,6 +926,7 @@ class Conversation(DataBaseModel):
|
||||
name = CharField(max_length=255, null=True, help_text="converastion name", index=True)
|
||||
message = JSONField(null=True)
|
||||
reference = JSONField(null=True, default=[])
|
||||
user_id = CharField(max_length=255, null=True, help_text="user_id", index=True)
|
||||
|
||||
class Meta:
|
||||
db_table = "conversation"
|
||||
@ -934,7 +935,7 @@ class Conversation(DataBaseModel):
|
||||
class APIToken(DataBaseModel):
|
||||
tenant_id = CharField(max_length=32, null=False, index=True)
|
||||
token = CharField(max_length=255, null=False, index=True)
|
||||
dialog_id = CharField(max_length=32, null=False, index=True)
|
||||
dialog_id = CharField(max_length=32, null=True, index=True)
|
||||
source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)
|
||||
beta = CharField(max_length=255, null=True, index=True)
|
||||
|
||||
@ -1050,11 +1051,6 @@ def migrate_db():
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
DB.execute_sql('ALTER TABLE llm DROP PRIMARY KEY;')
|
||||
DB.execute_sql('ALTER TABLE llm ADD PRIMARY KEY (llm_name,fid);')
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
migrate(
|
||||
migrator.add_column('task', 'retry_count', IntegerField(default=0))
|
||||
@ -1070,13 +1066,13 @@ def migrate_db():
|
||||
pass
|
||||
try:
|
||||
migrate(
|
||||
migrator.add_column("tenant_llm","max_tokens",IntegerField(default=8192,index=True))
|
||||
migrator.add_column("tenant_llm", "max_tokens", IntegerField(default=8192, index=True))
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
migrate(
|
||||
migrator.add_column("api_4_conversation","dsl",JSONField(null=True, default={}))
|
||||
migrator.add_column("api_4_conversation", "dsl", JSONField(null=True, default={}))
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
@ -1105,3 +1101,24 @@ def migrate_db():
)
except Exception:
pass
try:
migrate(
migrator.add_column("conversation", "user_id",
CharField(max_length=255, null=True, help_text="user_id", index=True))
)
except Exception:
pass
try:
migrate(
migrator.add_column("document", "meta_fields",
JSONField(null=True, default={}))
)
except Exception:
pass
try:
migrate(
migrator.add_column("task", "task_type",
CharField(max_length=32, null=False, default=""))
)
except Exception:
pass

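# The blocks above repeat one pattern: attempt an additive schema change and ignore the
# error raised when the column already exists, so migrate_db() stays safe to re-run.
# A hedged sketch of the same idea factored into a helper (illustrative, not project code).
from playhouse.migrate import migrate

def add_column_if_missing(migrator, table, column, field):
    try:
        migrate(migrator.add_column(table, column, field))
    except Exception:
        # Column already present (or the backend refused the DDL); keep going.
        pass

# add_column_if_missing(migrator, "task", "task_type", CharField(max_length=32, null=False, default=""))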
@ -133,7 +133,7 @@ def init_llm_factory():
|
||||
TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
|
||||
TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "cohere"], {"llm_factory": "Cohere"})
|
||||
TenantService.filter_update([1 == 1], {
|
||||
"parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
|
||||
"parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"})
|
||||
## insert openai two embedding models to the current openai user.
|
||||
# print("Start to insert 2 OpenAI embedding models...")
|
||||
tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
|
||||
@ -153,21 +153,14 @@ def init_llm_factory():
|
||||
break
|
||||
for kb_id in KnowledgebaseService.get_all_ids():
|
||||
KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
|
||||
"""
|
||||
drop table llm;
|
||||
drop table llm_factories;
|
||||
update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph';
|
||||
alter table knowledgebase modify avatar longtext;
|
||||
alter table user modify avatar longtext;
|
||||
alter table dialog modify icon longtext;
|
||||
"""
|
||||
|
||||
|
||||
|
||||
def add_graph_templates():
|
||||
dir = os.path.join(get_project_base_directory(), "agent", "templates")
|
||||
for fnm in os.listdir(dir):
|
||||
try:
|
||||
cnvs = json.load(open(os.path.join(dir, fnm), "r"))
|
||||
cnvs = json.load(open(os.path.join(dir, fnm), "r",encoding="utf-8"))
|
||||
try:
|
||||
CanvasTemplateService.save(**cnvs)
|
||||
except Exception:
|
||||
|
||||
@ -41,16 +41,22 @@ class API4ConversationService(CommonService):
|
||||
|
||||
@classmethod
@DB.connection_context()
def get_list(cls,dialog_id, tenant_id,
page_number, items_per_page, orderby, desc, id):
sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
def get_list(cls, dialog_id, tenant_id,
page_number, items_per_page,
orderby, desc, id, user_id=None, include_dsl=True):
if include_dsl:
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
else:
fields = [field for field in cls.model._meta.fields.values() if field.name != 'dsl']
sessions = cls.model.select(*fields).where(cls.model.dialog_id == dialog_id)
if id:
sessions = sessions.where(cls.model.id == id)
if user_id:
sessions = sessions.where(cls.model.user_id == user_id)
if desc:
sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
else:
sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
sessions = sessions.where(cls.model.user_id == tenant_id)
sessions = sessions.paginate(page_number, items_per_page)

return list(sessions.dicts())

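# Hedged sketch of the include_dsl trick above: when the large `dsl` JSON column is not
# needed, the query selects every field except it, which keeps the session listing
# lightweight. The helper below is illustrative; field and model names mirror the snippet.
def select_without(model, excluded=("dsl",)):
    fields = [f for f in model._meta.fields.values() if f.name not in excluded]
    return model.select(*fields)

# sessions = select_without(API4Conversation).where(API4Conversation.dialog_id == dialog_id)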
@ -14,6 +14,7 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import time
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
from agent.canvas import Canvas
|
||||
@ -74,21 +75,32 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
|
||||
else:
|
||||
if "value" in ele:
|
||||
ele.pop("value")
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
temp_dsl = cvs.dsl
|
||||
UserCanvasService.update_by_id(agent_id, cvs.to_dict())
|
||||
else:
|
||||
temp_dsl = json.loads(cvs.dsl)
|
||||
session_id = get_uuid()
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
session_id=get_uuid()
|
||||
conv = {
|
||||
"id": session_id,
|
||||
"dialog_id": cvs.id,
|
||||
"user_id": kwargs.get("user_id", ""),
|
||||
"user_id": kwargs.get("user_id", "") if isinstance(kwargs, dict) else "",
|
||||
"message": [{"role": "assistant", "content": canvas.get_prologue(), "created_at": time.time()}],
|
||||
"source": "agent",
|
||||
"dsl": temp_dsl
|
||||
"dsl": cvs.dsl
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
conv = API4Conversation(**conv)
|
||||
if query:
|
||||
yield "data:" + json.dumps({"code": 0,
|
||||
"message": "",
|
||||
"data": {
|
||||
"session_id": session_id,
|
||||
"answer": canvas.get_prologue(),
|
||||
"reference": [],
|
||||
"param": canvas.get_preset_param()
|
||||
}
|
||||
},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
return
|
||||
else:
|
||||
conv = API4Conversation(**conv)
|
||||
else:
|
||||
e, conv = API4ConversationService.get_by_id(session_id)
|
||||
assert e, "Session not found!"
|
||||
@ -123,7 +135,7 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
|
||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "created_at": time.time(), "id": message_id})
|
||||
canvas.history.append(("assistant", final_ans["content"]))
|
||||
if final_ans.get("reference"):
|
||||
canvas.reference.append(final_ans["reference"])
|
||||
|
||||
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import time
|
||||
from uuid import uuid4
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import Conversation, DB
|
||||
@ -22,18 +23,22 @@ from api.db.services.dialog_service import DialogService, chat
|
||||
from api.utils import get_uuid
|
||||
import json
|
||||
|
||||
from rag.prompts import chunks_format
|
||||
|
||||
|
||||
class ConversationService(CommonService):
|
||||
model = Conversation
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
|
||||
sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
|
||||
def get_list(cls, dialog_id, page_number, items_per_page, orderby, desc, id, name, user_id=None):
|
||||
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
|
||||
if id:
|
||||
sessions = sessions.where(cls.model.id == id)
|
||||
if name:
|
||||
sessions = sessions.where(cls.model.name == name)
|
||||
if user_id:
|
||||
sessions = sessions.where(cls.model.user_id == user_id)
|
||||
if desc:
|
||||
sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
@ -50,17 +55,7 @@ def structure_answer(conv, ans, message_id, session_id):
|
||||
reference = {}
|
||||
ans["reference"] = {}
|
||||
|
||||
def get_value(d, k1, k2):
|
||||
return d.get(k1, d.get(k2))
|
||||
chunk_list = [{
|
||||
"id": get_value(chunk, "chunk_id", "id"),
|
||||
"content": get_value(chunk, "content", "content_with_weight"),
|
||||
"document_id": get_value(chunk, "doc_id", "document_id"),
|
||||
"document_name": get_value(chunk, "docnm_kwd", "document_name"),
|
||||
"dataset_id": get_value(chunk, "kb_id", "dataset_id"),
|
||||
"image_id": get_value(chunk, "image_id", "img_id"),
|
||||
"positions": get_value(chunk, "positions", "position_int"),
|
||||
} for chunk in reference.get("chunks", [])]
|
||||
chunk_list = chunks_format(reference)
|
||||
|
||||
reference["chunks"] = chunk_list
|
||||
ans["id"] = message_id
|
||||
@ -72,9 +67,9 @@ def structure_answer(conv, ans, message_id, session_id):
|
||||
if not conv.message:
|
||||
conv.message = []
|
||||
if not conv.message or conv.message[-1].get("role", "") != "assistant":
|
||||
conv.message.append({"role": "assistant", "content": ans["answer"], "id": message_id})
|
||||
conv.message.append({"role": "assistant", "content": ans["answer"], "created_at": time.time(), "id": message_id})
|
||||
else:
|
||||
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
|
||||
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "created_at": time.time(), "id": message_id}
|
||||
if conv.reference:
|
||||
conv.reference[-1] = reference
|
||||
return ans
|
||||
@ -88,10 +83,11 @@ def completion(tenant_id, chat_id, question, name="New session", session_id=None
|
||||
if not session_id:
|
||||
session_id = get_uuid()
|
||||
conv = {
|
||||
"id":session_id ,
|
||||
"id": session_id,
|
||||
"dialog_id": chat_id,
|
||||
"name": name,
|
||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
|
||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue"), "created_at": time.time()}],
|
||||
"user_id": kwargs.get("user_id", "")
|
||||
}
|
||||
ConversationService.save(**conv)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "",
|
||||
@ -162,7 +158,7 @@ def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwarg
|
||||
"id": session_id,
|
||||
"dialog_id": dialog_id,
|
||||
"user_id": kwargs.get("user_id", ""),
|
||||
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
|
||||
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"], "created_at": time.time()}]
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "",
|
||||
@ -226,4 +222,3 @@ def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwarg
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
break
|
||||
yield answer
|
||||
|
||||
|
||||
@ -15,24 +15,24 @@
|
||||
#
|
||||
import logging
|
||||
import binascii
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
from functools import partial
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from copy import deepcopy
|
||||
from timeit import default_timer as timer
|
||||
import datetime
|
||||
from datetime import timedelta
|
||||
from api.db import LLMType, ParserType,StatusEnum
|
||||
from agentic_reasoning import DeepResearcher
|
||||
from api.db import LLMType, ParserType, StatusEnum
|
||||
from api.db.db_models import Dialog, DB
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
|
||||
from api.db.services.llm_service import TenantLLMService, LLMBundle
|
||||
from api import settings
|
||||
from rag.app.resume import forbidden_select_fields4resume
|
||||
from rag.app.tag import label_question
|
||||
from rag.nlp.search import index_name
|
||||
from rag.utils import rmSpace, num_tokens_from_string, encoder
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from rag.prompts import kb_prompt, message_fit_in, llm_id2llm_type, keyword_extraction, full_question, chunks_format
|
||||
from rag.utils import rmSpace, num_tokens_from_string
|
||||
from rag.utils.tavily_conn import Tavily
|
||||
|
||||
|
||||
class DialogService(CommonService):
|
||||
@ -41,14 +41,14 @@ class DialogService(CommonService):
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, tenant_id,
|
||||
page_number, items_per_page, orderby, desc, id , name):
|
||||
page_number, items_per_page, orderby, desc, id, name):
|
||||
chats = cls.model.select()
|
||||
if id:
|
||||
chats = chats.where(cls.model.id == id)
|
||||
if name:
|
||||
chats = chats.where(cls.model.name == name)
|
||||
chats = chats.where(
|
||||
(cls.model.tenant_id == tenant_id)
|
||||
(cls.model.tenant_id == tenant_id)
|
||||
& (cls.model.status == StatusEnum.VALID.value)
|
||||
)
|
||||
if desc:
|
||||
@ -61,119 +61,84 @@ class DialogService(CommonService):
|
||||
return list(chats.dicts())
|
||||
|
||||
|
||||
def message_fit_in(msg, max_length=4000):
|
||||
def count():
|
||||
nonlocal msg
|
||||
tks_cnts = []
|
||||
for m in msg:
|
||||
tks_cnts.append(
|
||||
{"role": m["role"], "count": num_tokens_from_string(m["content"])})
|
||||
total = 0
|
||||
for m in tks_cnts:
|
||||
total += m["count"]
|
||||
return total
|
||||
def chat_solo(dialog, messages, stream=True):
|
||||
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
c = count()
|
||||
if c < max_length:
|
||||
return c, msg
|
||||
|
||||
msg_ = [m for m in msg[:-1] if m["role"] == "system"]
|
||||
if len(msg) > 1:
|
||||
msg_.append(msg[-1])
|
||||
msg = msg_
|
||||
c = count()
|
||||
if c < max_length:
|
||||
return c, msg
|
||||
|
||||
ll = num_tokens_from_string(msg_[0]["content"])
|
||||
ll2 = num_tokens_from_string(msg_[-1]["content"])
|
||||
if ll / (ll + ll2) > 0.8:
|
||||
m = msg_[0]["content"]
|
||||
m = encoder.decode(encoder.encode(m)[:max_length - ll2])
|
||||
msg[0]["content"] = m
|
||||
return max_length, msg
|
||||
|
||||
m = msg_[1]["content"]
|
||||
m = encoder.decode(encoder.encode(m)[:max_length - ll2])
|
||||
msg[1]["content"] = m
|
||||
return max_length, msg
|
||||
|
||||
|
||||
def llm_id2llm_type(llm_id):
|
||||
llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
|
||||
fnm = os.path.join(get_project_base_directory(), "conf")
|
||||
llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
|
||||
for llm_factory in llm_factories["factory_llm_infos"]:
|
||||
for llm in llm_factory["llm"]:
|
||||
if llm_id == llm["llm_name"]:
|
||||
return llm["model_type"].strip(",")[-1]
|
||||
|
||||
|
||||
def kb_prompt(kbinfos, max_tokens):
|
||||
knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
|
||||
used_token_count = 0
|
||||
chunks_num = 0
|
||||
for i, c in enumerate(knowledges):
|
||||
used_token_count += num_tokens_from_string(c)
|
||||
chunks_num += 1
|
||||
if max_tokens * 0.97 < used_token_count:
|
||||
knowledges = knowledges[:i]
|
||||
break
|
||||
|
||||
doc2chunks = defaultdict(list)
|
||||
for i, ck in enumerate(kbinfos["chunks"]):
|
||||
if i >= chunks_num:
|
||||
break
|
||||
doc2chunks["docnm_kwd"].append(ck["content_with_weight"])
|
||||
|
||||
knowledges = []
|
||||
for nm, chunks in doc2chunks.items():
|
||||
txt = f"Document: {nm} \nContains the following relevant fragments:\n"
|
||||
for i, chunk in enumerate(chunks, 1):
|
||||
txt += f"{i}. {chunk}\n"
|
||||
knowledges.append(txt)
|
||||
return knowledges
|
||||
prompt_config = dialog.prompt_config
|
||||
tts_mdl = None
|
||||
if prompt_config.get("tts"):
|
||||
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
|
||||
msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
|
||||
for m in messages if m["role"] != "system"]
|
||||
if stream:
|
||||
last_ans = ""
|
||||
for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
|
||||
answer = ans
|
||||
delta_ans = ans[len(last_ans):]
|
||||
if num_tokens_from_string(delta_ans) < 16:
|
||||
continue
|
||||
last_ans = answer
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
|
||||
if delta_ans:
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
|
||||
else:
|
||||
answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
|
||||
user_content = msg[-1].get("content", "[content not available]")
|
||||
logging.debug("User: {}|Assistant: {}".format(user_content, answer))
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
|
||||
|
||||
|
||||
def chat(dialog, messages, stream=True, **kwargs):
|
||||
assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
|
||||
st = timer()
|
||||
llm_id, fid = TenantLLMService.split_model_name_and_factory(dialog.llm_id)
|
||||
llm = LLMService.query(llm_name=llm_id) if not fid else LLMService.query(llm_name=llm_id, fid=fid)
|
||||
if not llm:
|
||||
llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not fid else \
|
||||
TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=fid)
|
||||
if not llm:
|
||||
raise LookupError("LLM(%s) not found" % dialog.llm_id)
|
||||
max_tokens = 8192
|
||||
if not dialog.kb_ids:
|
||||
for ans in chat_solo(dialog, messages, stream):
|
||||
yield ans
|
||||
return
|
||||
|
||||
chat_start_ts = timer()
|
||||
|
||||
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
max_tokens = llm[0].max_tokens
|
||||
llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
max_tokens = llm_model_config.get("max_tokens", 8192)
|
||||
|
||||
check_llm_ts = timer()
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
|
||||
embd_nms = list(set([kb.embd_id for kb in kbs]))
|
||||
if len(embd_nms) != 1:
|
||||
embedding_list = list(set([kb.embd_id for kb in kbs]))
|
||||
if len(embedding_list) != 1:
|
||||
yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
|
||||
return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
|
||||
|
||||
is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
|
||||
retr = settings.retrievaler if not is_kg else settings.kg_retrievaler
|
||||
embedding_model_name = embedding_list[0]
|
||||
|
||||
retriever = settings.retrievaler
|
||||
|
||||
questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
|
||||
attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
|
||||
if "doc_ids" in messages[-1]:
|
||||
attachments = messages[-1]["doc_ids"]
|
||||
for m in messages[:-1]:
|
||||
if "doc_ids" in m:
|
||||
attachments.extend(m["doc_ids"])
|
||||
|
||||
embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
|
||||
create_retriever_ts = timer()
|
||||
|
||||
embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_model_name)
|
||||
if not embd_mdl:
|
||||
raise LookupError("Embedding model(%s) not found" % embd_nms[0])
|
||||
raise LookupError("Embedding model(%s) not found" % embedding_model_name)
|
||||
|
||||
bind_embedding_ts = timer()
|
||||
|
||||
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
bind_llm_ts = timer()
|
||||
|
||||
prompt_config = dialog.prompt_config
|
||||
field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
|
||||
tts_mdl = None
|
||||
@ -200,39 +165,73 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
|
||||
else:
|
||||
questions = questions[-1:]
|
||||
refineQ_tm = timer()
|
||||
keyword_tm = timer()
|
||||
|
||||
refine_question_ts = timer()
|
||||
|
||||
rerank_mdl = None
|
||||
if dialog.rerank_id:
|
||||
rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
|
||||
|
||||
for _ in range(len(questions) // 2):
|
||||
questions.append(questions[-1])
|
||||
bind_reranker_ts = timer()
|
||||
generate_keyword_ts = bind_reranker_ts
|
||||
thought = ""
|
||||
kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
|
||||
|
||||
if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
|
||||
kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
|
||||
knowledges = []
|
||||
else:
|
||||
if prompt_config.get("keyword", False):
|
||||
questions[-1] += keyword_extraction(chat_mdl, questions[-1])
|
||||
keyword_tm = timer()
|
||||
generate_keyword_ts = timer()
|
||||
|
||||
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||
kbinfos = retr.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
|
||||
dialog.similarity_threshold,
|
||||
dialog.vector_similarity_weight,
|
||||
doc_ids=attachments,
|
||||
top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
|
||||
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||
|
||||
knowledges = []
|
||||
if prompt_config.get("reasoning", False):
|
||||
reasoner = DeepResearcher(chat_mdl,
|
||||
prompt_config,
|
||||
partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3))
|
||||
|
||||
for think in reasoner.thinking(kbinfos, " ".join(questions)):
|
||||
if isinstance(think, str):
|
||||
thought = think
|
||||
knowledges = [t for t in think.split("\n") if t]
|
||||
elif stream:
|
||||
yield think
|
||||
else:
|
||||
kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
|
||||
dialog.similarity_threshold,
|
||||
dialog.vector_similarity_weight,
|
||||
doc_ids=attachments,
|
||||
top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(" ".join(questions), kbs)
|
||||
)
|
||||
if prompt_config.get("tavily_api_key"):
|
||||
tav = Tavily(prompt_config["tavily_api_key"])
|
||||
tav_res = tav.retrieve_chunks(" ".join(questions))
|
||||
kbinfos["chunks"].extend(tav_res["chunks"])
|
||||
kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
|
||||
if prompt_config.get("use_kg"):
|
||||
ck = settings.kg_retrievaler.retrieval(" ".join(questions),
|
||||
tenant_ids,
|
||||
dialog.kb_ids,
|
||||
embd_mdl,
|
||||
LLMBundle(dialog.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||
|
||||
logging.debug(
|
||||
"{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
|
||||
retrieval_tm = timer()
|
||||
|
||||
retrieval_ts = timer()
|
||||
if not knowledges and prompt_config.get("empty_response"):
|
||||
empty_res = prompt_config["empty_response"]
|
||||
yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
|
||||
yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)}
|
||||
return {"answer": prompt_config["empty_response"], "reference": kbinfos}
|
||||
|
||||
kwargs["knowledge"] = "\n\n------\n\n".join(knowledges)
|
||||
kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
|
||||
gen_conf = dialog.llm_setting
|
||||
|
||||
msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
|
||||
@ -241,7 +240,6 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
|
||||
assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
|
||||
prompt = msg[0]["content"]
|
||||
prompt += "\n\n### Query:\n%s" % " ".join(questions)
|
||||
|
||||
if "max_tokens" in gen_conf:
|
||||
gen_conf["max_tokens"] = min(
|
||||
@ -249,17 +247,23 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
max_tokens - used_token_count)
|
||||
|
||||
def decorate_answer(answer):
|
||||
nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_tm
|
||||
nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions
|
||||
|
||||
refs = []
|
||||
ans = answer.split("</think>")
|
||||
think = ""
|
||||
if len(ans) == 2:
|
||||
think = ans[0] + "</think>"
|
||||
answer = ans[1]
|
||||
if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
|
||||
answer, idx = retr.insert_citations(answer,
|
||||
[ck["content_ltks"]
|
||||
for ck in kbinfos["chunks"]],
|
||||
[ck["vector"]
|
||||
for ck in kbinfos["chunks"]],
|
||||
embd_mdl,
|
||||
tkweight=1 - dialog.vector_similarity_weight,
|
||||
vtweight=dialog.vector_similarity_weight)
|
||||
answer, idx = retriever.insert_citations(answer,
|
||||
[ck["content_ltks"]
|
||||
for ck in kbinfos["chunks"]],
|
||||
[ck["vector"]
|
||||
for ck in kbinfos["chunks"]],
|
||||
embd_mdl,
|
||||
tkweight=1 - dialog.vector_similarity_weight,
|
||||
vtweight=dialog.vector_similarity_weight)
|
||||
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
||||
recall_docs = [
|
||||
d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
||||
@ -274,45 +278,58 @@ def chat(dialog, messages, stream=True, **kwargs):
|
||||
|
||||
        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        done_tm = timer()
        prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
            (refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
            (done_tm - retrieval_tm) * 1000)
        return {"answer": answer, "reference": refs, "prompt": prompt}
        finish_chat_ts = timer()

        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_llm_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
        bind_reranker_time_cost = (bind_reranker_ts - refine_question_ts) * 1000
        generate_keyword_time_cost = (generate_keyword_ts - bind_reranker_ts) * 1000
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
        return {"answer": think+answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}
    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            if thought:
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans):]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
            yield {"answer": thought+answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
            yield {"answer": thought+answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(thought+answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        logging.debug("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res

def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
    user_prompt = """
Table name: {};
Table of database fields are as follows:
{}

Question are as follows:
{}
Please write the SQL, only SQL, without any other explanations or text.
""".format(
        index_name(tenant_id),
        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
@@ -321,10 +338,11 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_promt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
            "temperature": 0.06})
        logging.debug(f"{question} ==> {user_promt} get SQL: {sql}")
        sql = re.sub(r"<think>.*</think>", "", sql, flags=re.DOTALL)
        logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
@@ -352,21 +370,23 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        user_prompt = """
Table name: {};
Table of database fields are as follows:
{}

Question are as follows:
{}
Please write the SQL, only SQL, without any other explanations or text.


The SQL error you provided last time is as follows:
{}

Error issued by database as follows:
{}

Please correct the error and write SQL again, only SQL, without any other explanations or text.
""".format(
            index_name(tenant_id),
            "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
@@ -381,21 +401,21 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):

    docid_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "doc_id"])
    docnm_idx = set([ii for ii, c in enumerate(
    doc_name_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "docnm_kwd"])
    clmn_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]
    column_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]

    # compose markdown table
    clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                        tbl["columns"][i]["name"])) for i in
                            clmn_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
    # compose Markdown table
    columns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                          tbl["columns"][i]["name"])) for i in
                              column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")

    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
    line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + \
        ("|------|" if docid_idx and docid_idx else "")

    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
    if quota:
@@ -404,196 +424,30 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
    if not docid_idx or not doc_name_idx:
        logging.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "answer": "\n".join([columns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []},
            "prompt": sys_prompt
        }

    docid_idx = list(docid_idx)[0]
    docnm_idx = list(docnm_idx)[0]
    doc_name_idx = list(doc_name_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
            doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
        "answer": "\n".join([columns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
                                   doc_aggs.items()]},
        "prompt": sys_prompt
    }

def relevant(tenant_id, llm_id, question, contents: list):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
You are a grader assessing relevance of a retrieved document to a user question.
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
Give a binary score, 'yes' or 'no', to indicate whether the document is relevant to the question.
No other words are needed except 'yes' or 'no'.
"""
    if not contents:
        return False
    contents = "Documents: \n" + " - ".join(contents)
    contents = f"Question: {question}\n" + contents
    if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
        contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
    if ans.lower().find("yes") >= 0:
        return True
    return False

def rewrite(tenant_id, llm_id, question):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
You are an expert at query expansion to generate a paraphrasing of a question.
I can't retrieve relevant information from the knowledge base by using the user's question directly.
You need to expand or paraphrase the user's question in multiple ways, such as using synonymous words/phrases,
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
changing the way of expression, translating the original question into another language (English/Chinese), etc.
Return 5 versions of the question, one of which is a translation.
Just list the questions. No other words are needed.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
    return ans

def keyword_extraction(chat_mdl, content, topn=3):
    prompt = f"""
Role: You're a text analyzer.
Task: extract the most important keywords/phrases of a given piece of text content.
Requirements:
- Summarize the text content, and give the top {topn} important keywords/phrases.
- The keywords MUST be in the language of the given piece of text content.
- The keywords are delimited by ENGLISH COMMA.
- Keywords ONLY in output.

### Text Content
{content}

"""
    msg = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "}
    ]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    if kwd.find("**ERROR**") >= 0:
        return ""
    return kwd

def question_proposal(chat_mdl, content, topn=3):
    prompt = f"""
Role: You're a text analyzer.
Task: propose {topn} questions about a given piece of text content.
Requirements:
- Understand and summarize the text content, and propose the top {topn} important questions.
- The questions SHOULD NOT have overlapping meanings.
- The questions SHOULD cover the main content of the text as much as possible.
- The questions MUST be in the language of the given piece of text content.
- One question per line.
- Questions ONLY in output.

### Text Content
{content}

"""
    msg = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "}
    ]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    if kwd.find("**ERROR**") >= 0:
        return ""
    return kwd

def full_question(tenant_id, llm_id, messages):
|
||||
if llm_id2llm_type(llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
||||
conv = []
|
||||
for m in messages:
|
||||
if m["role"] not in ["user", "assistant"]:
|
||||
continue
|
||||
conv.append("{}: {}".format(m["role"].upper(), m["content"]))
|
||||
conv = "\n".join(conv)
|
||||
today = datetime.date.today().isoformat()
|
||||
yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
|
||||
tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
|
||||
prompt = f"""
|
||||
Role: A helpful assistant
|
||||
|
||||
Task and steps:
|
||||
1. Generate a full user question that would follow the conversation.
|
||||
2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.
|
||||
|
||||
Requirements & Restrictions:
- Text generated MUST be in the same language as the original user's question.
- If the user's latest question is already complete, don't do anything, just return the original question.
- DON'T generate anything except a refined question.
|
||||
######################
|
||||
-Examples-
|
||||
######################
|
||||
|
||||
# Example 1
|
||||
## Conversation
|
||||
USER: What is the name of Donald Trump's father?
|
||||
ASSISTANT: Fred Trump.
|
||||
USER: And his mother?
|
||||
###############
|
||||
Output: What's the name of Donald Trump's mother?
|
||||
|
||||
------------
|
||||
# Example 2
|
||||
## Conversation
|
||||
USER: What is the name of Donald Trump's father?
|
||||
ASSISTANT: Fred Trump.
|
||||
USER: And his mother?
|
||||
ASSISTANT: Mary Trump.
|
||||
User: What's her full name?
|
||||
###############
|
||||
Output: What's the full name of Donald Trump's mother Mary Trump?
|
||||
|
||||
------------
|
||||
# Example 3
|
||||
## Conversation
|
||||
USER: What's the weather today in London?
|
||||
ASSISTANT: Cloudy.
|
||||
USER: What's about tomorrow in Rochester?
|
||||
###############
|
||||
Output: What's the weather in Rochester on {tomorrow}?
|
||||
######################
|
||||
|
||||
# Real Data
|
||||
## Conversation
|
||||
{conv}
|
||||
###############
|
||||
"""
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
|
||||
return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
|
||||
|
||||
|
||||
def tts(tts_mdl, text):
|
||||
if not tts_mdl or not text:
|
||||
return
|
||||
@ -605,16 +459,19 @@ def tts(tts_mdl, text):
|
||||
|
||||
def ask(question, kb_ids, tenant_id):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    embedding_list = list(set([kb.embd_id for kb in kbs]))

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = settings.retrievaler if not is_kg else settings.kg_retrievaler
    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    kbinfos = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids,
                                  1, 12, 0.1, 0.3, aggs=False,
                                  rank_feature=label_question(question, kbs)
                                  )
    knowledges = kb_prompt(kbinfos, max_tokens)
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
@@ -636,14 +493,14 @@ def ask(question, kb_ids, tenant_id):

    def decorate_answer(answer):
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retr.insert_citations(answer,
                                            [ck["content_ltks"]
                                             for ck in kbinfos["chunks"]],
                                            [ck["vector"]
                                             for ck in kbinfos["chunks"]],
                                            embd_mdl,
                                            tkweight=0.7,
                                            vtweight=0.3)
        answer, idx = retriever.insert_citations(answer,
                                                 [ck["content_ltks"]
                                                  for ck in kbinfos["chunks"]],
                                                 [ck["vector"]
                                                  for ck in kbinfos["chunks"]],
                                                 embd_mdl,
                                                 tkweight=0.7,
                                                 vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
@@ -657,6 +514,7 @@ def ask(question, kb_ids, tenant_id):

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        refs["chunks"] = chunks_format(refs)
        return {"answer": answer, "reference": refs}

    answer = ""
@@ -664,4 +522,3 @@ def ask(question, kb_ids, tenant_id):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)

@ -22,13 +22,13 @@ from concurrent.futures import ThreadPoolExecutor
|
||||
from copy import deepcopy
|
||||
from datetime import datetime
|
||||
from io import BytesIO
|
||||
import trio
|
||||
|
||||
from peewee import fn
|
||||
|
||||
from api.db.db_utils import bulk_insert_into_db
|
||||
from api import settings
|
||||
from api.utils import current_timestamp, get_format_time, get_uuid
|
||||
from graphrag.mind_map_extractor import MindMapExtractor
|
||||
from rag.settings import SVR_QUEUE_NAME
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from rag.nlp import search, rag_tokenizer
|
||||
@ -66,8 +66,8 @@ class DocumentService(CommonService):
|
||||
else:
|
||||
docs = docs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
docs = docs.paginate(page_number, items_per_page)
|
||||
count = docs.count()
|
||||
docs = docs.paginate(page_number, items_per_page)
|
||||
return list(docs.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@ -96,20 +96,28 @@ class DocumentService(CommonService):
|
||||
def insert(cls, doc):
|
||||
if not cls.save(**doc):
|
||||
raise RuntimeError("Database error (Document)!")
|
||||
e, doc = cls.get_by_id(doc["id"])
|
||||
if not e:
|
||||
raise RuntimeError("Database error (Document retrieval)!")
|
||||
e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
|
||||
e, kb = KnowledgebaseService.get_by_id(doc["kb_id"])
|
||||
if not KnowledgebaseService.update_by_id(
|
||||
kb.id, {"doc_num": kb.doc_num + 1}):
|
||||
raise RuntimeError("Database error (Knowledgebase)!")
|
||||
return doc
|
||||
return Document(**doc)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def remove_document(cls, doc, tenant_id):
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||
cls.clear_chunk_num(doc.id)
|
||||
try:
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "source_id": doc.id},
|
||||
{"remove": {"source_id": doc.id}},
|
||||
search.index_name(tenant_id), doc.kb_id)
|
||||
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
|
||||
{"removed_kwd": "Y"},
|
||||
search.index_name(tenant_id), doc.kb_id)
|
||||
settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "must_not": {"exists": "source_id"}},
|
||||
search.index_name(tenant_id), doc.kb_id)
|
||||
except Exception:
|
||||
pass
|
||||
return cls.delete_by_id(doc.id)
|
||||
|
||||
@classmethod
|
||||
@ -145,7 +153,7 @@ class DocumentService(CommonService):
|
||||
@DB.connection_context()
|
||||
def get_unfinished_docs(cls):
|
||||
fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg,
|
||||
cls.model.run]
|
||||
cls.model.run, cls.model.parser_id]
|
||||
docs = cls.model.select(*fields) \
|
||||
.where(
|
||||
cls.model.status == StatusEnum.VALID.value,
|
||||
@ -298,9 +306,9 @@ class DocumentService(CommonService):
|
||||
Tenant.asr_id,
|
||||
Tenant.llm_id,
|
||||
)
|
||||
.join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
|
||||
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
|
||||
.where(cls.model.id == doc_id)
|
||||
.join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
|
||||
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
|
||||
.where(cls.model.id == doc_id)
|
||||
)
|
||||
configs = configs.dicts()
|
||||
if not configs:
|
||||
@ -364,6 +372,10 @@ class DocumentService(CommonService):
|
||||
"progress_msg": "Task is queued...",
|
||||
"process_begin_at": get_format_time()
|
||||
})
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_meta_fields(cls, doc_id, meta_fields):
|
||||
return cls.update_by_id(doc_id, {"meta_fields": meta_fields})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
@ -378,30 +390,36 @@ class DocumentService(CommonService):
|
||||
prg = 0
|
||||
finished = True
|
||||
bad = 0
|
||||
has_raptor = False
|
||||
has_graphrag = False
|
||||
e, doc = DocumentService.get_by_id(d["id"])
|
||||
status = doc.run # TaskStatus.RUNNING.value
|
||||
for t in tsks:
|
||||
if 0 <= t.progress < 1:
|
||||
finished = False
|
||||
prg += t.progress if t.progress >= 0 else 0
|
||||
if t.progress_msg not in msg:
|
||||
msg.append(t.progress_msg)
|
||||
if t.progress == -1:
|
||||
bad += 1
|
||||
prg += t.progress if t.progress >= 0 else 0
|
||||
msg.append(t.progress_msg)
|
||||
if t.task_type == "raptor":
|
||||
has_raptor = True
|
||||
elif t.task_type == "graphrag":
|
||||
has_graphrag = True
|
||||
prg /= len(tsks)
|
||||
if finished and bad:
|
||||
prg = -1
|
||||
status = TaskStatus.FAIL.value
|
||||
elif finished:
|
||||
if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(
|
||||
" raptor") < 0:
|
||||
queue_raptor_tasks(d)
|
||||
if d["parser_config"].get("raptor", {}).get("use_raptor") and not has_raptor:
|
||||
queue_raptor_o_graphrag_tasks(d, "raptor")
|
||||
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
||||
elif d["parser_config"].get("graphrag", {}).get("use_graphrag") and not has_graphrag:
|
||||
queue_raptor_o_graphrag_tasks(d, "graphrag")
|
||||
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
||||
msg.append("------ RAPTOR -------")
|
||||
else:
|
||||
status = TaskStatus.DONE.value
|
||||
|
||||
msg = "\n".join(msg)
|
||||
msg = "\n".join(sorted(msg))
|
||||
info = {
|
||||
"process_duation": datetime.timestamp(
|
||||
datetime.now()) -
|
||||
@ -433,7 +451,7 @@ class DocumentService(CommonService):
|
||||
return False
|
||||
|
||||
|
||||
def queue_raptor_tasks(doc):
|
||||
def queue_raptor_o_graphrag_tasks(doc, ty):
|
||||
chunking_config = DocumentService.get_chunking_config(doc["id"])
|
||||
hasher = xxhash.xxh64()
|
||||
for field in sorted(chunking_config.keys()):
|
||||
@ -446,15 +464,16 @@ def queue_raptor_tasks(doc):
|
||||
"doc_id": doc["id"],
|
||||
"from_page": 100000000,
|
||||
"to_page": 100000000,
|
||||
"progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval)."
|
||||
"task_type": ty,
|
||||
"progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty
|
||||
}
|
||||
|
||||
task = new_task()
|
||||
for field in ["doc_id", "from_page", "to_page"]:
|
||||
hasher.update(str(task.get(field, "")).encode("utf-8"))
|
||||
hasher.update(ty.encode("utf-8"))
|
||||
task["digest"] = hasher.hexdigest()
|
||||
bulk_insert_into_db(Task, [task], True)
|
||||
task["type"] = "raptor"
|
||||
assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
|
||||
|
||||
|
||||
@ -473,6 +492,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||
assert e, "Conversation not found!"
|
||||
|
||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||
if not dia.kb_ids:
|
||||
raise LookupError("No knowledge base associated with this conversation. "
|
||||
"Please add a knowledge base before uploading documents")
|
||||
kb_id = dia.kb_ids[0]
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
@ -492,7 +514,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||
ParserType.AUDIO.value: audio,
|
||||
ParserType.EMAIL.value: email
|
||||
}
|
||||
parser_config = {"chunk_token_num": 4096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
|
||||
parser_config = {"chunk_token_num": 4096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
|
||||
exe = ThreadPoolExecutor(max_workers=12)
|
||||
threads = []
|
||||
doc_nm = {}
|
||||
@ -561,10 +583,11 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||
cks = [c for c in docs if c["doc_id"] == doc_id]
|
||||
|
||||
if parser_ids[doc_id] != ParserType.PICTURE.value:
|
||||
from graphrag.general.mind_map_extractor import MindMapExtractor
|
||||
mindmap = MindMapExtractor(llm_bdl)
|
||||
try:
|
||||
mind_map = json.dumps(mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
|
||||
ensure_ascii=False, indent=2)
|
||||
mind_map = trio.run(mindmap, [c["content_with_weight"] for c in docs if c["doc_id"] == doc_id])
|
||||
mind_map = json.dumps(mind_map.output, ensure_ascii=False, indent=2)
|
||||
if len(mind_map) < 32:
|
||||
raise Exception("Few content: " + mind_map)
|
||||
cks.append({
|
||||
@ -573,7 +596,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||
"kb_id": [kb.id],
|
||||
"docnm_kwd": doc_nm[doc_id],
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", doc_nm[doc_id])),
|
||||
"content_ltks": "",
|
||||
"content_ltks": rag_tokenizer.tokenize("summary summarize 总结 概况 file 文件 概括"),
|
||||
"content_with_weight": mind_map,
|
||||
"knowledge_graph_kwd": "mind_map"
|
||||
})
|
||||
@ -595,4 +618,4 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||
DocumentService.increment_chunk_num(
|
||||
doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0)
|
||||
|
||||
return [d["id"] for d, _ in files]
|
||||
return [d["id"] for d, _ in files]
|
||||
|
||||
@ -43,10 +43,7 @@ class File2DocumentService(CommonService):
|
||||
def insert(cls, obj):
|
||||
if not cls.save(**obj):
|
||||
raise RuntimeError("Database error (File)!")
|
||||
e, obj = cls.get_by_id(obj["id"])
|
||||
if not e:
|
||||
raise RuntimeError("Database error (File retrieval)!")
|
||||
return obj
|
||||
return File2Document(**obj)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
@ -63,9 +60,8 @@ class File2DocumentService(CommonService):
|
||||
def update_by_file_id(cls, file_id, obj):
|
||||
obj["update_time"] = current_timestamp()
|
||||
obj["update_date"] = datetime_format(datetime.now())
|
||||
# num = cls.model.update(obj).where(cls.model.id == file_id).execute()
|
||||
e, obj = cls.get_by_id(cls.model.id)
|
||||
return obj
|
||||
cls.model.update(obj).where(cls.model.id == file_id).execute()
|
||||
return File2Document(**obj)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
|
||||
@ -251,10 +251,7 @@ class FileService(CommonService):
|
||||
def insert(cls, file):
|
||||
if not cls.save(**file):
|
||||
raise RuntimeError("Database error (File)!")
|
||||
e, file = cls.get_by_id(file["id"])
|
||||
if not e:
|
||||
raise RuntimeError("Database error (File retrieval)!")
|
||||
return file
|
||||
return File(**file)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
@ -345,6 +342,8 @@ class FileService(CommonService):
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
|
||||
raise RuntimeError("Exceed the maximum file number of a free user!")
|
||||
if len(file.filename) >= 128:
|
||||
raise RuntimeError("Exceed the maximum length of file name!")
|
||||
|
||||
filename = duplicate_name(
|
||||
DocumentService.query,
|
||||
@ -402,7 +401,7 @@ class FileService(CommonService):
|
||||
ParserType.AUDIO.value: audio,
|
||||
ParserType.EMAIL.value: email
|
||||
}
|
||||
parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
|
||||
parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
|
||||
exe = ThreadPoolExecutor(max_workers=12)
|
||||
threads = []
|
||||
for file in file_objs:
|
||||
|
||||
@ -35,7 +35,10 @@ class KnowledgebaseService(CommonService):
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
|
||||
page_number, items_per_page, orderby, desc, keywords):
|
||||
page_number, items_per_page,
|
||||
orderby, desc, keywords,
|
||||
parser_id=None
|
||||
):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.avatar,
|
||||
@ -67,6 +70,8 @@ class KnowledgebaseService(CommonService):
|
||||
cls.model.tenant_id == user_id))
|
||||
& (cls.model.status == StatusEnum.VALID.value)
|
||||
)
|
||||
if parser_id:
|
||||
kbs = kbs.where(cls.model.parser_id == parser_id)
|
||||
if desc:
|
||||
kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
|
||||
@ -72,10 +72,12 @@ class TenantLLMService(CommonService):
|
||||
return model_name, None
|
||||
if len(arr) > 2:
|
||||
return "@".join(arr[0:-1]), arr[-1]
|
||||
|
||||
# model name must be xxx@yyy
|
||||
try:
|
||||
fact = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
|
||||
fact = set([f["name"] for f in fact])
|
||||
if arr[-1] not in fact:
|
||||
model_factories = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
|
||||
model_providers = set([f["name"] for f in model_factories])
|
||||
if arr[-1] not in model_providers:
|
||||
return model_name, None
|
||||
return arr[0], arr[-1]
|
||||
except Exception as e:
|
||||
@ -84,8 +86,7 @@ class TenantLLMService(CommonService):
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def model_instance(cls, tenant_id, llm_type,
|
||||
llm_name=None, lang="Chinese"):
|
||||
def get_model_config(cls, tenant_id, llm_type, llm_name=None):
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
raise LookupError("Tenant not found")
|
||||
@ -113,16 +114,22 @@ class TenantLLMService(CommonService):
|
||||
if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
|
||||
llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
|
||||
if llm and llm[0].fid in ["Youdao", "FastEmbed", "BAAI"]:
|
||||
model_config = {"llm_factory": llm[0].fid, "api_key":"", "llm_name": mdlnm, "api_base": ""}
|
||||
model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": mdlnm, "api_base": ""}
|
||||
if not model_config:
|
||||
if mdlnm == "flag-embedding":
|
||||
model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "",
|
||||
"llm_name": llm_name, "api_base": ""}
|
||||
"llm_name": llm_name, "api_base": ""}
|
||||
else:
|
||||
if not mdlnm:
|
||||
raise LookupError(f"Type of {llm_type} model is not set.")
|
||||
raise LookupError("Model({}) not authorized".format(mdlnm))
|
||||
return model_config
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def model_instance(cls, tenant_id, llm_type,
|
||||
llm_name=None, lang="Chinese"):
|
||||
model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
|
||||
if llm_type == LLMType.EMBEDDING.value:
|
||||
if model_config["llm_factory"] not in EmbeddingModel:
|
||||
return
|
||||
@ -171,40 +178,39 @@ class TenantLLMService(CommonService):
|
||||
def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
raise LookupError("Tenant not found")
|
||||
logging.error(f"Tenant not found: {tenant_id}")
|
||||
return 0
|
||||
|
||||
if llm_type == LLMType.EMBEDDING.value:
|
||||
mdlnm = tenant.embd_id
|
||||
elif llm_type == LLMType.SPEECH2TEXT.value:
|
||||
mdlnm = tenant.asr_id
|
||||
elif llm_type == LLMType.IMAGE2TEXT.value:
|
||||
mdlnm = tenant.img2txt_id
|
||||
elif llm_type == LLMType.CHAT.value:
|
||||
mdlnm = tenant.llm_id if not llm_name else llm_name
|
||||
elif llm_type == LLMType.RERANK:
|
||||
mdlnm = tenant.rerank_id if not llm_name else llm_name
|
||||
elif llm_type == LLMType.TTS:
|
||||
mdlnm = tenant.tts_id if not llm_name else llm_name
|
||||
else:
|
||||
assert False, "LLM type error"
|
||||
llm_map = {
|
||||
LLMType.EMBEDDING.value: tenant.embd_id,
|
||||
LLMType.SPEECH2TEXT.value: tenant.asr_id,
|
||||
LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
|
||||
LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
|
||||
LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
|
||||
LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name
|
||||
}
|
||||
|
||||
mdlnm = llm_map.get(llm_type)
|
||||
if mdlnm is None:
|
||||
logging.error(f"LLM type error: {llm_type}")
|
||||
return 0
|
||||
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)
|
||||
|
||||
num = 0
|
||||
try:
|
||||
if llm_factory:
|
||||
tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory)
|
||||
else:
|
||||
tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name)
|
||||
if not tenant_llms:
|
||||
return num
|
||||
else:
|
||||
tenant_llm = tenant_llms[0]
|
||||
num = cls.model.update(used_tokens=tenant_llm.used_tokens + used_tokens)\
|
||||
.where(cls.model.tenant_id == tenant_id, cls.model.llm_factory == tenant_llm.llm_factory, cls.model.llm_name == llm_name)\
|
||||
.execute()
|
||||
num = cls.model.update(
|
||||
used_tokens=cls.model.used_tokens + used_tokens
|
||||
).where(
|
||||
cls.model.tenant_id == tenant_id,
|
||||
cls.model.llm_name == llm_name,
|
||||
cls.model.llm_factory == llm_factory if llm_factory else True
|
||||
).execute()
|
||||
except Exception:
|
||||
logging.exception("TenantLLMService.increase_usage got exception")
|
||||
logging.exception(
|
||||
"TenantLLMService.increase_usage got exception,Failed to update used_tokens for tenant_id=%s, llm_name=%s",
|
||||
tenant_id, llm_name)
|
||||
return 0
|
||||
|
||||
return num
|
||||
|
||||
@classmethod
|
||||
@ -218,7 +224,7 @@ class TenantLLMService(CommonService):
|
||||
return list(objs)
|
||||
|
||||
|
||||
class LLMBundle(object):
|
||||
class LLMBundle:
|
||||
def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese"):
|
||||
self.tenant_id = tenant_id
|
||||
self.llm_type = llm_type
|
||||
@ -227,11 +233,9 @@ class LLMBundle(object):
|
||||
tenant_id, llm_type, llm_name, lang=lang)
|
||||
assert self.mdl, "Can't find model for {}/{}/{}".format(
|
||||
tenant_id, llm_type, llm_name)
|
||||
self.max_length = 8192
|
||||
for lm in LLMService.query(llm_name=llm_name):
|
||||
self.max_length = lm.max_tokens
|
||||
break
|
||||
|
||||
model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
|
||||
self.max_length = model_config.get("max_tokens", 8192)
|
||||
|
||||
def encode(self, texts: list):
|
||||
embeddings, used_tokens = self.mdl.encode(texts)
|
||||
if not TenantLLMService.increase_usage(
|
||||
@ -274,11 +278,11 @@ class LLMBundle(object):
|
||||
|
||||
def tts(self, text):
|
||||
for chunk in self.mdl.tts(text):
|
||||
if isinstance(chunk,int):
|
||||
if isinstance(chunk, int):
|
||||
if not TenantLLMService.increase_usage(
|
||||
self.tenant_id, self.llm_type, chunk, self.llm_name):
|
||||
logging.error(
|
||||
"LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
|
||||
self.tenant_id, self.llm_type, chunk, self.llm_name):
|
||||
logging.error(
|
||||
"LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
|
||||
return
|
||||
yield chunk
|
||||
|
||||
@ -287,7 +291,8 @@ class LLMBundle(object):
|
||||
if isinstance(txt, int) and not TenantLLMService.increase_usage(
|
||||
self.tenant_id, self.llm_type, used_tokens, self.llm_name):
|
||||
logging.error(
|
||||
"LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
|
||||
"LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name,
|
||||
used_tokens))
|
||||
return txt
|
||||
|
||||
def chat_streamly(self, system, history, gen_conf):
|
||||
@ -296,6 +301,7 @@ class LLMBundle(object):
|
||||
if not TenantLLMService.increase_usage(
|
||||
self.tenant_id, self.llm_type, txt, self.llm_name):
|
||||
logging.error(
|
||||
"LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
|
||||
"LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name,
|
||||
txt))
|
||||
return
|
||||
yield txt
|
||||
|
||||
@ -16,7 +16,7 @@
|
||||
import os
|
||||
import random
|
||||
import xxhash
|
||||
import bisect
|
||||
from datetime import datetime
|
||||
|
||||
from api.db.db_utils import bulk_insert_into_db
|
||||
from deepdoc.parser import PdfParser
|
||||
@ -68,6 +68,7 @@ class TaskService(CommonService):
|
||||
Knowledgebase.language,
|
||||
Knowledgebase.embd_id,
|
||||
Knowledgebase.pagerank,
|
||||
Knowledgebase.parser_config.alias("kb_parser_config"),
|
||||
Tenant.img2txt_id,
|
||||
Tenant.asr_id,
|
||||
Tenant.llm_id,
|
||||
@ -84,7 +85,7 @@ class TaskService(CommonService):
|
||||
if not docs:
|
||||
return None
|
||||
|
||||
msg = "\nTask has been received."
|
||||
msg = f"\n{datetime.now().strftime('%H:%M:%S')} Task has been received."
|
||||
prog = random.random() / 10.0
|
||||
if docs[0]["retry_count"] >= 3:
|
||||
msg = "\nERROR: Task is abandoned after 3 times attempts."
|
||||
@ -181,7 +182,7 @@ class TaskService(CommonService):
|
||||
if os.environ.get("MACOS"):
|
||||
if info["progress_msg"]:
|
||||
task = cls.model.get_by_id(id)
|
||||
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 1000)
|
||||
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
|
||||
cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
|
||||
if "progress" in info:
|
||||
cls.model.update(progress=info["progress"]).where(
|
||||
@ -192,7 +193,7 @@ class TaskService(CommonService):
|
||||
with DB.lock("update_progress", -1):
|
||||
if info["progress_msg"]:
|
||||
task = cls.model.get_by_id(id)
|
||||
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 1000)
|
||||
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
|
||||
cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
|
||||
if "progress" in info:
|
||||
cls.model.update(progress=info["progress"]).where(
|
||||
@ -204,16 +205,16 @@ def queue_tasks(doc: dict, bucket: str, name: str):
|
||||
def new_task():
|
||||
return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}
|
||||
|
||||
tsks = []
|
||||
parse_task_array = []
|
||||
|
||||
if doc["type"] == FileType.PDF.value:
|
||||
file_bin = STORAGE_IMPL.get(bucket, name)
|
||||
do_layout = doc["parser_config"].get("layout_recognize", True)
|
||||
do_layout = doc["parser_config"].get("layout_recognize", "DeepDOC")
|
||||
pages = PdfParser.total_page_number(doc["name"], file_bin)
|
||||
page_size = doc["parser_config"].get("task_page_size", 12)
|
||||
if doc["parser_id"] == "paper":
|
||||
page_size = doc["parser_config"].get("task_page_size", 22)
|
||||
if doc["parser_id"] in ["one", "knowledge_graph"] or not do_layout:
|
||||
if doc["parser_id"] in ["one", "knowledge_graph"] or do_layout != "DeepDOC":
|
||||
page_size = 10 ** 9
|
||||
page_ranges = doc["parser_config"].get("pages") or [(1, 10 ** 5)]
|
||||
for s, e in page_ranges:
|
||||
@ -224,7 +225,7 @@ def queue_tasks(doc: dict, bucket: str, name: str):
|
||||
task = new_task()
|
||||
task["from_page"] = p
|
||||
task["to_page"] = min(p + page_size, e)
|
||||
tsks.append(task)
|
||||
parse_task_array.append(task)
|
||||
|
||||
elif doc["parser_id"] == "table":
|
||||
file_bin = STORAGE_IMPL.get(bucket, name)
|
||||
@ -233,14 +234,18 @@ def queue_tasks(doc: dict, bucket: str, name: str):
|
||||
task = new_task()
|
||||
task["from_page"] = i
|
||||
task["to_page"] = min(i + 3000, rn)
|
||||
tsks.append(task)
|
||||
parse_task_array.append(task)
|
||||
else:
|
||||
tsks.append(new_task())
|
||||
parse_task_array.append(new_task())
|
||||
|
||||
chunking_config = DocumentService.get_chunking_config(doc["id"])
|
||||
for task in tsks:
|
||||
for task in parse_task_array:
|
||||
hasher = xxhash.xxh64()
|
||||
for field in sorted(chunking_config.keys()):
|
||||
if field == "parser_config":
|
||||
for k in ["raptor", "graphrag"]:
|
||||
if k in chunking_config[field]:
|
||||
del chunking_config[field][k]
|
||||
hasher.update(str(chunking_config[field]).encode("utf-8"))
|
||||
for field in ["doc_id", "from_page", "to_page"]:
|
||||
hasher.update(str(task.get(field, "")).encode("utf-8"))
|
||||
@ -251,7 +256,7 @@ def queue_tasks(doc: dict, bucket: str, name: str):
|
||||
prev_tasks = TaskService.get_tasks(doc["id"])
|
||||
ck_num = 0
|
||||
if prev_tasks:
|
||||
for task in tsks:
|
||||
for task in parse_task_array:
|
||||
ck_num += reuse_prev_task_chunks(task, prev_tasks, chunking_config)
|
||||
TaskService.filter_delete([Task.doc_id == doc["id"]])
|
||||
chunk_ids = []
|
||||
@ -263,31 +268,38 @@ def queue_tasks(doc: dict, bucket: str, name: str):
|
||||
chunking_config["kb_id"])
|
||||
DocumentService.update_by_id(doc["id"], {"chunk_num": ck_num})
|
||||
|
||||
bulk_insert_into_db(Task, tsks, True)
|
||||
bulk_insert_into_db(Task, parse_task_array, True)
|
||||
DocumentService.begin2parse(doc["id"])
|
||||
|
||||
tsks = [task for task in tsks if task["progress"] < 1.0]
|
||||
for t in tsks:
|
||||
unfinished_task_array = [task for task in parse_task_array if task["progress"] < 1.0]
|
||||
for unfinished_task in unfinished_task_array:
|
||||
assert REDIS_CONN.queue_product(
|
||||
SVR_QUEUE_NAME, message=t
|
||||
SVR_QUEUE_NAME, message=unfinished_task
|
||||
), "Can't access Redis. Please check the Redis' status."
|
||||
|
||||
|
||||
def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
|
||||
idx = bisect.bisect_left(prev_tasks, (task.get("from_page", 0), task.get("digest", "")),
|
||||
key=lambda x: (x.get("from_page", 0), x.get("digest", "")))
|
||||
idx = 0
|
||||
while idx < len(prev_tasks):
|
||||
prev_task = prev_tasks[idx]
|
||||
if prev_task.get("from_page", 0) == task.get("from_page", 0) \
|
||||
and prev_task.get("digest", 0) == task.get("digest", ""):
|
||||
break
|
||||
idx += 1
|
||||
|
||||
if idx >= len(prev_tasks):
|
||||
return 0
|
||||
prev_task = prev_tasks[idx]
|
||||
if prev_task["progress"] < 1.0 or prev_task["digest"] != task["digest"] or not prev_task["chunk_ids"]:
|
||||
if prev_task["progress"] < 1.0 or not prev_task["chunk_ids"]:
|
||||
return 0
|
||||
task["chunk_ids"] = prev_task["chunk_ids"]
|
||||
task["progress"] = 1.0
|
||||
if "from_page" in task and "to_page" in task:
|
||||
if "from_page" in task and "to_page" in task and int(task['to_page']) - int(task['from_page']) >= 10 ** 6:
|
||||
task["progress_msg"] = f"Page({task['from_page']}~{task['to_page']}): "
|
||||
else:
|
||||
task["progress_msg"] = ""
|
||||
task["progress_msg"] += "reused previous task's chunks."
|
||||
task["progress_msg"] = " ".join(
|
||||
[datetime.now().strftime("%H:%M:%S"), task["progress_msg"], "Reused previous task's chunks."])
|
||||
prev_task["chunk_ids"] = ""
|
||||
|
||||
return len(task["chunk_ids"].split())
|
||||
|
||||
@ -13,6 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import hashlib
|
||||
from datetime import datetime
|
||||
|
||||
import peewee
|
||||
@ -24,6 +25,7 @@ from api.db.db_models import User, Tenant
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import get_uuid, current_timestamp, datetime_format
|
||||
from api.db import StatusEnum
|
||||
from rag.settings import MINIO
|
||||
|
||||
|
||||
class UserService(CommonService):
|
||||
@ -126,6 +128,12 @@ class TenantService(CommonService):
|
||||
if num == 0:
|
||||
raise LookupError("Tenant not found which is supposed to be there")
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def user_gateway(cls, tenant_id):
|
||||
hashobj = hashlib.sha256(tenant_id.encode("utf-8"))
|
||||
return int(hashobj.hexdigest(), 16)%len(MINIO)
|
||||
|
||||
|
||||
class UserTenantService(CommonService):
|
||||
model = UserTenant
|
||||
|
||||
@ -28,6 +28,7 @@ import sys
|
||||
import time
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import threading
|
||||
|
||||
from werkzeug.serving import run_simple
|
||||
from api import settings
|
||||
@ -41,16 +42,28 @@ from api.db.init_data import init_web_data
|
||||
from api.versions import get_ragflow_version
|
||||
from api.utils import show_configs
|
||||
from rag.settings import print_rag_settings
|
||||
from rag.utils.redis_conn import RedisDistributedLock
|
||||
|
||||
stop_event = threading.Event()
|
||||
|
||||
def update_progress():
|
||||
while True:
|
||||
time.sleep(3)
|
||||
redis_lock = RedisDistributedLock("update_progress", timeout=60)
|
||||
while not stop_event.is_set():
|
||||
try:
|
||||
if not redis_lock.acquire():
|
||||
continue
|
||||
DocumentService.update_progress()
|
||||
stop_event.wait(6)
|
||||
except Exception:
|
||||
logging.exception("update_progress exception")
|
||||
finally:
|
||||
redis_lock.release()
|
||||
|
||||
def signal_handler(sig, frame):
|
||||
logging.info("Received interrupt signal, shutting down...")
|
||||
stop_event.set()
|
||||
time.sleep(1)
|
||||
sys.exit(0)
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.info(r"""
|
||||
@ -96,6 +109,9 @@ if __name__ == '__main__':
|
||||
RuntimeConfig.init_env()
|
||||
RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
thread = ThreadPoolExecutor(max_workers=1)
|
||||
thread.submit(update_progress)
|
||||
|
||||
@ -112,4 +128,6 @@ if __name__ == '__main__':
|
||||
)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
stop_event.set()
|
||||
time.sleep(1)
|
||||
os.kill(os.getpid(), signal.SIGKILL)
|
||||
|
||||
@ -66,81 +66,34 @@ def init_settings():
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
|
||||
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
|
||||
LLM = get_base_config("user_default_llm", {})
|
||||
LLM_DEFAULT_MODELS = LLM.get("default_models", {})
|
||||
LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
|
||||
LLM_BASE_URL = LLM.get("base_url")
|
||||
|
||||
global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
|
||||
if not LIGHTEN:
|
||||
default_llm = {
|
||||
"Tongyi-Qianwen": {
|
||||
"chat_model": "qwen-plus",
|
||||
"embedding_model": "text-embedding-v2",
|
||||
"image2text_model": "qwen-vl-max",
|
||||
"asr_model": "paraformer-realtime-8k-v1",
|
||||
},
|
||||
"OpenAI": {
|
||||
"chat_model": "gpt-3.5-turbo",
|
||||
"embedding_model": "text-embedding-ada-002",
|
||||
"image2text_model": "gpt-4-vision-preview",
|
||||
"asr_model": "whisper-1",
|
||||
},
|
||||
"Azure-OpenAI": {
|
||||
"chat_model": "gpt-35-turbo",
|
||||
"embedding_model": "text-embedding-ada-002",
|
||||
"image2text_model": "gpt-4-vision-preview",
|
||||
"asr_model": "whisper-1",
|
||||
},
|
||||
"ZHIPU-AI": {
|
||||
"chat_model": "glm-3-turbo",
|
||||
"embedding_model": "embedding-2",
|
||||
"image2text_model": "glm-4v",
|
||||
"asr_model": "",
|
||||
},
|
||||
"Ollama": {
|
||||
"chat_model": "qwen-14B-chat",
|
||||
"embedding_model": "flag-embedding",
|
||||
"image2text_model": "",
|
||||
"asr_model": "",
|
||||
},
|
||||
"Moonshot": {
|
||||
"chat_model": "moonshot-v1-8k",
|
||||
"embedding_model": "",
|
||||
"image2text_model": "",
|
||||
"asr_model": "",
|
||||
},
|
||||
"DeepSeek": {
|
||||
"chat_model": "deepseek-chat",
|
||||
"embedding_model": "",
|
||||
"image2text_model": "",
|
||||
"asr_model": "",
|
||||
},
|
||||
"VolcEngine": {
|
||||
"chat_model": "",
|
||||
"embedding_model": "",
|
||||
"image2text_model": "",
|
||||
"asr_model": "",
|
||||
},
|
||||
"BAAI": {
|
||||
"chat_model": "",
|
||||
"embedding_model": "BAAI/bge-large-zh-v1.5",
|
||||
"image2text_model": "",
|
||||
"asr_model": "",
|
||||
"rerank_model": "BAAI/bge-reranker-v2-m3",
|
||||
}
|
||||
}
|
||||
EMBEDDING_MDL = "BAAI/bge-large-zh-v1.5@BAAI"
|
||||
|
||||
if LLM_FACTORY:
|
||||
CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"] + f"@{LLM_FACTORY}"
|
||||
ASR_MDL = default_llm[LLM_FACTORY]["asr_model"] + f"@{LLM_FACTORY}"
|
||||
IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"] + f"@{LLM_FACTORY}"
|
||||
EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"] + "@BAAI"
|
||||
RERANK_MDL = default_llm["BAAI"]["rerank_model"] + "@BAAI"
|
||||
if LLM_DEFAULT_MODELS:
|
||||
CHAT_MDL = LLM_DEFAULT_MODELS.get("chat_model", CHAT_MDL)
|
||||
EMBEDDING_MDL = LLM_DEFAULT_MODELS.get("embedding_model", EMBEDDING_MDL)
|
||||
RERANK_MDL = LLM_DEFAULT_MODELS.get("rerank_model", RERANK_MDL)
|
||||
ASR_MDL = LLM_DEFAULT_MODELS.get("asr_model", ASR_MDL)
|
||||
IMAGE2TEXT_MDL = LLM_DEFAULT_MODELS.get("image2text_model", IMAGE2TEXT_MDL)
|
||||
|
||||
# factory can be specified in the config name with "@". LLM_FACTORY will be used if not specified
|
||||
CHAT_MDL = CHAT_MDL + (f"@{LLM_FACTORY}" if "@" not in CHAT_MDL and CHAT_MDL != "" else "")
|
||||
EMBEDDING_MDL = EMBEDDING_MDL + (f"@{LLM_FACTORY}" if "@" not in EMBEDDING_MDL and EMBEDDING_MDL != "" else "")
|
||||
RERANK_MDL = RERANK_MDL + (f"@{LLM_FACTORY}" if "@" not in RERANK_MDL and RERANK_MDL != "" else "")
|
||||
ASR_MDL = ASR_MDL + (f"@{LLM_FACTORY}" if "@" not in ASR_MDL and ASR_MDL != "" else "")
|
||||
IMAGE2TEXT_MDL = IMAGE2TEXT_MDL + (
|
||||
f"@{LLM_FACTORY}" if "@" not in IMAGE2TEXT_MDL and IMAGE2TEXT_MDL != "" else "")
|
||||
|
||||
global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
|
||||
API_KEY = LLM.get("api_key", "")
|
||||
PARSERS = LLM.get(
|
||||
"parsers",
|
||||
"naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email")
|
||||
"naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email,tag:Tag")
|
||||
|
||||
HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
|
||||
HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")
|
||||
|
||||
@ -24,6 +24,7 @@ import time
|
||||
import uuid
|
||||
import requests
|
||||
import logging
|
||||
import copy
|
||||
from enum import Enum, IntEnum
|
||||
import importlib
|
||||
from Cryptodome.PublicKey import RSA
|
||||
@ -65,6 +66,16 @@ CONFIGS = read_config()
|
||||
def show_configs():
|
||||
msg = f"Current configs, from {conf_realpath(SERVICE_CONF)}:"
|
||||
for k, v in CONFIGS.items():
|
||||
if isinstance(v, dict):
|
||||
if "password" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["password"] = "*" * 8
|
||||
if "access_key" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["access_key"] = "*" * 8
|
||||
if "secret_key" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["secret_key"] = "*" * 8
|
||||
msg += f"\n\t{k}: {v}"
|
||||
logging.info(msg)
|
||||
|
||||
@ -346,6 +357,26 @@ def decrypt(line):
|
||||
line), "Fail to decrypt password!").decode('utf-8')
|
||||
|
||||
|
||||
def decrypt2(crypt_text):
|
||||
from base64 import b64decode, b16decode
|
||||
from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
|
||||
from Crypto.PublicKey import RSA
|
||||
decode_data = b64decode(crypt_text)
|
||||
if len(decode_data) == 127:
|
||||
hex_fixed = '00' + decode_data.hex()
|
||||
decode_data = b16decode(hex_fixed.upper())
|
||||
|
||||
file_path = os.path.join(
|
||||
file_utils.get_project_base_directory(),
|
||||
"conf",
|
||||
"private.pem")
|
||||
pem = open(file_path).read()
|
||||
rsa_key = RSA.importKey(pem, "Welcome")
|
||||
cipher = Cipher_PKCS1_v1_5.new(rsa_key)
|
||||
decrypt_text = cipher.decrypt(decode_data, None)
|
||||
return (b64decode(decrypt_text)).decode()
|
||||
|
||||
|
||||
def download_img(url):
|
||||
if not url:
|
||||
return ""
|
||||
|
||||
@ -98,14 +98,10 @@ def get_exponential_backoff_interval(retries, full_jitter=False):
|
||||
|
||||
def get_data_error_result(code=settings.RetCode.DATA_ERROR,
|
||||
message='Sorry! Data missing!'):
|
||||
import re
|
||||
logging.exception(Exception(message))
|
||||
result_dict = {
|
||||
"code": code,
|
||||
"message": re.sub(
|
||||
r"rag",
|
||||
"seceum",
|
||||
message,
|
||||
flags=re.IGNORECASE)}
|
||||
"message": message}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
@ -125,6 +121,10 @@ def server_error_response(e):
|
||||
if len(e.args) > 1:
|
||||
return get_json_result(
|
||||
code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
|
||||
if repr(e).find("index_not_found_exception") >= 0:
|
||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR,
|
||||
message="No chunk found, please upload file and parse it.")
|
||||
|
||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))
|
||||
|
||||
|
||||
@ -173,6 +173,7 @@ def validate_request(*args, **kwargs):
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def not_allowed_parameters(*params):
|
||||
def decorator(f):
|
||||
def wrapper(*args, **kwargs):
|
||||
@ -182,7 +183,9 @@ def not_allowed_parameters(*params):
|
||||
return get_json_result(
|
||||
code=settings.RetCode.ARGUMENT_ERROR, message=f"Parameter {param} isn't allowed")
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
@ -207,6 +210,7 @@ def get_json_result(code=settings.RetCode.SUCCESS, message='success', data=None)
|
||||
response = {"code": code, "message": message, "data": data}
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
def apikey_required(func):
|
||||
@wraps(func)
|
||||
def decorated_function(*args, **kwargs):
|
||||
@ -250,8 +254,7 @@ def construct_response(code=settings.RetCode.SUCCESS,
|
||||
|
||||
|
||||
def construct_result(code=settings.RetCode.DATA_ERROR, message='data is missing'):
|
||||
import re
|
||||
result_dict = {"code": code, "message": re.sub(r"rag", "seceum", message, flags=re.IGNORECASE)}
|
||||
result_dict = {"code": code, "message": message}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
@ -283,14 +286,18 @@ def construct_error_response(e):
|
||||
def token_required(func):
|
||||
@wraps(func)
|
||||
def decorated_function(*args, **kwargs):
|
||||
authorization_list=flask_request.headers.get('Authorization').split()
|
||||
authorization_str = flask_request.headers.get('Authorization')
|
||||
if not authorization_str:
|
||||
return get_json_result(data=False, message="`Authorization` can't be empty")
|
||||
authorization_list = authorization_str.split()
|
||||
if len(authorization_list) < 2:
|
||||
return get_json_result(data=False,message="Please check your authorization format.")
|
||||
return get_json_result(data=False, message="Please check your authorization format.")
|
||||
token = authorization_list[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Token is not valid!', code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
data=False, message='Authentication error: API key is invalid!',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
kwargs['tenant_id'] = objs[0].tenant_id
|
||||
return func(*args, **kwargs)

@@ -311,14 +318,9 @@ def get_result(code=settings.RetCode.SUCCESS, message="", data=None):


def get_error_data_result(message='Sorry! Data missing!', code=settings.RetCode.DATA_ERROR,
                          ):
    import re
    result_dict = {
        "code": code,
        "message": re.sub(
            r"rag",
            "seceum",
            message,
            flags=re.IGNORECASE)}
        "message": message}
    response = {}
    for key, value in result_dict.items():
        if value is None and key != "code":
@@ -333,35 +335,68 @@ def generate_confirmation_token(tenent_id):
    return "ragflow-" + serializer.dumps(get_uuid(), salt=tenent_id)[2:34]


def valid(permission,valid_permission,language,valid_language,chunk_method,valid_chunk_method):
    if valid_parameter(permission,valid_permission):
        return valid_parameter(permission,valid_permission)
    if valid_parameter(language,valid_language):
        return valid_parameter(language,valid_language)
    if valid_parameter(chunk_method,valid_chunk_method):
        return valid_parameter(chunk_method,valid_chunk_method)
def valid(permission, valid_permission, chunk_method, valid_chunk_method):
    if valid_parameter(permission, valid_permission):
        return valid_parameter(permission, valid_permission)
    if valid_parameter(chunk_method, valid_chunk_method):
        return valid_parameter(chunk_method, valid_chunk_method)


def valid_parameter(parameter,valid_values):


def valid_parameter(parameter, valid_values):
    if parameter and parameter not in valid_values:
        return get_error_data_result(f"'{parameter}' is not in {valid_values}")
        return get_error_data_result(f"'{parameter}' is not in {valid_values}")


def get_parser_config(chunk_method,parser_config):


def get_parser_config(chunk_method, parser_config):
    if parser_config:
        return parser_config
    if not chunk_method:
        chunk_method = "naive"
    key_mapping={"naive":{"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False,"layout_recognize": True, "raptor": {"use_raptor": False}},
                 "qa":{"raptor":{"use_raptor":False}},
                 "resume":None,
                 "manual":{"raptor":{"use_raptor":False}},
                 "table":None,
                 "paper":{"raptor":{"use_raptor":False}},
                 "book":{"raptor":{"use_raptor":False}},
                 "laws":{"raptor":{"use_raptor":False}},
                 "presentation":{"raptor":{"use_raptor":False}},
                 "one":None,
                 "knowledge_graph":{"chunk_token_num":8192,"delimiter":"\\n!?;。;!?","entity_types":["organization","person","location","event","time"]},
                 "email":None,
                 "picture":None}
    parser_config=key_mapping[chunk_method]
    return parser_config
    key_mapping = {
        "naive": {"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False, "layout_recognize": "DeepDOC",
                  "raptor": {"use_raptor": False}},
        "qa": {"raptor": {"use_raptor": False}},
        "tag": None,
        "resume": None,
        "manual": {"raptor": {"use_raptor": False}},
        "table": None,
        "paper": {"raptor": {"use_raptor": False}},
        "book": {"raptor": {"use_raptor": False}},
        "laws": {"raptor": {"use_raptor": False}},
        "presentation": {"raptor": {"use_raptor": False}},
        "one": None,
        "knowledge_graph": {"chunk_token_num": 8192, "delimiter": "\\n!?;。;!?",
                            "entity_types": ["organization", "person", "location", "event", "time"]},
        "email": None,
        "picture": None}
    parser_config = key_mapping[chunk_method]
    return parser_config
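A small usage sketch of the defaults above, assuming the get_parser_config shown in this hunk is importable; the values are just the ones visible here.

default_cfg = get_parser_config(None, None)           # no chunk_method falls back to "naive"
assert default_cfg["chunk_token_num"] == 128
assert default_cfg["layout_recognize"] == "DeepDOC"

custom = get_parser_config("qa", {"chunk_token_num": 256})
assert custom == {"chunk_token_num": 256}             # an explicit config wins over the defaults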


def valid_parser_config(parser_config):
    if not parser_config:
        return
    scopes = set([
        "chunk_token_num",
        "delimiter",
        "raptor",
        "graphrag",
        "layout_recognize",
        "task_page_size",
        "pages",
        "html4excel",
        "auto_keywords",
        "auto_questions",
        "tag_kb_ids",
        "topn_tags",
        "filename_embd_weight"
    ])
    for k in parser_config.keys():
        assert k in scopes, f"Abnormal 'parser_config'. Invalid key: {k}"

    assert 1 <= parser_config.get("chunk_token_num", 1) < 100000000, "chunk_token_num should be in range from 1 to 100000000"
    assert 1 <= parser_config.get("task_page_size", 1) < 100000000, "task_page_size should be in range from 1 to 100000000"
    assert 0 <= parser_config.get("auto_keywords", 0) < 32, "auto_keywords should be in range from 0 to 32"
    assert 0 <= parser_config.get("auto_questions", 0) < 10, "auto_questions should be in range from 0 to 10"
    assert 0 <= parser_config.get("topn_tags", 0) < 10, "topn_tags should be in range from 0 to 10"
    assert isinstance(parser_config.get("html4excel", False), bool), "html4excel should be True or False"
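A hedged sketch of how a caller might surface these assertion-style checks as an error payload rather than a crash; the wrapper below is illustrative, not code from this diff.

def check_parser_config(parser_config):
    # Turn the AssertionError raised by valid_parser_config into a plain result dict.
    try:
        valid_parser_config(parser_config)
    except AssertionError as e:
        return {"ok": False, "error": str(e)}
    return {"ok": True, "error": None}

print(check_parser_config({"chunk_token_num": 0}))    # rejected: below the allowed range
print(check_parser_config({"unknown_key": 1}))        # rejected: key not in scopes
print(check_parser_config({"auto_keywords": 8}))      # accepted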

@@ -17,6 +17,8 @@ import base64
import json
import os
import re
import sys
import threading
from io import BytesIO

import pdfplumber
@@ -30,6 +32,10 @@ from api.constants import IMG_BASE64_PREFIX
PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
RAG_BASE = os.getenv("RAG_BASE")

LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()


def get_project_base_directory(*args):
    global PROJECT_BASE
@@ -175,19 +181,21 @@ def thumbnail_img(filename, blob):
    """
    filename = filename.lower()
    if re.match(r".*\.pdf$", filename):
        pdf = pdfplumber.open(BytesIO(blob))
        buffered = BytesIO()
        resolution = 32
        img = None
        for _ in range(10):
            # https://github.com/jsvine/pdfplumber?tab=readme-ov-file#creating-a-pageimage-with-to_image
            pdf.pages[0].to_image(resolution=resolution).annotated.save(buffered, format="png")
            img = buffered.getvalue()
            if len(img) >= 64000 and resolution >= 2:
                resolution = resolution / 2
                buffered = BytesIO()
            else:
                break
        with sys.modules[LOCK_KEY_pdfplumber]:
            pdf = pdfplumber.open(BytesIO(blob))
            buffered = BytesIO()
            resolution = 32
            img = None
            for _ in range(10):
                # https://github.com/jsvine/pdfplumber?tab=readme-ov-file#creating-a-pageimage-with-to_image
                pdf.pages[0].to_image(resolution=resolution).annotated.save(buffered, format="png")
                img = buffered.getvalue()
                if len(img) >= 64000 and resolution >= 2:
                    resolution = resolution / 2
                    buffered = BytesIO()
                else:
                    break
            pdf.close()
        return img

    elif re.match(r".*\.(jpg|jpeg|png|tif|gif|icon|ico|webp)$", filename):
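The hunk above wraps pdfplumber rendering in a process-wide lock stashed in sys.modules, so every importer shares one Lock instance even if the module is loaded under several names. A minimal stand-alone illustration of that pattern follows; the helper name is hypothetical, and the assumption is that concurrent rasterization from worker threads is not safe.

import sys
import threading

LOCK_KEY = "global_shared_lock_pdfplumber"
if LOCK_KEY not in sys.modules:
    # sys.modules acts as a process-wide registry, so re-imports reuse the same Lock.
    sys.modules[LOCK_KEY] = threading.Lock()

def render_thumbnail(pdf_bytes: bytes) -> bytes:
    # Serialize rendering while holding the shared lock, as thumbnail_img does above.
    with sys.modules[LOCK_KEY]:
        ...  # open the PDF and rasterize page 0 here
        return b""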

@@ -18,6 +18,8 @@ import os.path
import logging
from logging.handlers import RotatingFileHandler

initialized_root_logger = False

def get_project_base_directory():
    PROJECT_BASE = os.path.abspath(
        os.path.join(
@@ -29,10 +31,13 @@ def get_project_base_directory():
    return PROJECT_BASE

def initRootLogger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
    logger = logging.getLogger()
    if logger.hasHandlers():
    global initialized_root_logger
    if initialized_root_logger:
        return
    initialized_root_logger = True

    logger = logging.getLogger()
    logger.handlers.clear()
    log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))

    os.makedirs(os.path.dirname(log_path), exist_ok=True)
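Usage sketch of the idempotence introduced above: a second call becomes a no-op guarded by the module-level flag, instead of relying on hasHandlers(). The basename is illustrative.

import logging

initRootLogger("ragflow_server")
initRootLogger("ragflow_server")   # returns immediately; handlers are not duplicated

logging.info("root logger configured exactly once")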

@@ -1,3 +1,19 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import base64
import os
import sys

@@ -1,4 +1,23 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re
import socket
from urllib.parse import urlparse
import ipaddress
import json
import base64

@@ -76,5 +95,25 @@ def __get_pdf_from_html(
    return base64.b64decode(result["data"])


def is_private_ip(ip: str) -> bool:
    try:
        ip_obj = ipaddress.ip_address(ip)
        return ip_obj.is_private
    except ValueError:
        return False

def is_valid_url(url: str) -> bool:
    return bool(re.match(r"(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", url))
    if not re.match(r"(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", url):
        return False
    parsed_url = urlparse(url)
    hostname = parsed_url.hostname

    if not hostname:
        return False
    try:
        ip = socket.gethostbyname(hostname)
        if is_private_ip(ip):
            return False
    except socket.gaierror:
        return False
    return True
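A quick sketch of the behaviour this adds: URLs that pass the regex but resolve to private or loopback addresses are now rejected, which acts as a basic SSRF guard. Results for real hostnames depend on DNS resolution at runtime.

print(is_valid_url("https://github.com/infiniflow/ragflow"))  # True for a public host
print(is_valid_url("http://127.0.0.1:8080/admin"))            # False: loopback/private IP
print(is_valid_url("not a url"))                               # False: fails the scheme regex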
@@ -9,9 +9,10 @@
    "title_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "title_sm_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "name_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
    "tag_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
    "important_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
    "question_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "content_with_weight": {"type": "varchar", "default": ""},
    "content_ltks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
@@ -24,8 +25,18 @@
    "weight_int": {"type": "integer", "default": 0},
    "weight_flt": {"type": "float", "default": 0.0},
    "rank_int": {"type": "integer", "default": 0},
    "rank_flt": {"type": "float", "default": 0},
    "available_int": {"type": "integer", "default": 1},
    "knowledge_graph_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "pagerank_fea": {"type": "integer", "default": 0}
    "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
    "pagerank_fea": {"type": "integer", "default": 0},
    "tag_feas": {"type": "varchar", "default": ""},

    "from_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "to_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
    "source_id": {"type": "varchar", "default": ""},
    "n_hop_with_weight": {"type": "varchar", "default": ""},
    "removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"}
}

File diff suppressed because it is too large
@@ -5,25 +5,25 @@ mysql:
  name: 'rag_flow'
  user: 'root'
  password: 'infini_rag_flow'
  host: 'mysql'
  host: 'localhost'
  port: 5455
  max_connections: 100
  stale_timeout: 30
minio:
  user: 'rag_flow'
  password: 'infini_rag_flow'
  host: 'minio:9000'
  host: 'localhost:9000'
es:
  hosts: 'http://es01:1200'
  hosts: 'http://localhost:1200'
  username: 'elastic'
  password: 'infini_rag_flow'
infinity:
  uri: 'infinity:23817'
  uri: 'localhost:23817'
  db_name: 'default_db'
redis:
  db: 1
  password: 'infini_rag_flow'
  host: 'redis:6379'
  password: 'infini_rag_flow'
  host: 'localhost:6379'

# postgres:
#   name: 'rag_flow'
@@ -34,10 +34,15 @@ redis:
#   max_connections: 100
#   stale_timeout: 30
# s3:
#   endpoint: 'endpoint'
#   access_key: 'access_key'
#   secret_key: 'secret_key'
#   region: 'region'
# oss:
#   access_key: 'access_key'
#   secret_key: 'secret_key'
#   endpoint_url: 'http://oss-cn-hangzhou.aliyuncs.com'
#   region: 'cn-hangzhou'
#   bucket: 'bucket_name'
# azure:
#   auth_type: 'sas'
#   container_url: 'container_url'

@@ -113,4 +113,4 @@ The four document formats PDF, DOCX, EXCEL and PPT all have corresponding parsers. The most complex

### Resume

A resume is a very complex kind of file. A resume made up of unstructured text with all sorts of layouts can be broken down into structured data consisting of nearly a hundred fields. We have not opened up the parser yet, because the processing method is only enabled after the parsing step.
A resume is a very complex document. A resume composed of unstructured text in various formats can be parsed into structured data containing nearly a hundred fields. We have not enabled the parser yet, because the processing method only starts after the parsing step.

@@ -1,2 +1,18 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from beartype.claw import beartype_this_package
beartype_this_package()

@@ -1,3 +1,6 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

@@ -1,3 +1,6 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

@@ -11,20 +11,40 @@
# limitations under the License.
#

from openpyxl import load_workbook
import logging
from openpyxl import load_workbook, Workbook
import sys
from io import BytesIO

from rag.nlp import find_codec

import pandas as pd


class RAGFlowExcelParser:
    def html(self, fnm, chunk_rows=256):
        if isinstance(fnm, str):
            wb = load_workbook(fnm)
        else:
            wb = load_workbook(BytesIO(fnm))
    @staticmethod
    def _load_excel_to_workbook(file_like_object):
        try:
            return load_workbook(file_like_object)
        except Exception as e:
            logging.info(f"****wxy: openpyxl load error: {e}, try pandas instead")
            try:
                df = pd.read_excel(file_like_object)
                wb = Workbook()
                ws = wb.active
                ws.title = "Data"
                for col_num, column_name in enumerate(df.columns, 1):
                    ws.cell(row=1, column=col_num, value=column_name)
                for row_num, row in enumerate(df.values, 2):
                    for col_num, value in enumerate(row, 1):
                        ws.cell(row=row_num, column=col_num, value=value)
                return wb
            except Exception as e_pandas:
                raise Exception(f"****wxy: pandas read error: {e_pandas}, original openpyxl error: {e}")
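A hedged usage sketch of the fallback above: openpyxl is tried first, and when it cannot read the payload (for instance an old-style .xls saved under an .xlsx name), pandas re-reads it and the data is copied into an in-memory Workbook. The file name is illustrative.

from io import BytesIO

with open("report.xlsx", "rb") as f:            # hypothetical spreadsheet
    wb = RAGFlowExcelParser._load_excel_to_workbook(BytesIO(f.read()))

for sheetname in wb.sheetnames:
    print(sheetname, wb[sheetname].max_row, "rows")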

    def html(self, fnm, chunk_rows=256):
        file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
        wb = RAGFlowExcelParser._load_excel_to_workbook(file_like_object)
        tb_chunks = []
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
@@ -42,7 +62,7 @@ class RAGFlowExcelParser:
            tb += f"<table><caption>{sheetname}</caption>"
            tb += tb_rows_0
            for r in list(
                rows[1 + chunk_i * chunk_rows : 1 + (chunk_i + 1) * chunk_rows]
                rows[1 + chunk_i * chunk_rows: 1 + (chunk_i + 1) * chunk_rows]
            ):
                tb += "<tr>"
                for i, c in enumerate(r):
@@ -57,10 +77,9 @@ class RAGFlowExcelParser:
        return tb_chunks

    def __call__(self, fnm):
        if isinstance(fnm, str):
            wb = load_workbook(fnm)
        else:
            wb = load_workbook(BytesIO(fnm))
        file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
        wb = RAGFlowExcelParser._load_excel_to_workbook(file_like_object)

        res = []
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
@@ -85,12 +104,12 @@ class RAGFlowExcelParser:
    @staticmethod
    def row_number(fnm, binary):
        if fnm.split(".")[-1].lower().find("xls") >= 0:
            wb = load_workbook(BytesIO(binary))
            wb = RAGFlowExcelParser._load_excel_to_workbook(BytesIO(binary))
            total = 0
            for sheetname in wb.sheetnames:
                ws = wb[sheetname]
                total += len(list(ws.rows))
            return total
            return total

        if fnm.split(".")[-1].lower() in ["csv", "txt"]:
            encoding = find_codec(binary)
@@ -101,3 +120,4 @@ class RAGFlowExcelParser:
if __name__ == "__main__":
    psr = RAGFlowExcelParser()
    psr(sys.argv[1])


@@ -1,4 +1,7 @@
# -*- coding: utf-8 -*-
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -11,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#

from rag.nlp import find_codec
import readability
import html_text

@@ -1,4 +1,20 @@
# -*- coding: utf-8 -*-
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# The following documents are mainly referenced, and only adaptation modifications have been made
# from https://github.com/langchain-ai/langchain/blob/master/libs/text-splitters/langchain_text_splitters/json.py

Some files were not shown because too many files have changed in this diff