Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Compare commits: 414 commits (887651e5fa … 51d9bde5a3)
.gitattributes (vendored), 3 changes

@@ -1 +1,2 @@
 *.sh text eol=lf
+docker/entrypoint.sh text eol=lf executable
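The new `.gitattributes` line pins LF line endings for the entrypoint script regardless of the committer's platform, which matters because the file is executed inside a Linux container. A quick way to confirm the rules took effect is `git check-attr`, run from the repository root (a sketch; `docker/launch_backend_service.sh` is just an example path taken from the README changes below):

```bash
# Prints each requested attribute for each path, e.g. "docker/entrypoint.sh: eol: lf".
git check-attr text eol executable -- docker/entrypoint.sh docker/launch_backend_service.sh
```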
.github/workflows/tests.yml (vendored), 38 changes

@@ -15,6 +15,8 @@ on:
       - 'docs/**'
       - '*.md'
       - '*.mdx'
+  schedule:
+    - cron: '0 16 * * *' # This schedule runs every 16:00:00Z(00:00:00+08:00)

 # https://docs.github.com/en/actions/using-jobs/using-concurrency
 concurrency:
@@ -48,9 +50,9 @@ jobs:

       # https://github.com/astral-sh/ruff-action
       - name: Static check with Ruff
-        uses: astral-sh/ruff-action@v2
+        uses: astral-sh/ruff-action@v3
         with:
-          version: ">=0.8.2"
+          version: ">=0.11.x"
           args: "check"

       - name: Build ragflow:nightly-slim
@@ -86,7 +88,12 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python && uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api

       - name: Run frontend api tests against Elasticsearch
         run: |
@@ -96,7 +103,7 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          cd sdk/python && UV_LINK_MODE=copy uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

       - name: Run http api tests against Elasticsearch
         run: |
@@ -106,7 +113,12 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow"
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api

       - name: Stop ragflow:nightly
         if: always() # always run this step even if previous steps failed
@@ -125,7 +137,12 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python && DOC_ENGINE=infinity uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api

       - name: Run frontend api tests against Infinity
         run: |
@@ -135,7 +152,7 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          cd sdk/python && UV_LINK_MODE=copy uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

       - name: Run http api tests against Infinity
         run: |
@@ -145,7 +162,12 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && DOC_ENGINE=infinity pytest -s --tb=short -m "not slow"
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && DOC_ENGINE=infinity uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api

       - name: Stop ragflow:nightly
         if: always() # always run this step even if previous steps failed
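The recurring pattern in these workflow steps picks a deeper test level (`p3`) for the nightly cron run and a lighter one (`p2`) for pushes and pull requests, then installs only the `test` dependency group before invoking pytest. A minimal local reproduction of the HTTP-API step might look like this (a sketch: it assumes a checked-out repo with a `test` group in `pyproject.toml` and a RAGFlow server already listening):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Mirror the CI logic: scheduled (cron) runs exercise the deeper p3 level,
# everything else sticks to p2. GITHUB_EVENT_NAME is empty outside Actions.
if [[ "${GITHUB_EVENT_NAME:-}" == "schedule" ]]; then
  export HTTP_API_TEST_LEVEL=p3
else
  export HTTP_API_TEST_LEVEL=p2
fi

# Install only the test group, then run the HTTP API suite at the chosen level.
UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen
uv run --only-group test --no-default-groups \
  pytest -s --tb=short --level="${HTTP_API_TEST_LEVEL}" test/testcases/test_http_api
```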
.gitignore (vendored), 151 changes

@@ -36,9 +36,160 @@ sdk/python/ragflow.egg-info/
 sdk/python/build/
 sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/

 # Exclude dep files
 libssl*.deb
 tika-server*.jar*
 cl100k_base.tiktoken
 chrome*
+huggingface.co/
+nltk_data/
+
+# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
+.lh/
+.venv
+docker/data
+
+
+#--------------------------------------------------#
+# The following was generated with gitignore.nvim: #
+#--------------------------------------------------#
+# Gitignore for the following technologies: Node
+
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional stylelint cache
+.stylelintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line in if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+.temp
+
+# Docusaurus cache and generated files
+.docusaurus
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v2
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
+
+# Serverless Webpack directories
+.webpack/
+
+# SvelteKit build / generate output
+.svelte-kit
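Several of the new rules are easy to misread, notably the hash-like glob, which matches any name containing a run of ten hex characters anywhere in it. `git check-ignore -v` reports which rule claims a given path, which is handy when a file unexpectedly stops showing up in `git status` (a sketch; the file names are made up for illustration):

```bash
# -v prints the .gitignore file and line responsible for each matching path.
git check-ignore -v \
  nltk_data/tokenizers.zip \
  9b5ad71b2ce5302211f9c61530b329a4922fc6a4 \
  web/node_modules/react/index.js
```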
.pre-commit-config.yaml (new file), 19 lines

@@ -0,0 +1,19 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: mixed-line-ending
+      - id: check-symlinks
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.6
+    hooks:
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
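With this file in place, the hooks run automatically on every `git commit` once they are registered locally. A typical first-time setup, matching the `pipx install uv pre-commit` and `pre-commit install` steps the READMEs add further down (a minimal sketch):

```bash
pipx install pre-commit    # or: uv tool install pre-commit
pre-commit install         # register the git hook in .git/hooks/pre-commit
pre-commit run --all-files # one-off run across the whole tree, useful after adding the config
```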
@@ -59,7 +59,8 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     apt install -y libatk-bridge2.0-0 && \
     apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
     apt install -y libjemalloc-dev && \
-    apt install -y python3-pip pipx nginx unzip curl wget git vim less
+    apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
+    apt install -y ghostscript

 RUN if [ "$NEED_MIRROR" == "1" ]; then \
     pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
@@ -199,6 +200,7 @@ COPY graphrag graphrag
 COPY agentic_reasoning agentic_reasoning
 COPY pyproject.toml uv.lock ./
+COPY mcp mcp
 COPY plugin plugin

 COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 COPY docker/entrypoint.sh ./

@@ -33,6 +33,7 @@ ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
 ADD ./agent ./agent
 ADD ./graphrag ./graphrag
+ADD ./plugin ./plugin

 RUN dnf install -y openmpi openmpi-devel python3-openmpi
 ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
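One way to confirm that the newly added packages actually land in the image is to probe a locally built tag after the build (a sketch; `infiniflow/ragflow:nightly-slim` is the tag used elsewhere in this changeset, so swap in whatever you built):

```bash
# gs --version checks ghostscript; ldconfig -p lists registered shared libraries.
docker run --rm infiniflow/ragflow:nightly-slim bash -c \
  'gs --version && ldconfig -p | grep -i jemalloc'
```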
README.md, 73 changes

@@ -5,13 +5,13 @@
 </div>

 <p align="center">
-  <a href="./README.md">English</a> |
-  <a href="./README_zh.md">简体中文</a> |
-  <a href="./README_tzh.md">繁体中文</a> |
-  <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a> |
-  <a href="./README_id.md">Bahasa Indonesia</a> |
-  <a href="/README_pt_br.md">Português (Brasil)</a>
+  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DBEDFA"></a>
+  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
+  <a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
+  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
+  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
+  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
+  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
 </p>

 <p align="center">
@@ -22,7 +22,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
+    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,6 +30,9 @@
   <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
     <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
   </a>
+  <a href="https://deepwiki.com/infiniflow/ragflow">
+    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
+  </a>
 </p>

 <h4 align="center">
@@ -40,6 +43,12 @@
   <a href="https://demo.ragflow.io">Demo</a>
 </h4>

+#
+
+<div align="center">
+  <a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+</div>
+
 <details open>
 <summary><b>📕 Table of Contents</b></summary>

@@ -78,11 +87,11 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

 ## 🔥 Latest Updates

+- 2025-05-23 Adds a Python/JavaScript code executor component to Agent.
+- 2025-05-05 Supports cross-language query.
 - 2025-03-19 Supports using a multi-modal model to make sense of images within PDF or DOCX files.
 - 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
 - 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
 - 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
 - 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
-- 2024-08-22 Support text to SQL statements through RAG.

 ## 🎉 Stay Tuned
@@ -137,8 +146,10 @@ releases! 🌟
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-  > If you have not installed Docker on your local machine (Windows, Mac, or Linux),
-  > see [Install Docker Engine](https://docs.docker.com/engine/install/).
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use the code executor (sandbox) feature of RAGFlow.
+
+> [!TIP]
+> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

 ### 🚀 Start up the server

@@ -176,7 +187,7 @@ releases! 🌟
 > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
 > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

-> The command below downloads the `v0.18.0-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` for the full edition `v0.18.0`.
+> The command below downloads the `v0.19.1-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.19.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1` for the full edition `v0.19.1`.

 ```bash
 $ cd ragflow/docker
@@ -189,8 +200,8 @@ releases! 🌟

 | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
 |-------------------|-----------------|-----------------------|--------------------------|
-| v0.18.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-| v0.18.0-slim      | ≈2              | ❌                    | Stable release           |
+| v0.19.1           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.1-slim      | ≈2              | ❌                    | Stable release           |
 | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
 | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

@@ -296,7 +307,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 1. Install uv, or skip this step if it is already installed:

    ```bash
-   pipx install uv
+   pipx install uv pre-commit
    ```

 2. Clone the source code and install Python dependencies:
@@ -305,6 +316,8 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
    uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+   uv run download_deps.py
+   pre-commit install
    ```

 3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:
@@ -316,7 +329,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

    ```
-   127.0.0.1 es01 infinity mysql minio redis
+   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
    ```

 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
@@ -325,7 +338,16 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    ```bash
    export HF_ENDPOINT=https://hf-mirror.com
    ```

-5. Launch backend service:
+5. If your operating system does not have jemalloc, please install it as follows:
+
+   ```bash
+   # ubuntu
+   sudo apt-get install libjemalloc-dev
+   # centos
+   sudo yum install jemalloc
+   ```
+
+6. Launch backend service:

    ```bash
    source .venv/bin/activate
@@ -333,12 +355,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    bash docker/launch_backend_service.sh
    ```

-6. Install frontend dependencies:
+7. Install frontend dependencies:

    ```bash
    cd web
    npm install
    ```
-7. Launch frontend service:
+
+8. Launch frontend service:

    ```bash
    npm run dev
@@ -348,6 +372,13 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

    

+9. Stop RAGFlow front-end and back-end service after development is complete:
+
+   ```bash
+   pkill -f "ragflow_server.py|task_executor.py"
+   ```
+

 ## 📚 Documentation

 - [Quickstart](https://ragflow.io/docs/dev/)
@@ -371,4 +402,4 @@ See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
 ## 🙌 Contributing

 RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community.
-If you would like to be a part, review our [Contribution Guidelines](./CONTRIBUTING.md) first.
+If you would like to be a part, review our [Contribution Guidelines](https://ragflow.io/docs/dev/contributing) first.
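The `/etc/hosts` change above adds `sandbox-executor-manager` to the list of aliases that must resolve to `127.0.0.1` for source launches. A quick sanity check that every alias resolves before starting the backend (a sketch using standard glibc tooling):

```bash
# getent consults /etc/hosts via NSS, so it exercises the same lookup the services use.
for h in es01 infinity mysql minio redis sandbox-executor-manager; do
  getent hosts "$h" || echo "missing /etc/hosts entry: $h"
done
```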
README_id.md, 68 changes

@@ -5,13 +5,13 @@
 </div>

 <p align="center">
-  <a href="./README.md">English</a> |
-  <a href="./README_zh.md">简体中文</a> |
-  <a href="./README_tzh.md">繁体中文</a> |
-  <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a> |
-  <a href="./README_id.md">Bahasa Indonesia</a> |
-  <a href="/README_pt_br.md">Português (Brasil)</a>
+  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
+  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
+  <a href="./README_tzh.md"><img alt="繁體中文版自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
+  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
+  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
+  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DBEDFA"></a>
+  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
 </p>

 <p align="center">
@@ -22,7 +22,7 @@
     <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
+    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
@@ -30,6 +30,9 @@
   <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
     <img height="21" src="https://img.shields.io/badge/Lisensi-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="Lisensi">
   </a>
+  <a href="https://deepwiki.com/infiniflow/ragflow">
+    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
+  </a>
 </p>

 <h4 align="center">
@@ -40,6 +43,8 @@
   <a href="https://demo.ragflow.io">Demo</a>
 </h4>

+#
+
 <details open>
 <summary><b>📕 Daftar Isi </b> </summary>

@@ -75,11 +80,11 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

 ## 🔥 Pembaruan Terbaru

+- 2025-05-23 Menambahkan komponen pelaksana kode Python/JS ke Agen.
+- 2025-05-05 Mendukung kueri lintas bahasa.
 - 2025-03-19 Mendukung penggunaan model multi-modal untuk memahami gambar di dalam file PDF atau DOCX.
 - 2025-02-28 dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
 - 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
 - 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
 - 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
-- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.

 ## 🎉 Tetap Terkini
@@ -132,6 +137,10 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): Hanya diperlukan jika Anda ingin menggunakan fitur eksekutor kode (sandbox) dari RAGFlow.
+
+> [!TIP]
+> Jika Anda belum menginstal Docker di komputer lokal Anda (Windows, Mac, atau Linux), lihat [Install Docker Engine](https://docs.docker.com/engine/install/).

 ### 🚀 Menjalankan Server

@@ -169,7 +178,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 > Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
 > Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).

-> Perintah di bawah ini mengunduh edisi v0.18.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.18.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0 untuk edisi lengkap v0.18.0.
+> Perintah di bawah ini mengunduh edisi v0.19.1-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.19.1-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1 untuk edisi lengkap v0.19.1.

 ```bash
 $ cd ragflow/docker
@@ -182,8 +191,8 @@ $ docker compose -f docker-compose.yml up -d

 | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
 | ----------------- | --------------- | --------------------- | ------------------------ |
-| v0.18.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-| v0.18.0-slim      | ≈2              | ❌                    | Stable release           |
+| v0.19.1           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.1-slim      | ≈2              | ❌                    | Stable release           |
 | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
 | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

@@ -262,7 +271,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 1. Instal uv, atau lewati langkah ini jika sudah terinstal:

    ```bash
-   pipx install uv
+   pipx install uv pre-commit
    ```

 2. Clone kode sumber dan instal dependensi Python:
@@ -271,6 +280,8 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
    uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+   uv run download_deps.py
+   pre-commit install
    ```

 3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:
@@ -282,7 +293,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:

    ```
-   127.0.0.1 es01 infinity mysql minio redis
+   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
    ```

 4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:
@@ -291,7 +302,16 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    export HF_ENDPOINT=https://hf-mirror.com
    ```

-5. Jalankan aplikasi backend:
+5. Jika sistem operasi Anda tidak memiliki jemalloc, instal sebagai berikut:
+
+   ```bash
+   # ubuntu
+   sudo apt-get install libjemalloc-dev
+   # centos
+   sudo yum install jemalloc
+   ```
+
+6. Jalankan aplikasi backend:

    ```bash
    source .venv/bin/activate
@@ -299,12 +319,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    bash docker/launch_backend_service.sh
    ```

-6. Instal dependensi frontend:
+7. Instal dependensi frontend:

    ```bash
    cd web
    npm install
    ```
-7. Jalankan aplikasi frontend:
+
+8. Jalankan aplikasi frontend:

    ```bash
    npm run dev
@@ -314,6 +336,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

    

+9. Hentikan layanan front-end dan back-end RAGFlow setelah pengembangan selesai:
+
+   ```bash
+   pkill -f "ragflow_server.py|task_executor.py"
+   ```
+

 ## 📚 Dokumentasi

 - [Quickstart](https://ragflow.io/docs/dev/)
@@ -337,4 +367,4 @@ Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)
 ## 🙌 Kontribusi

 RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
-Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).
+Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](https://ragflow.io/docs/dev/contributing).
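The shutdown step added across all of these READMEs relies on `pkill -f`, which matches the pattern against each process's full command line rather than just the executable name. If you want to preview what would be killed, `pgrep` accepts the same pattern (a sketch):

```bash
# Dry run: list the matching processes with their full command lines.
pgrep -af "ragflow_server.py|task_executor.py"

# Then stop them, as the READMEs' step 9 does.
pkill -f "ragflow_server.py|task_executor.py"
```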
README_ja.md, 68 changes

@@ -5,13 +5,13 @@
 </div>

 <p align="center">
-  <a href="./README.md">English</a> |
-  <a href="./README_zh.md">简体中文</a> |
-  <a href="./README_tzh.md">繁体中文</a> |
-  <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a> |
-  <a href="./README_id.md">Bahasa Indonesia</a> |
-  <a href="/README_pt_br.md">Português (Brasil)</a>
+  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
+  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
+  <a href="./README_tzh.md"><img alt="繁體中文版自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
+  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DBEDFA"></a>
+  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
+  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
+  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
 </p>

 <p align="center">
@@ -22,7 +22,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
+    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,6 +30,9 @@
   <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
     <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
   </a>
+  <a href="https://deepwiki.com/infiniflow/ragflow">
+    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
+  </a>
 </p>

 <h4 align="center">
@@ -40,6 +43,8 @@
   <a href="https://demo.ragflow.io">Demo</a>
 </h4>

+#
+
 ## 💡 RAGFlow とは?

 [RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM（大規模言語モデル）を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
@@ -55,11 +60,11 @@

 ## 🔥 最新情報

+- 2025-05-23 エージェントに Python/JS コードエグゼキュータコンポーネントを追加しました。
+- 2025-05-05 言語間クエリをサポートしました。
 - 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
 - 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
 - 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
 - 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
 - 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
-- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。

 ## 🎉 続きを楽しみに
@@ -112,7 +117,10 @@
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-  > ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlowのコード実行（サンドボックス）機能を利用する場合のみ必要です。
+
+> [!TIP]
+> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

 ### 🚀 サーバーを起動

@@ -149,7 +157,7 @@
 > 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
 > ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。

-> 以下のコマンドは、RAGFlow Docker イメージの v0.18.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.18.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.18.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0 と設定します。
+> 以下のコマンドは、RAGFlow Docker イメージの v0.19.1-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.19.1-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.19.1 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1 と設定します。

 ```bash
 $ cd ragflow/docker
@@ -162,8 +170,8 @@

 | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
 | ----------------- | --------------- | --------------------- | ------------------------ |
-| v0.18.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-| v0.18.0-slim      | ≈2              | ❌                    | Stable release           |
+| v0.19.1           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.1-slim      | ≈2              | ❌                    | Stable release           |
 | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
 | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

@@ -258,7 +266,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:

    ```bash
-   pipx install uv
+   pipx install uv pre-commit
    ```

 2. ソースコードをクローンし、Python の依存関係をインストールする:
@@ -267,6 +275,8 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
    uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+   uv run download_deps.py
+   pre-commit install
    ```

 3. Docker Compose を使用して依存サービス（MinIO、Elasticsearch、Redis、MySQL）を起動する:
@@ -278,7 +288,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    `/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:

    ```
-   127.0.0.1 es01 infinity mysql minio redis
+   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
    ```

 4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:
@@ -287,7 +297,16 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    export HF_ENDPOINT=https://hf-mirror.com
    ```

-5. バックエンドサービスを起動する:
+5. オペレーティングシステムにjemallocがない場合は、次のようにインストールします:
+
+   ```bash
+   # ubuntu
+   sudo apt-get install libjemalloc-dev
+   # centos
+   sudo yum install jemalloc
+   ```
+
+6. バックエンドサービスを起動する:

    ```bash
    source .venv/bin/activate
@@ -295,12 +314,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    bash docker/launch_backend_service.sh
    ```

-6. フロントエンドの依存関係をインストールする:
+7. フロントエンドの依存関係をインストールする:

    ```bash
    cd web
    npm install
    ```
-7. フロントエンドサービスを起動する:
+
+8. フロントエンドサービスを起動する:

    ```bash
    npm run dev
@@ -310,6 +331,13 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

    

+9. 開発が完了したら、RAGFlow のフロントエンド サービスとバックエンド サービスを停止します:
+
+   ```bash
+   pkill -f "ragflow_server.py|task_executor.py"
+   ```
+

 ## 📚 ドキュメンテーション

 - [Quickstart](https://ragflow.io/docs/dev/)
@@ -332,4 +360,4 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 ## 🙌 コントリビュート

-RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](./CONTRIBUTING.md)をご覧ください。
+RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](https://ragflow.io/docs/dev/contributing)をご覧ください。
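The `HF_ENDPOINT` step repeated in these READMEs routes HuggingFace downloads through a mirror; the variable is read by the `huggingface_hub` Python client at import time. A quick way to confirm it is in effect (a sketch; it assumes the project venv already has `huggingface_hub` installed, and the model name is only an example):

```bash
export HF_ENDPOINT=https://hf-mirror.com
# The resolved URL should start with the mirror host rather than huggingface.co.
uv run python -c "from huggingface_hub import hf_hub_url; print(hf_hub_url('BAAI/bge-large-zh-v1.5', 'config.json'))"
```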
README_ko.md, 69 changes

@@ -5,13 +5,13 @@
 </div>

 <p align="center">
-  <a href="./README.md">English</a> |
-  <a href="./README_zh.md">简体中文</a> |
-  <a href="./README_tzh.md">繁体中文</a> |
-  <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a> |
-  <a href="./README_id.md">Bahasa Indonesia</a> |
-  <a href="/README_pt_br.md">Português (Brasil)</a>
+  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
+  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
+  <a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
+  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
+  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DBEDFA"></a>
+  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
+  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
 </p>

 <p align="center">
@@ -22,7 +22,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
+    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,6 +30,9 @@
   <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
     <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
   </a>
+  <a href="https://deepwiki.com/infiniflow/ragflow">
+    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
+  </a>
 </p>

 <h4 align="center">
@@ -40,6 +43,8 @@
   <a href="https://demo.ragflow.io">Demo</a>
 </h4>

+#
+
 ## 💡 RAGFlow란?

 [RAGFlow](https://ragflow.io/)는 심층 문서 이해에 기반한 오픈소스 RAG (Retrieval-Augmented Generation) 엔진입니다. 이 엔진은 대규모 언어 모델(LLM)과 결합하여 정확한 질문 응답 기능을 제공하며, 다양한 복잡한 형식의 데이터에서 신뢰할 수 있는 출처를 바탕으로 한 인용을 통해 이를 뒷받침합니다. RAGFlow는 규모에 상관없이 모든 기업에 최적화된 RAG 워크플로우를 제공합니다.
@@ -55,11 +60,11 @@

 ## 🔥 업데이트

+- 2025-05-23 Agent에 Python/JS 코드 실행기 구성 요소를 추가합니다.
+- 2025-05-05 언어 간 쿼리를 지원합니다.
 - 2025-03-19 PDF 또는 DOCX 파일 내의 이미지를 이해하기 위해 다중 모드 모델을 사용하는 것을 지원합니다.
 - 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
 - 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
 - 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
 - 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
-- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.

 ## 🎉 계속 지켜봐 주세요
@@ -112,7 +117,10 @@
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-  > 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](<(https://docs.docker.com/engine/install/)>)를 참조하세요.
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlow의 코드 실행기(샌드박스) 기능을 사용하려는 경우에만 필요합니다.
+
+> [!TIP]
+> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](<(https://docs.docker.com/engine/install/)>)를 참조하세요.

 ### 🚀 서버 시작하기

@@ -149,7 +157,7 @@
 > 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
 > ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).

-> 아래 명령어는 RAGFlow Docker 이미지의 v0.18.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.18.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.18.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0로 설정합니다.
+> 아래 명령어는 RAGFlow Docker 이미지의 v0.19.1-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.19.1-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.19.1을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1로 설정합니다.

 ```bash
 $ cd ragflow/docker
@@ -162,8 +170,8 @@

 | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
 | ----------------- | --------------- | --------------------- | ------------------------ |
-| v0.18.0           | ≈9              | :heavy_check_mark:    | Stable release           |
-| v0.18.0-slim      | ≈2              | ❌                    | Stable release           |
+| v0.19.1           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.1-slim      | ≈2              | ❌                    | Stable release           |
 | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
 | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

@@ -257,7 +265,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 1. uv를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:

    ```bash
-   pipx install uv
+   pipx install uv pre-commit
    ```

 2. 소스 코드를 클론하고 Python 의존성을 설치합니다:
@@ -266,6 +274,8 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
    uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+   uv run download_deps.py
+   pre-commit install
    ```

 3. Docker Compose를 사용하여 의존 서비스(MinIO, Elasticsearch, Redis 및 MySQL)를 시작합니다:
@@ -277,7 +287,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    `/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:

    ```
-   127.0.0.1 es01 infinity mysql minio redis
+   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
    ```

 4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:
@@ -286,7 +296,16 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    export HF_ENDPOINT=https://hf-mirror.com
    ```

-5. 백엔드 서비스를 시작합니다:
+5. 만약 운영 체제에 jemalloc이 없으면 다음 방식으로 설치하세요:
+
+   ```bash
+   # ubuntu
+   sudo apt-get install libjemalloc-dev
+   # centos
+   sudo yum install jemalloc
+   ```
+
+6. 백엔드 서비스를 시작합니다:

    ```bash
    source .venv/bin/activate
@@ -294,12 +313,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
    bash docker/launch_backend_service.sh
    ```

-6. 프론트엔드 의존성을 설치합니다:
+7. 프론트엔드 의존성을 설치합니다:

    ```bash
    cd web
    npm install
    ```
-7. 프론트엔드 서비스를 시작합니다:
+
+8. 프론트엔드 서비스를 시작합니다:

    ```bash
    npm run dev
@@ -309,6 +330,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

    

+9. 개발이 완료된 후 RAGFlow 프론트엔드 및 백엔드 서비스를 중지합니다.
+
+   ```bash
+   pkill -f "ragflow_server.py|task_executor.py"
+   ```
+

 ## 📚 문서

 - [Quickstart](https://ragflow.io/docs/dev/)
@@ -331,4 +360,4 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 ## 🙌 컨트리뷰션

-RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](./CONTRIBUTING.md)을 검토해 주세요.
+RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](https://ragflow.io/docs/dev/contributing)을 검토해 주세요.
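The jemalloc step that this changeset adds to every README only installs the library; the backend launcher presumably picks it up from there. To check whether it is already present before running the installers (a sketch; library paths and package names differ across distros):

```bash
# Lists jemalloc entries from the shared-library cache; empty output means it is missing.
ldconfig -p | grep -i jemalloc || echo "jemalloc not found; install libjemalloc-dev (Debian/Ubuntu) or jemalloc (CentOS)"
```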
@@ -5,13 +5,13 @@

</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
  <a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DBEDFA"></a>
</p>

<p align="center">
@@ -22,7 +22,7 @@
    <img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
@@ -30,6 +30,9 @@
  <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
    <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="licença">
  </a>
  <a href="https://deepwiki.com/infiniflow/ragflow">
    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
  </a>
</p>

<h4 align="center">
@@ -40,6 +43,8 @@
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

#

<details open>
<summary><b>📕 Table of Contents</b></summary>

@@ -75,11 +80,11 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

## 🔥 Latest Updates

- 23-05-2025 Added the Python/JS code executor component to the Agent.
- 05-05-2025 Support for cross-language queries.
- 19-03-2025 Supports using a multi-modal model to understand images inside PDF or DOCX files.
- 28-02-2025 Combined with Internet search (Tavily), supports deep research for any LLM.
- 26-01-2025 Optimized knowledge graph extraction and application, with a variety of configuration options.
- 18-12-2024 Upgraded the Document Layout Analysis model in DeepDoc.
- 01-11-2024 Added keyword extraction and related-question generation for parsed chunks to improve retrieval accuracy.
- 22-08-2024 Supports converting text to SQL statements via RAG.

## 🎉 Stay Tuned
@@ -132,7 +137,10 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use RAGFlow's code executor (sandbox) feature.

> [!TIP]
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start the server

@@ -169,7 +177,7 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
> All Docker images are built for x86 platforms. We do not currently offer Docker images for ARM64.
> If you are on an ARM64 platform, please use [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

> The command below downloads the `v0.18.0-slim` edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download an edition other than `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** as needed before starting the server with `docker compose`. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` for the full `v0.18.0` edition.
> The command below downloads the `v0.19.1-slim` edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download an edition other than `v0.19.1-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** as needed before starting the server with `docker compose`. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1` for the full `v0.19.1` edition.

```bash
$ cd ragflow/docker
@@ -182,8 +190,8 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0           | ~9              | :heavy_check_mark:    | Stable release           |
| v0.18.0-slim      | ~2              | ❌                    | Stable release           |
| v0.19.1           | ~9              | :heavy_check_mark:    | Stable release           |
| v0.19.1-slim      | ~2              | ❌                    | Stable release           |
| nightly           | ~9              | :heavy_check_mark:    | _Unstable_ nightly build |
| nightly-slim      | ~2              | ❌                    | _Unstable_ nightly build |
@@ -281,7 +289,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

1. Install `uv`, or skip this step if it is already installed:

   ```bash
   pipx install uv
   pipx install uv pre-commit
   ```

2. Clone the source code and install Python dependencies:

@@ -290,6 +298,8 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Start the dependent services (MinIO, Elasticsearch, Redis, and MySQL) with Docker Compose:

@@ -301,7 +311,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
   ```

4. If HuggingFace is unreachable, set the `HF_ENDPOINT` environment variable to use a mirror site:

@@ -310,7 +320,16 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Launch the back-end service:
5. If your operating system does not have jemalloc, install it as follows:

   ```bash
   # ubuntu
   sudo apt-get install libjemalloc-dev
   # centos
   sudo yum install jemalloc
   ```

6. Launch the back-end service:

   ```bash
   source .venv/bin/activate
@@ -318,14 +337,14 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
   bash docker/launch_backend_service.sh
   ```

6. Install the front-end dependencies:
7. Install the front-end dependencies:

   ```bash
   cd web
   npm install
   ```

7. Launch the front-end service:
8. Launch the front-end service:

   ```bash
   npm run dev
@@ -335,6 +354,13 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

   

9. Stop the RAGFlow front-end and back-end services once development is complete:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)

@@ -358,4 +384,4 @@ See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🙌 Contributing

RAGFlow thrives on open-source collaboration. In that spirit, we embrace diverse contributions from the community.
If you would like to take part, first review our [Contribution Guidelines](./CONTRIBUTING.md).
If you would like to take part, first review our [Contribution Guidelines](https://ragflow.io/docs/dev/contributing).

README_tzh.md
@@ -5,12 +5,13 @@

</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
  <a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DBEDFA"></a>
  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
</p>

<p align="center">
@@ -21,7 +22,7 @@
    <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -29,6 +30,9 @@
  <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
    <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
  </a>
  <a href="https://deepwiki.com/infiniflow/ragflow">
    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
  </a>
</p>

<h4 align="center">
@@ -39,6 +43,31 @@
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

#

<div align="center">
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</div>

<details open>
<summary><b>📕 Table of Contents</b></summary>

- 💡 [What is RAGFlow?](#-RAGFlow-是什麼)
- 🎮 [Demo](#-demo-試用)
- 📌 [Latest Updates](#-近期更新)
- 🌟 [Key Features](#-主要功能)
- 🔎 [System Architecture](#-系統架構)
- 🎬 [Get Started](#-快速開始)
- 🔧 [Configurations](#-系統配置)
- 🔨 [Launch the service from source](#-以原始碼啟動服務)
- 📚 [Documentation](#-技術文檔)
- 📜 [Roadmap](#-路線圖)
- 🏄 [Contributing](#-貢獻指南)
- 🙌 [Join the Community](#-加入社區)
- 🤝 [Business Cooperation](#-商務合作)

</details>

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine built on deep document understanding. It offers a streamlined RAG workflow for enterprises and individuals of any scale, combining large language models (LLMs) to provide reliable question answering with well-founded citations over users' complex, varied data formats.
@@ -54,11 +83,11 @@

## 🔥 Latest Updates

- 2025-05-23 Added a Python/JS code executor component to the Agent.
- 2025-05-05 Support for cross-language queries.
- 2025-03-19 Images in PDF and DOCX files can be parsed with a multi-modal model to produce descriptions.
- 2025-02-28 Combined with Internet search (Tavily), enables Deep Research-style reasoning for any LLM.
- 2025-01-26 Optimized knowledge graph extraction and application, with a variety of configuration options.
- 2024-12-18 Upgraded the document layout analysis model in DeepDoc.
- 2024-11-01 Added keyword extraction and related-question generation for parsed chunks to improve recall accuracy.
- 2024-08-22 Supports natural-language-to-SQL conversion via RAG.

## 🎉 Stay Tuned
@@ -111,7 +140,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use RAGFlow's code executor (sandbox) feature.

> [!TIP]
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start the server

@@ -148,7 +180,7 @@
> All Docker images are built for x86 platforms. We do not currently offer Docker images for ARM64.
> If you are on an ARM64 platform, please use [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

> Running the following command automatically downloads the RAGFlow slim Docker image `v0.18.0-slim`. See the table below for descriptions of the different Docker editions. To download an edition other than `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` to download the full `v0.18.0` edition.
> Running the following command automatically downloads the RAGFlow slim Docker image `v0.19.1-slim`. See the table below for descriptions of the different Docker editions. To download an edition other than `v0.19.1-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1` to download the full `v0.19.1` edition.

```bash
$ cd ragflow/docker
@@ -161,8 +193,8 @@

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.18.0-slim      | ≈2              | ❌                    | Stable release           |
| v0.19.1           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.19.1-slim      | ≈2              | ❌                    | Stable release           |
| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

@@ -269,7 +301,7 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i

1. Install uv. Skip this step if it is already installed:

   ```bash
   pipx install uv
   pipx install uv pre-commit
   export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
   ```

@@ -279,6 +311,8 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Start the dependent services (MinIO, Elasticsearch, Redis, and MySQL) with Docker Compose:

@@ -290,7 +324,7 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   Add the following to `/etc/hosts` to resolve all host addresses in **conf/service_conf.yaml** to `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
   ```

4. If HuggingFace is unreachable, set the `HF_ENDPOINT` environment variable to a mirror site:

@@ -299,24 +333,34 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Launch the back-end service:
   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
5. If your operating system does not have jemalloc, install it as follows:

   ```
   ```bash
   # ubuntu
   sudo apt-get install libjemalloc-dev
   # centos
   sudo yum install jemalloc
   ```

6. Install the front-end dependencies:
   ```bash
   cd web
   npm install
   ```
6. Launch the back-end service:

7. Launch the front-end service:
   ```bash
   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```

7. Install the front-end dependencies:

   ```bash
   cd web
   npm install
   ```

8. Launch the front-end service:

   ```bash
   npm run dev

   ```

_The following interface indicates that the system started successfully:_
@@ -324,6 +368,13 @@ npm install
   
   ```

9. Stop the RAGFlow front-end and back-end services once development is complete:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)

@@ -346,7 +397,7 @@ npm install

## 🙌 Contributing

RAGFlow thrives only through open-source collaboration. In that spirit, we welcome diverse contributions from the community. If you would like to take part, please review our [Contributor Guidelines](./CONTRIBUTING.md).
RAGFlow thrives only through open-source collaboration. In that spirit, we welcome diverse contributions from the community. If you would like to take part, please review our [Contributor Guidelines](https://ragflow.io/docs/dev/contributing).

## 🤝 Business Cooperation

README_zh.md
@@ -5,13 +5,13 @@

</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
  <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
  <a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DBEDFA"></a>
  <a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
  <a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
  <a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
  <a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
  <a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
</p>

<p align="center">
@@ -22,7 +22,7 @@
    <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
    <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.19.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,6 +30,9 @@
  <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
    <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
  </a>
  <a href="https://deepwiki.com/infiniflow/ragflow">
    <img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
  </a>
</p>

<h4 align="center">
@@ -40,6 +43,31 @@
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

#

<div align="center">
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</div>

<details open>
<summary><b>📕 Table of Contents</b></summary>

- 💡 [What is RAGFlow?](#-RAGFlow-是什么)
- 🎮 [Demo](#-demo)
- 📌 [Latest Updates](#-近期更新)
- 🌟 [Key Features](#-主要功能)
- 🔎 [System Architecture](#-系统架构)
- 🎬 [Get Started](#-快速开始)
- 🔧 [Configurations](#-系统配置)
- 🔨 [Launch the service from source](#-以源代码启动服务)
- 📚 [Documentation](#-技术文档)
- 📜 [Roadmap](#-路线图)
- 🏄 [Contributing](#-贡献指南)
- 🙌 [Join the Community](#-加入社区)
- 🤝 [Business Cooperation](#-商务合作)

</details>

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine built on deep document understanding. It offers a streamlined RAG workflow for enterprises and individuals of any scale, combining large language models (LLMs) to provide reliable question answering with well-founded citations over users' complex, varied data formats.
@@ -55,11 +83,11 @@

## 🔥 Latest Updates

- 2025-03-19 Images in PDF and DOCX files can be parsed with a multi-modal model to produce descriptions.
- 2025-05-23 Added a Python/JS code executor component to the Agent.
- 2025-05-05 Support for cross-language queries.
- 2025-03-19 Images in PDF and DOCX files can be parsed with a multi-modal model to produce descriptions.
- 2025-02-28 Combined with Internet search (Tavily), enables Deep Research-style reasoning for any LLM.
- 2025-01-26 Optimized knowledge graph extraction and application, with multiple configuration options.
- 2024-12-18 Upgraded the document layout analysis model in DeepDoc.
- 2024-11-01 Added keyword extraction and related-question generation for parsed chunks to improve recall accuracy.
- 2024-08-22 Supports natural-language-to-SQL conversion via RAG.

## 🎉 Stay Tuned
@@ -112,7 +140,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use RAGFlow's code executor (sandbox) feature.

> [!TIP]
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start the server

@@ -149,7 +180,7 @@
> Note that all official Docker images are currently built for the x86 architecture; ARM64-based Docker images are not provided.
> If your operating system uses the ARM64 architecture, please refer to [this document](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image yourself.

> Running the following command automatically downloads the RAGFlow slim Docker image `v0.18.0-slim`. See the table below for descriptions of the different Docker editions. To download an edition other than `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` to download the full `v0.18.0` edition.
> Running the following command automatically downloads the RAGFlow slim Docker image `v0.19.1-slim`. See the table below for descriptions of the different Docker editions. To download an edition other than `v0.19.1-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1` to download the full `v0.19.1` edition.

```bash
$ cd ragflow/docker
@@ -162,8 +193,8 @@

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.18.0           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.18.0-slim      | ≈2              | ❌                    | Stable release           |
| v0.19.1           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.19.1-slim      | ≈2              | ❌                    | Stable release           |
| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

@@ -270,7 +301,7 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i

1. Install uv. Skip this step if it is already installed:

   ```bash
   pipx install uv
   pipx install uv pre-commit
   export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
   ```

@@ -280,6 +311,8 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Start the dependent services (MinIO, Elasticsearch, Redis, and MySQL) with Docker Compose:

@@ -291,7 +324,7 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   Add the following to `/etc/hosts` to resolve all host addresses in **conf/service_conf.yaml** to `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
   ```
4. If HuggingFace is unreachable, set the `HF_ENDPOINT` environment variable to a mirror site:

@@ -299,7 +332,16 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Launch the back-end service:
5. If your operating system does not have jemalloc, install it as follows:

   ```bash
   # ubuntu
   sudo apt-get install libjemalloc-dev
   # centos
   sudo yum install jemalloc
   ```

6. Launch the back-end service:

   ```bash
   source .venv/bin/activate
@@ -307,12 +349,14 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
   bash docker/launch_backend_service.sh
   ```

6. Install the front-end dependencies:
7. Install the front-end dependencies:

   ```bash
   cd web
   npm install
   ```
7. Launch the front-end service:

8. Launch the front-end service:

   ```bash
   npm run dev
@@ -321,12 +365,14 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
_The following interface indicates that the system started successfully:_

8. Stop RAGFlow services once development is complete
Stop the RAGFlow front-end and back-end services:

9. Stop the RAGFlow front-end and back-end services once development is complete:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)

@@ -349,7 +395,7 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i

## 🙌 Contributing

RAGFlow thrives only through open-source collaboration. In that spirit, we welcome diverse contributions from the community. If you would like to take part, please review our [Contributor Guidelines](./CONTRIBUTING.md).
RAGFlow thrives only through open-source collaboration. In that spirit, we welcome diverse contributions from the community. If you would like to take part, please review our [Contributor Guidelines](https://ragflow.io/docs/dev/contributing).

## 🤝 Business Cooperation
@@ -17,7 +17,6 @@ import logging
import json
from copy import deepcopy
from functools import partial

import pandas as pd

from agent.component import component_class
@@ -167,7 +166,11 @@ class Canvas:
                return n["data"]["name"]
        return ""

    def run(self, **kwargs):
    def run(self, running_hint_text = "is running...🕞", **kwargs):
        if not running_hint_text or not isinstance(running_hint_text, str):
            running_hint_text = "is running...🕞"
        bypass_begin = bool(kwargs.get("bypass_begin", False))

        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
@@ -186,6 +189,12 @@ class Canvas:
        if not self.path:
            self.components["begin"]["obj"].run(self.history, **kwargs)
            self.path.append(["begin"])
            if bypass_begin:
                cpn = self.get_component("begin")
                downstream = cpn["downstream"]
                self.path.append(downstream)

        self.path.append([])

@@ -209,7 +218,7 @@ class Canvas:
                if c not in waiting:
                    waiting.append(c)
                continue
            yield "*'{}'* is running...🕞".format(self.get_component_name(c))
            yield "*'{}'* {}".format(self.get_component_name(c), running_hint_text)

            if cpn.component_name.lower() == "iteration":
                st_cpn = cpn.get_start()
@@ -302,6 +311,8 @@ class Canvas:

    def get_history(self, window_size):
        convs = []
        if window_size <= 0:
            return convs
        for role, obj in self.history[window_size * -1:]:
            if isinstance(obj, list) and obj and all([isinstance(o, dict) for o in obj]):
                convs.append({"role": role, "content": '\n'.join([str(s.get("content", "")) for s in obj])})
@@ -362,4 +373,7 @@ class Canvas:
        return self.components["begin"]["obj"]._param.query

    def get_component_input_elements(self, cpnnm):
        return self.components[cpnnm]["obj"].get_input_elements()
        return self.components[cpnnm]["obj"].get_input_elements()

    def set_component_infor(self, cpn_id, infor):
        self.components[cpn_id]["obj"].set_infor(infor)
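A minimal sketch of driving the new `run` signature, assuming a `Canvas` constructed from a saved DSL string and a tenant id (the variable names here are illustrative, not part of the diff):

```python
# dsl_json: the canvas DSL as a JSON string; tenant_id: the owning tenant.
canvas = Canvas(dsl_json, tenant_id)

# Localize the progress hint and skip straight past the Begin component.
for event in canvas.run(running_hint_text="is running...", bypass_begin=True):
    print(event)  # streamed status strings and partial answers
```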

@@ -50,6 +50,7 @@ from .template import Template, TemplateParam
from .email import Email, EmailParam
from .iteration import Iteration, IterationParam
from .iterationitem import IterationItem, IterationItemParam
from .code import Code, CodeParam


def component_class(class_name):
@@ -129,5 +130,7 @@ __all__ = [
    "TemplateParam",
    "Email",
    "EmailParam",
    "Code",
    "CodeParam",
    "component_class"
]
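A small sketch of how the registry function declared at the top of this file resolves the newly exported component by name (usage inferred from the exports above, not shown in the diff):

```python
from agent.component import component_class

cpn_cls = component_class("Code")         # -> agent.component.code.Code
param_cls = component_class("CodeParam")  # -> agent.component.code.CodeParam
```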

@@ -64,14 +64,17 @@ class Answer(ComponentBase, ABC):
            for ii, row in stream.iterrows():
                answer += row.to_dict()["content"]
                yield {"content": answer}
        else:
        elif stream is not None:
            for st in stream():
                res = st
                yield st
        if self._param.post_answers:
        if self._param.post_answers and res:
            res["content"] += random.choice(self._param.post_answers)
            yield res

        if res is None:
            res = {"content": ""}

        self.set_output(res)

    def set_exception(self, e):

@@ -17,6 +17,7 @@ import logging
from abc import ABC
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from agent.component.base import ComponentBase, ComponentParamBase

@@ -44,17 +45,28 @@ class Baidu(ComponentBase, ABC):
            return Baidu.be_output("")

        try:
            url = 'http://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
            url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'Connection': 'keep-alive',
            }
            response = requests.get(url=url, headers=headers)

            url_res = re.findall(r"'url': \\\"(.*?)\\\"}", response.text)
            title_res = re.findall(r"'title': \\\"(.*?)\\\",\\n", response.text)
            body_res = re.findall(r"\"contentText\":\"(.*?)\"", response.text)
            baidu_res = [{"content": re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body)} for
                         url, title, body in zip(url_res, title_res, body_res)]
            del body_res, url_res, title_res
            # check if request success
            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')
                url_res = []
                title_res = []
                body_res = []
                for item in soup.select('.result.c-container'):
                    # extract title
                    title_res.append(item.select_one('h3 a').get_text(strip=True))
                    url_res.append(item.select_one('h3 a')['href'])
                    body_res.append(item.select_one('.c-abstract').get_text(strip=True) if item.select_one('.c-abstract') else '')
                baidu_res = [{"content": re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body)} for
                             url, title, body in zip(url_res, title_res, body_res)]
                del body_res, url_res, title_res
        except Exception as e:
            return Baidu.be_output("**ERROR**: " + str(e))

@@ -13,13 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import builtins
import json
import os
import logging
import os
from abc import ABC
from functools import partial
from typing import Tuple, Union
from typing import Any, Tuple, Union

import pandas as pd

@@ -34,6 +34,7 @@ _IS_RAW_CONF = "_is_raw_conf"
class ComponentParamBase(ABC):
    def __init__(self):
        self.output_var_name = "output"
        self.infor_var_name = "infor"
        self.message_history_window_size = 22
        self.query = []
        self.inputs = []
@@ -109,15 +110,11 @@ class ComponentParamBase(ABC):
        update_from_raw_conf = conf.get(_IS_RAW_CONF, True)
        if update_from_raw_conf:
            deprecated_params_set = self._get_or_init_deprecated_params_set()
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set()
            )
            feeded_deprecated_params_set = self._get_or_init_feeded_deprecated_params_set()
            user_feeded_params_set = self._get_or_init_user_feeded_params_set()
            setattr(self, _IS_RAW_CONF, False)
        else:
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set(conf)
            )
            feeded_deprecated_params_set = self._get_or_init_feeded_deprecated_params_set(conf)
            user_feeded_params_set = self._get_or_init_user_feeded_params_set(conf)

        def _recursive_update_param(param, config, depth, prefix):
@@ -153,15 +150,11 @@ class ComponentParamBase(ABC):

                else:
                    # recursive set obj attr
                    sub_params = _recursive_update_param(
                        attr, config_value, depth + 1, prefix=f"{prefix}{config_key}."
                    )
                    sub_params = _recursive_update_param(attr, config_value, depth + 1, prefix=f"{prefix}{config_key}.")
                    setattr(param, config_key, sub_params)

            if not allow_redundant and redundant_attrs:
                raise ValueError(
                    f"cpn `{getattr(self, '_name', type(self))}` has redundant parameters: `{[redundant_attrs]}`"
                )
                raise ValueError(f"cpn `{getattr(self, '_name', type(self))}` has redundant parameters: `{[redundant_attrs]}`")

            return param

@@ -192,9 +185,7 @@ class ComponentParamBase(ABC):
        param_validation_path_prefix = home_dir + "/param_validation/"

        param_name = type(self).__name__
        param_validation_path = "/".join(
            [param_validation_path_prefix, param_name + ".json"]
        )
        param_validation_path = "/".join([param_validation_path_prefix, param_name + ".json"])

        validation_json = None

@@ -227,11 +218,7 @@ class ComponentParamBase(ABC):
                        break

                if not value_legal:
                    raise ValueError(
                        "Plase check runtime conf, {} = {} does not match user-parameter restriction".format(
                            variable, value
                        )
                    )
                    raise ValueError("Plase check runtime conf, {} = {} does not match user-parameter restriction".format(variable, value))

            elif variable in validation_json:
                self._validate_param(attr, validation_json)
@@ -239,94 +226,63 @@ class ComponentParamBase(ABC):
    @staticmethod
    def check_string(param, descr):
        if type(param).__name__ not in ["str"]:
            raise ValueError(
                descr + " {} not supported, should be string type".format(param)
            )
            raise ValueError(descr + " {} not supported, should be string type".format(param))

    @staticmethod
    def check_empty(param, descr):
        if not param:
            raise ValueError(
                descr + " does not support empty value."
            )
            raise ValueError(descr + " does not support empty value.")

    @staticmethod
    def check_positive_integer(param, descr):
        if type(param).__name__ not in ["int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive integer".format(param)
            )
            raise ValueError(descr + " {} not supported, should be positive integer".format(param))

    @staticmethod
    def check_positive_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive numeric".format(param)
            )
            raise ValueError(descr + " {} not supported, should be positive numeric".format(param))

    @staticmethod
    def check_nonnegative_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param < 0:
            raise ValueError(
                descr
                + " {} not supported, should be non-negative numeric".format(param)
            )
            raise ValueError(descr + " {} not supported, should be non-negative numeric".format(param))

    @staticmethod
    def check_decimal_float(param, descr):
        if type(param).__name__ not in ["float", "int"] or param < 0 or param > 1:
            raise ValueError(
                descr
                + " {} not supported, should be a float number in range [0, 1]".format(
                    param
                )
            )
            raise ValueError(descr + " {} not supported, should be a float number in range [0, 1]".format(param))

    @staticmethod
    def check_boolean(param, descr):
        if type(param).__name__ != "bool":
            raise ValueError(
                descr + " {} not supported, should be bool type".format(param)
            )
            raise ValueError(descr + " {} not supported, should be bool type".format(param))

    @staticmethod
    def check_open_unit_interval(param, descr):
        if type(param).__name__ not in ["float"] or param <= 0 or param >= 1:
            raise ValueError(
                descr + " should be a numeric number between 0 and 1 exclusively"
            )
            raise ValueError(descr + " should be a numeric number between 0 and 1 exclusively")

    @staticmethod
    def check_valid_value(param, descr, valid_values):
        if param not in valid_values:
            raise ValueError(
                descr
                + " {} is not supported, it should be in {}".format(param, valid_values)
            )
            raise ValueError(descr + " {} is not supported, it should be in {}".format(param, valid_values))

    @staticmethod
    def check_defined_type(param, descr, types):
        if type(param).__name__ not in types:
            raise ValueError(
                descr + " {} not supported, should be one of {}".format(param, types)
            )
            raise ValueError(descr + " {} not supported, should be one of {}".format(param, types))

    @staticmethod
    def check_and_change_lower(param, valid_list, descr=""):
        if type(param).__name__ != "str":
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )
            raise ValueError(descr + " {} not supported, should be one of {}".format(param, valid_list))

        lower_param = param.lower()
        if lower_param in valid_list:
            return lower_param
        else:
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )
            raise ValueError(descr + " {} not supported, should be one of {}".format(param, valid_list))
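To make the collapsed one-line validators concrete, a small illustrative use of the check helpers in a parameter subclass (the class and field names here are made up for the example):

```python
class DemoParam(ComponentParamBase):
    def __init__(self):
        super().__init__()
        self.temperature = 0.7
        self.top_n = 8

    def check(self):
        # Each helper raises ValueError with the given descriptive prefix
        # when the value falls outside the allowed range or type.
        self.check_decimal_float(self.temperature, "[Demo] Temperature")
        self.check_positive_integer(self.top_n, "[Demo] Top N")
```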

    @staticmethod
    def _greater_equal_than(value, limit):
@@ -340,11 +296,7 @@ class ComponentParamBase(ABC):
    def _range(value, ranges):
        in_range = False
        for left_limit, right_limit in ranges:
            if (
                left_limit - settings.FLOAT_ZERO
                <= value
                <= right_limit + settings.FLOAT_ZERO
            ):
            if left_limit - settings.FLOAT_ZERO <= value <= right_limit + settings.FLOAT_ZERO:
                in_range = True
                break

@@ -360,16 +312,11 @@ class ComponentParamBase(ABC):

    def _warn_deprecated_param(self, param_name, descr):
        if self._deprecated_params_set.get(param_name):
            logging.warning(
                f"{descr} {param_name} is deprecated and ignored in this version."
            )
            logging.warning(f"{descr} {param_name} is deprecated and ignored in this version.")

    def _warn_to_deprecate_param(self, param_name, descr, new_param):
        if self._deprecated_params_set.get(param_name):
            logging.warning(
                f"{descr} {param_name} will be deprecated in future release; "
                f"please use {new_param} instead."
            )
            logging.warning(f"{descr} {param_name} will be deprecated in future release; please use {new_param} instead.")
            return True
        return False

@@ -394,14 +341,16 @@ class ComponentBase(ABC):
        "params": {},
        "output": {},
        "inputs": {}
    }}""".format(self.component_name,
                 self._param,
                 json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
                 json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False)
    }}""".format(
            self.component_name,
            self._param,
            json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
            json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False),
        )

    def __init__(self, canvas, id, param: ComponentParamBase):
        from agent.canvas import Canvas  # Local import to avoid cyclic dependency

        assert isinstance(canvas, Canvas), "canvas must be an instance of Canvas"
        self._canvas = canvas
        self._id = id
@@ -409,15 +358,17 @@ class ComponentBase(ABC):
        self._param.check()

    def get_dependent_components(self):
        cpnts = set([para["component_id"].split("@")[0] for para in self._param.query \
                     if para.get("component_id") \
                     and para["component_id"].lower().find("answer") < 0 \
                     and para["component_id"].lower().find("begin") < 0])
        cpnts = set(
            [
                para["component_id"].split("@")[0]
                for para in self._param.query
                if para.get("component_id") and para["component_id"].lower().find("answer") < 0 and para["component_id"].lower().find("begin") < 0
            ]
        )
        return list(cpnts)

    def run(self, history, **kwargs):
        logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                           json.dumps(kwargs, ensure_ascii=False)))
        logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False), json.dumps(kwargs, ensure_ascii=False)))
        self._param.debug_inputs = []
        try:
            res = self._run(history, **kwargs)
@@ -462,6 +413,35 @@ class ComponentBase(ABC):
    def set_output(self, v):
        setattr(self._param, self._param.output_var_name, v)

    def set_infor(self, v):
        setattr(self._param, self._param.infor_var_name, v)

    def _fetch_outputs_from(self, sources: list[dict[str, Any]]) -> list[pd.DataFrame]:
        outs = []
        for q in sources:
            if q.get("component_id"):
                if "@" in q["component_id"] and q["component_id"].split("@")[0].lower().find("begin") >= 0:
                    cpn_id, key = q["component_id"].split("@")
                    for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                        if p["key"] == key:
                            outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
                            break
                    else:
                        assert False, f"Can't find parameter '{key}' for {cpn_id}"
                    continue

                if q["component_id"].lower().find("answer") == 0:
                    txt = []
                    for r, c in self._canvas.history[::-1][: self._param.message_history_window_size][::-1]:
                        txt.append(f"{r.upper()}:{c}")
                    txt = "\n".join(txt)
                    outs.append(pd.DataFrame([{"content": txt}]))
                    continue

                outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
            elif q.get("value"):
                outs.append(pd.DataFrame([{"content": q["value"]}]))
        return outs

    def get_input(self):
        if self._param.debug_inputs:
            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])
@@ -475,41 +455,23 @@ class ComponentBase(ABC):

        if self._param.query:
            self._param.inputs = []
            outs = []
            for q in self._param.query:
                if q.get("component_id"):
                    if q["component_id"].split("@")[0].lower().find("begin") >= 0:
                        cpn_id, key = q["component_id"].split("@")
                        for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                            if p["key"] == key:
                                outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
                                self._param.inputs.append({"component_id": q["component_id"],
                                                           "content": p.get("value", "")})
                                break
                        else:
                            assert False, f"Can't find parameter '{key}' for {cpn_id}"
                        continue
            outs = self._fetch_outputs_from(self._param.query)

                    if q["component_id"].lower().find("answer") == 0:
                        txt = []
                        for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
                            txt.append(f"{r.upper()}:{c}")
                        txt = "\n".join(txt)
                        self._param.inputs.append({"content": txt, "component_id": q["component_id"]})
                        outs.append(pd.DataFrame([{"content": txt}]))
                        continue
            for out in outs:
                records = out.to_dict("records")
                content: str

                if len(records) > 1:
                    content = "\n".join([str(d["content"]) for d in records])
                else:
                    content = records[0]["content"]

                self._param.inputs.append({"component_id": records[0].get("component_id"), "content": content})

                    outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
                    self._param.inputs.append({"component_id": q["component_id"],
                                               "content": "\n".join(
                                                   [str(d["content"]) for d in outs[-1].to_dict('records')])})
                elif q.get("value"):
                    self._param.inputs.append({"component_id": None, "content": q["value"]})
                    outs.append(pd.DataFrame([{"content": q["value"]}]))
            if outs:
                df = pd.concat(outs, ignore_index=True)
                if "content" in df:
                    df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
                    df = df.drop_duplicates(subset=["content"]).reset_index(drop=True)
                return df

        upstream_outs = []

@@ -523,9 +485,8 @@ class ComponentBase(ABC):
                    o["component_id"] = u
                    upstream_outs.append(o)
                    continue
            #if self.component_name.lower()!="answer" and u not in self._canvas.get_component(self._id)["upstream"]: continue
            if self.component_name.lower().find("switch") < 0 \
                    and self.get_component_name(u) in ["relevant", "categorize"]:
            # if self.component_name.lower()!="answer" and u not in self._canvas.get_component(self._id)["upstream"]: continue
            if self.component_name.lower().find("switch") < 0 and self.get_component_name(u) in ["relevant", "categorize"]:
                continue
            if u.lower().find("answer") >= 0:
                for r, c in self._canvas.history[::-1]:
@@ -545,7 +506,7 @@ class ComponentBase(ABC):

        df = pd.concat(upstream_outs, ignore_index=True)
        if "content" in df:
            df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
            df = df.drop_duplicates(subset=["content"]).reset_index(drop=True)

        self._param.inputs = []
        for _, r in df.iterrows():
@@ -597,5 +558,5 @@ class ComponentBase(ABC):
        return self._canvas.get_component(pid)["obj"]

    def get_upstream(self):
        cpn_nms = self._canvas.get_component(self._id)['upstream']
        cpn_nms = self._canvas.get_component(self._id)["upstream"]
        return cpn_nms
@@ -85,6 +85,8 @@ class Categorize(Generate, ABC):
        input = self.get_input()
        input = " - ".join(input["content"]) if "content" in input else ""
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        self._canvas.set_component_infor(self._id, {"prompt":self._param.get_prompt(input),"messages": [{"role": "user", "content": "\nCategory: "}],"conf": self._param.gen_conf()})

        ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
                            self._param.gen_conf())
        logging.debug(f"input: {input}, answer: {str(ans)}")
@@ -97,9 +99,13 @@ class Categorize(Generate, ABC):
        # If a category is found, return the category with the highest count.
        if any(category_counts.values()):
            max_category = max(category_counts.items(), key=lambda x: x[1])
            return Categorize.be_output(self._param.category_description[max_category[0]]["to"])
            res = Categorize.be_output(self._param.category_description[max_category[0]]["to"])
            self.set_output(res)
            return res

        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
        res = Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
        self.set_output(res)
        return res

    def debug(self, **kwargs):
        df = self._run([], **kwargs)

agent/component/code.py (new file)
@@ -0,0 +1,152 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from abc import ABC
from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field, field_validator

from agent.component.base import ComponentBase, ComponentParamBase
from api import settings


class Language(str, Enum):
    PYTHON = "python"
    NODEJS = "nodejs"


class CodeExecutionRequest(BaseModel):
    code_b64: str = Field(..., description="Base64 encoded code string")
    language: Language = Field(default=Language.PYTHON, description="Programming language")
    arguments: Optional[dict] = Field(default={}, description="Arguments")

    @field_validator("code_b64")
    @classmethod
    def validate_base64(cls, v: str) -> str:
        try:
            base64.b64decode(v, validate=True)
            return v
        except Exception as e:
            raise ValueError(f"Invalid base64 encoding: {str(e)}")

    @field_validator("language", mode="before")
    @classmethod
    def normalize_language(cls, v) -> str:
        if isinstance(v, str):
            low = v.lower()
            if low in ("python", "python3"):
                return "python"
            elif low in ("javascript", "nodejs"):
                return "nodejs"
        raise ValueError(f"Unsupported language: {v}")


class CodeParam(ComponentParamBase):
    """
    Define the code sandbox component parameters.
    """

    def __init__(self):
        super().__init__()
        self.lang = "python"
        self.script = ""
        self.arguments = []
        self.address = f"http://{settings.SANDBOX_HOST}:9385/run"
        self.enable_network = True

    def check(self):
        self.check_valid_value(self.lang, "Support languages", ["python", "python3", "nodejs", "javascript"])
        self.check_defined_type(self.enable_network, "Enable network", ["bool"])


class Code(ComponentBase, ABC):
    component_name = "Code"

    def _run(self, history, **kwargs):
        arguments = {}
        for input in self._param.arguments:
            if "@" in input["component_id"]:
                component_id = input["component_id"].split("@")[0]
                referred_component_key = input["component_id"].split("@")[1]
                referred_component = self._canvas.get_component(component_id)["obj"]

                for param in referred_component._param.query:
                    if param["key"] == referred_component_key:
                        if "value" in param:
                            arguments[input["name"]] = param["value"]
            else:
                referred_component = self._canvas.get_component(input["component_id"])["obj"]
                referred_component_name = referred_component.component_name
                referred_component_id = referred_component._id

                debug_inputs = self._param.debug_inputs
                if debug_inputs:
                    for param in debug_inputs:
                        if param["key"] == referred_component_id:
                            if "value" in param and param["name"] == input["name"]:
                                arguments[input["name"]] = param["value"]
                else:
                    if referred_component_name.lower() == "answer":
                        arguments[input["name"]] = self._canvas.get_history(1)[0]["content"]
                        continue

                    _, out = referred_component.output(allow_partial=False)
                    if not out.empty:
                        arguments[input["name"]] = "\n".join(out["content"])

        return self._execute_code(
            language=self._param.lang,
            code=self._param.script,
            arguments=arguments,
            address=self._param.address,
            enable_network=self._param.enable_network,
        )

    def _execute_code(self, language: str, code: str, arguments: dict, address: str, enable_network: bool):
        import requests

        try:
            code_b64 = self._encode_code(code)
            code_req = CodeExecutionRequest(code_b64=code_b64, language=language, arguments=arguments).model_dump()
        except Exception as e:
            return Code.be_output("**Error**: construct code request error: " + str(e))

        try:
            resp = requests.post(url=address, json=code_req, timeout=10)
            body = resp.json()
            if body:
                stdout = body.get("stdout")
                stderr = body.get("stderr")
                return Code.be_output(stdout or stderr)
            else:
                return Code.be_output("**Error**: There is no response from sandbox")

        except Exception as e:
            return Code.be_output("**Error**: Internal error in sandbox: " + str(e))

    def _encode_code(self, code: str) -> str:
        return base64.b64encode(code.encode("utf-8")).decode("utf-8")

    def get_input_elements(self):
        elements = []
        for input in self._param.arguments:
            cpn_id = input["component_id"]
            elements.append({"key": cpn_id, "name": input["name"]})
        return elements

    def debug(self, **kwargs):
        return self._run([], **kwargs)
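A minimal sketch of what this component sends to the sandbox, useful for exercising the executor by hand. The endpoint shape and the `stdout`/`stderr` response fields follow the code above; the localhost address is an assumption for a machine running sandbox-executor-manager:

```python
import base64
import requests

SANDBOX_URL = "http://localhost:9385/run"  # assumed local sandbox-executor-manager

script = 'print("hello from the sandbox")'
payload = {
    # Mirrors CodeExecutionRequest: base64-encoded source plus language tag.
    "code_b64": base64.b64encode(script.encode("utf-8")).decode("utf-8"),
    "language": "python",
    "arguments": {},
}

resp = requests.post(SANDBOX_URL, json=payload, timeout=10)
body = resp.json()
print(body.get("stdout") or body.get("stderr"))
```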

@@ -61,7 +61,7 @@ class ExeSQL(Generate, ABC):
    component_name = "ExeSQL"

    def _refactor(self, ans):
        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
        match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
        if match:
            ans = match.group(1)  # Query content
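The regex swap above changes how reasoning traces are stripped; a quick illustration of the difference (the strings are illustrative only):

```python
import re

ans = "<think>chain of thought...</think>SELECT * FROM t;"

# Old pattern: only matches when the opening <think> tag is present.
old = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)

# New pattern: drops everything up to the last closing tag, so it also
# handles outputs where the opening <think> was already truncated away.
new = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
assert old == new == "SELECT * FROM t;"

truncated = "chain of thought...</think>SELECT * FROM t;"
assert re.sub(r"^.*</think>", "", truncated, flags=re.DOTALL) == "SELECT * FROM t;"
```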

@@ -105,6 +105,7 @@ class ExeSQL(Generate, ABC):
        sql_res = []
        for i in range(len(input_list)):
            single_sql = input_list[i]
            single_sql = single_sql.replace('```','')
            while self._loop <= self._param.loop:
                self._loop += 1
                if not single_sql:
@@ -16,15 +16,29 @@
import json
import re
from functools import partial
from typing import Any
import pandas as pd
from api.db import LLMType
from api.db.services.conversation_service import structure_answer
from api.db.services.llm_service import LLMBundle
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase
from plugin import GlobalPluginManager
from plugin.llm_tool_plugin import llm_tool_metadata_to_openai_tool
from rag.llm.chat_model import ToolCallSession
from rag.prompts import message_fit_in


class LLMToolPluginCallSession(ToolCallSession):
    def tool_call(self, name: str, arguments: dict[str, Any]) -> str:
        tool = GlobalPluginManager.get_llm_tool_by_name(name)

        if tool is None:
            raise ValueError(f"LLM tool {name} does not exist")

        return tool().invoke(**arguments)


class GenerateParam(ComponentParamBase):
    """
    Define the Generate component parameters.
@@ -41,6 +55,7 @@ class GenerateParam(ComponentParamBase):
        self.frequency_penalty = 0
        self.cite = True
        self.parameters = []
        self.llm_enabled_tools = []

    def check(self):
        self.check_decimal_float(self.temperature, "[Generate] Temperature")
@@ -133,6 +148,15 @@ class Generate(ComponentBase):

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

        if len(self._param.llm_enabled_tools) > 0:
            tools = GlobalPluginManager.get_llm_tools_by_names(self._param.llm_enabled_tools)

            chat_mdl.bind_tools(
                LLMToolPluginCallSession(),
                [llm_tool_metadata_to_openai_tool(t.get_metadata()) for t in tools]
            )

        prompt = self._param.prompt

        retrieval_res = []
@@ -200,8 +224,8 @@ class Generate(ComponentBase):
            if len(msg) < 2:
                msg.append({"role": "user", "content": "Output: "})
            ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
            ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)

            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            self._canvas.set_component_infor(self._id, {"prompt":msg[0]["content"],"messages": msg[1:],"conf": self._param.gen_conf()})
            if self._param.cite and "chunks" in retrieval_res.columns:
                res = self.set_cite(retrieval_res, ans)
                return pd.DataFrame([res])
@@ -234,7 +258,7 @@ class Generate(ComponentBase):
        if self._param.cite and "chunks" in retrieval_res.columns:
            res = self.set_cite(retrieval_res, answer)
            yield res

        self._canvas.set_component_infor(self._id, {"prompt":msg[0]["content"],"messages": msg[1:],"conf": self._param.gen_conf()})
        self.set_output(Generate.be_output(res))

    def debug(self, **kwargs):
@@ -51,13 +51,19 @@ class KeywordExtract(Generate, ABC):

    def _run(self, history, **kwargs):
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""
        if hasattr(query, "to_dict") and "content" in query:
            query = ", ".join(map(str, query["content"].dropna()))
        else:
            query = str(query)

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        self._canvas.set_component_infor(self._id, {"prompt":self._param.get_prompt(),"messages": [{"role": "user", "content": query}],"conf": self._param.gen_conf()})

        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
                            self._param.gen_conf())

        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
        ans = re.sub(r".*keyword:", "", ans).strip()
        logging.debug(f"ans: {ans}")
        return KeywordExtract.be_output(ans)
|
||||
|
||||
@ -40,7 +40,9 @@ class Message(ComponentBase, ABC):
|
||||
if kwargs.get("stream"):
|
||||
return partial(self.stream_output)
|
||||
|
||||
return Message.be_output(random.choice(self._param.messages))
|
||||
res = Message.be_output(random.choice(self._param.messages))
|
||||
self.set_output(res)
|
||||
return res
|
||||
|
||||
def stream_output(self):
|
||||
res = None
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from abc import ABC
|
||||
|
||||
import pandas as pd
|
||||
@ -30,10 +31,10 @@ from rag.utils.tavily_conn import Tavily
|
||||
|
||||
|
||||
class RetrievalParam(ComponentParamBase):
|
||||
|
||||
"""
|
||||
Define the Retrieval component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.similarity_threshold = 0.2
|
||||
@ -41,6 +42,7 @@ class RetrievalParam(ComponentParamBase):
|
||||
self.top_n = 8
|
||||
self.top_k = 1024
|
||||
self.kb_ids = []
|
||||
self.kb_vars = []
|
||||
self.rerank_id = ""
|
||||
self.empty_response = ""
|
||||
self.tavily_api_key = ""
|
||||
@ -58,7 +60,26 @@ class Retrieval(ComponentBase, ABC):
|
||||
def _run(self, history, **kwargs):
|
||||
query = self.get_input()
|
||||
query = str(query["content"][0]) if "content" in query else ""
|
||||
kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
|
||||
query = re.split(r"(USER:|ASSISTANT:)", query)[-1]
|
||||
|
||||
kb_ids: list[str] = self._param.kb_ids or []
|
||||
|
||||
kb_vars = self._fetch_outputs_from(self._param.kb_vars)
|
||||
|
||||
if len(kb_vars) > 0:
|
||||
for kb_var in kb_vars:
|
||||
if len(kb_var) == 1:
|
||||
kb_var_value = str(kb_var["content"][0])
|
||||
|
||||
for v in kb_var_value.split(","):
|
||||
kb_ids.append(v)
|
||||
else:
|
||||
for v in kb_var.to_dict("records"):
|
||||
kb_ids.append(v["content"])
|
||||
|
||||
filtered_kb_ids: list[str] = [kb_id for kb_id in kb_ids if kb_id]
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(filtered_kb_ids)
|
||||
if not kbs:
|
||||
return Retrieval.be_output("")
|
||||
|
||||
@ -75,20 +96,25 @@ class Retrieval(ComponentBase, ABC):
|
||||
rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)
|
||||
|
||||
if kbs:
|
||||
kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
|
||||
1, self._param.top_n,
|
||||
self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
|
||||
aggs=False, rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(query, kbs))
|
||||
query = re.sub(r"^user[::\s]*", "", query, flags=re.IGNORECASE)
|
||||
kbinfos = settings.retrievaler.retrieval(
|
||||
query,
|
||||
embd_mdl,
|
||||
[kb.tenant_id for kb in kbs],
|
||||
filtered_kb_ids,
|
||||
1,
|
||||
self._param.top_n,
|
||||
self._param.similarity_threshold,
|
||||
1 - self._param.keywords_similarity_weight,
|
||||
aggs=False,
|
||||
rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(query, kbs),
|
||||
)
|
||||
else:
|
||||
kbinfos = {"chunks": [], "doc_aggs": []}
|
||||
|
||||
if self._param.use_kg and kbs:
|
||||
ck = settings.kg_retrievaler.retrieval(query,
|
||||
[kbs[0].tenant_id],
|
||||
self._param.kb_ids,
|
||||
embd_mdl,
|
||||
LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
|
||||
ck = settings.kg_retrievaler.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl, LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
@ -107,5 +133,3 @@ class Retrieval(ComponentBase, ABC):
|
||||
df = pd.DataFrame({"content": kb_prompt(kbinfos, 200000), "chunks": json.dumps(kbinfos["chunks"])})
|
||||
logging.debug("{} {}".format(query, df))
|
||||
return df.dropna()
|
||||
|
||||
|
||||
|
||||
@ -15,8 +15,11 @@
|
||||
#
|
||||
import json
|
||||
import re
|
||||
|
||||
from jinja2 import StrictUndefined
|
||||
from jinja2.sandbox import SandboxedEnvironment
|
||||
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
from jinja2 import Template as Jinja2Template
|
||||
|
||||
|
||||
class TemplateParam(ComponentParamBase):
|
||||
@ -75,6 +78,11 @@ class Template(ComponentBase):
|
||||
if p["key"] == key:
|
||||
value = p.get("value", "")
|
||||
self.make_kwargs(para, kwargs, value)
|
||||
|
||||
origin_pattern = "{begin@" + key + "}"
|
||||
new_pattern = "begin_" + key
|
||||
content = content.replace(origin_pattern, new_pattern)
|
||||
kwargs[new_pattern] = kwargs.pop(origin_pattern, "")
|
||||
break
|
||||
else:
|
||||
assert False, f"Can't find parameter '{key}' for {cpn_id}"
|
||||
@ -89,19 +97,27 @@ class Template(ComponentBase):
|
||||
else:
|
||||
hist = ""
|
||||
self.make_kwargs(para, kwargs, hist)
|
||||
|
||||
if ":" in component_id:
|
||||
origin_pattern = "{" + component_id + "}"
|
||||
new_pattern = component_id.replace(":", "_")
|
||||
content = content.replace(origin_pattern, new_pattern)
|
||||
kwargs[new_pattern] = kwargs.pop(component_id, "")
|
||||
continue
|
||||
|
||||
_, out = cpn.output(allow_partial=False)
|
||||
|
||||
result = ""
|
||||
if "content" in out.columns:
|
||||
result = "\n".join(
|
||||
[o if isinstance(o, str) else str(o) for o in out["content"]]
|
||||
)
|
||||
result = "\n".join([o if isinstance(o, str) else str(o) for o in out["content"]])
|
||||
|
||||
self.make_kwargs(para, kwargs, result)
|
||||
|
||||
template = Jinja2Template(content)
|
||||
env = SandboxedEnvironment(
|
||||
autoescape=True,
|
||||
undefined=StrictUndefined,
|
||||
)
|
||||
template = env.from_string(content)
|
||||
|
||||
try:
|
||||
content = template.render(kwargs)
|
||||
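The switch from a bare `Jinja2Template` to a `SandboxedEnvironment` with `StrictUndefined` is worth a standalone illustration: the sandbox restricts what templates can reach, and strict undefined handling turns a silently-missing variable into an error. A minimal sketch (plain Jinja2, independent of RAGFlow):

```python
from jinja2 import StrictUndefined
from jinja2.sandbox import SandboxedEnvironment

env = SandboxedEnvironment(autoescape=True, undefined=StrictUndefined)
template = env.from_string("Hello {{ name }}!")

print(template.render(name="world"))   # Hello world!

try:
    template.render()                  # 'name' missing -> jinja2 UndefinedError
except Exception as e:
    print(type(e).__name__, e)
```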
@@ -114,19 +130,16 @@
                    v = json.dumps(v, ensure_ascii=False)
                except Exception:
                    pass
            content = re.sub(
                r"\{%s\}" % re.escape(n), v, content
            )
            content = re.sub(
                r"(#+)", r" \1 ", content
            )
            # Process backslashes in strings; use a lambda to avoid escape issues
            if isinstance(v, str):
                v = v.replace("\\", "\\\\")
            content = re.sub(r"\{%s\}" % re.escape(n), lambda match: v, content)
            content = re.sub(r"(#+)", r" \1 ", content)

        return Template.be_output(content)

    def make_kwargs(self, para, kwargs, value):
        self._param.inputs.append(
            {"component_id": para["key"], "content": value}
        )
        self._param.inputs.append({"component_id": para["key"], "content": value})
        try:
            value = json.loads(value)
        except Exception:

@@ -52,7 +52,10 @@
    "parameters": [],
    "presence_penalty": 0.4,
    "prompt": "",
    "query": [],
    "query": [{
        "type": "reference",
        "component_id": "RewriteQuestion:AllNightsSniff"
    }],
    "temperature": 0.1,
    "top_p": 0.3
}
@@ -195,11 +198,15 @@
    "message_history_window_size": 22,
    "output": null,
    "output_var_name": "output",
    "query": [],
    "rerank_id": "",
    "similarity_threshold": 0.2,
    "top_k": 1024,
    "top_n": 6
    "top_n": 6,
    "query": [{
        "type": "reference",
        "component_id": "RewriteQuestion:AllNightsSniff"
    }],
    "use_kg": false
}
},
"upstream": [
@@ -548,7 +555,11 @@
    "temperature": 0.1,
    "temperatureEnabled": true,
    "topPEnabled": true,
    "top_p": 0.3
    "top_p": 0.3,
    "query": [{
        "type": "reference",
        "component_id": "RewriteQuestion:AllNightsSniff"
    }]
},
"label": "Categorize",
"name": "Question Categorize"
@@ -625,7 +636,11 @@
    "keywords_similarity_weight": 0.3,
    "similarity_threshold": 0.2,
    "top_k": 1024,
    "top_n": 6
    "top_n": 6,
    "query": [{
        "type": "reference",
        "component_id": "RewriteQuestion:AllNightsSniff"
    }]
},
"label": "Retrieval",
"name": "Search product info"
@@ -932,7 +947,7 @@
    "y": 962.5655101584402
},
"resizing": false,
"selected": true,
"selected": false,
"sourcePosition": "right",
"style": {
    "height": 163,

@@ -36,17 +36,20 @@ class DeepResearcher:
        self._kb_retrieve = kb_retrieve
        self._kg_retrieve = kg_retrieve

    @staticmethod
    def _remove_query_tags(text):
        """Remove query tags from text"""
        pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
    def _remove_tags(text: str, start_tag: str, end_tag: str) -> str:
        """General tag-removal helper"""
        pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
        return re.sub(pattern, "", text)

    @staticmethod
    def _remove_result_tags(text):
        """Remove result tags from text"""
        pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
        return re.sub(pattern, "", text)
    def _remove_query_tags(text: str) -> str:
        """Remove query tags"""
        return DeepResearcher._remove_tags(text, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)

    @staticmethod
    def _remove_result_tags(text: str) -> str:
        """Remove result tags"""
        return DeepResearcher._remove_tags(text, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT)

    def _generate_reasoning(self, msg_history):
        """Generate reasoning steps"""
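The refactor above collapses two near-identical methods into one generic helper. A standalone sketch of the same idea, with illustrative tag constants (the real `BEGIN_SEARCH_QUERY`/`END_SEARCH_QUERY` values live in RAGFlow's prompts module):

```python
import re

# Illustrative tag values, not necessarily the ones RAGFlow ships.
BEGIN_SEARCH_QUERY, END_SEARCH_QUERY = "<|begin_search_query|>", "<|end_search_query|>"

def remove_tags(text: str, start_tag: str, end_tag: str) -> str:
    # re.escape makes the tags literal even if they contain regex metacharacters.
    pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
    return re.sub(pattern, "", text)

s = f"keep {BEGIN_SEARCH_QUERY}drop this{END_SEARCH_QUERY} keep"
print(remove_tags(s, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY))  # -> "keep  keep"
```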
@@ -57,7 +60,7 @@ class DeepResearcher:
            msg_history[-1]["content"] += "\n\nContinue reasoning with the new information.\n"

        for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_history, {"temperature": 0.7}):
            ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            if not ans:
                continue
            query_think = ans
@@ -95,21 +98,31 @@
    def _retrieve_information(self, search_query):
        """Retrieve information from different sources"""
        # 1. Knowledge base retrieval
        kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}

        kbinfos = {"chunks": [], "doc_aggs": []}
        try:
            kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}
        except Exception as e:
            logging.error(f"Knowledge base retrieval error: {e}")

        # 2. Web retrieval (if the Tavily API is configured)
        if self.prompt_config.get("tavily_api_key"):
            tav = Tavily(self.prompt_config["tavily_api_key"])
            tav_res = tav.retrieve_chunks(search_query)
            kbinfos["chunks"].extend(tav_res["chunks"])
            kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])

        try:
            if self.prompt_config.get("tavily_api_key"):
                tav = Tavily(self.prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(search_query)
                kbinfos["chunks"].extend(tav_res["chunks"])
                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
        except Exception as e:
            logging.error(f"Web retrieval error: {e}")

        # 3. Knowledge graph retrieval (if configured)
        if self.prompt_config.get("use_kg") and self._kg_retrieve:
            ck = self._kg_retrieve(question=search_query)
            if ck["content_with_weight"]:
                kbinfos["chunks"].insert(0, ck)

        try:
            if self.prompt_config.get("use_kg") and self._kg_retrieve:
                ck = self._kg_retrieve(question=search_query)
                if ck["content_with_weight"]:
                    kbinfos["chunks"].insert(0, ck)
        except Exception as e:
            logging.error(f"Knowledge graph retrieval error: {e}")

        return kbinfos

    def _update_chunk_info(self, chunk_info, kbinfos):
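The pattern this hunk introduces — each retrieval source wrapped in its own `try`/`except` so one failing backend only degrades the result — generalizes cleanly. A small sketch with stand-in sources (note the aggregate must start as a dict with `"chunks"`/`"doc_aggs"` keys so the later `extend` calls are safe):

```python
import logging

def retrieve_all(search_query, sources):
    # Fail-soft aggregation: a failing source is logged, not fatal.
    kbinfos = {"chunks": [], "doc_aggs": []}
    for name, fn in sources:
        try:
            res = fn(search_query)
            kbinfos["chunks"].extend(res.get("chunks", []))
            kbinfos["doc_aggs"].extend(res.get("doc_aggs", []))
        except Exception as e:
            logging.error(f"{name} retrieval error: {e}")
    return kbinfos

def kb_source(q):
    return {"chunks": [{"content": f"kb hit for {q}"}], "doc_aggs": []}

def web_source(q):
    raise RuntimeError("timeout")

print(retrieve_all("demo", [("kb", kb_source), ("web", web_source)]))
# -> the kb chunk is kept; the web failure is only logged
```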
@@ -142,7 +155,7 @@
                [{"role": "user",
                  "content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
                {"temperature": 0.7}):
            ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            if not ans:
                continue
            summary_think = ans

@@ -107,7 +107,7 @@ def search_pages_path(pages_dir):
def register_page(page_path):
    path = f"{page_path}"

    page_name = page_path.stem.rstrip("_app")
    page_name = page_path.stem.removesuffix("_app")
    module_name = ".".join(
        page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
    )
@@ -146,10 +146,23 @@ def load_user(web_request):
    if authorization:
        try:
            access_token = str(jwt.loads(authorization))

            if not access_token or not access_token.strip():
                logging.warning("Authentication attempt with empty access token")
                return None

            # Access tokens should be UUIDs (32 hex characters)
            if len(access_token.strip()) < 32:
                logging.warning(f"Authentication attempt with invalid token format: {len(access_token)} chars")
                return None

            user = UserService.query(
                access_token=access_token, status=StatusEnum.VALID.value
            )
            if user:
                if not user[0].access_token or not user[0].access_token.strip():
                    logging.warning(f"User {user[0].email} has empty access_token in database")
                    return None
                return user[0]
            else:
                return None
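The new checks in `load_user` amount to a cheap token-shape filter before any database lookup. A sketch of that predicate in isolation (the 32-character floor matches the UUID-hex assumption noted in the diff):

```python
import logging

def looks_like_access_token(access_token: str) -> bool:
    # Reject empty or whitespace-only tokens outright.
    if not access_token or not access_token.strip():
        logging.warning("Authentication attempt with empty access token")
        return False
    # Access tokens are expected to be UUIDs (32 hex characters or more).
    if len(access_token.strip()) < 32:
        logging.warning("Authentication attempt with invalid token format: %d chars", len(access_token))
        return False
    return True

print(looks_like_access_token(""))                                  # False
print(looks_like_access_token("deadbeef"))                          # False
print(looks_like_access_token("0123456789abcdef0123456789abcdef"))  # True
```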
@@ -18,10 +18,10 @@ import os
import re
from datetime import datetime, timedelta
from flask import request, Response
from api.db.services.llm_service import TenantLLMService
from api.db.services.llm_service import LLMBundle
from flask_login import login_required, current_user

from api.db import FileType, LLMType, ParserType, FileSource
from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileType, LLMType, ParserType, FileSource
from api.db.db_models import APIToken, Task, File
from api.db.services import duplicate_name
from api.db.services.api_service import APITokenService, API4ConversationService
@@ -345,7 +345,7 @@ def completion():

@manager.route('/conversation/<conversation_id>', methods=['GET'])  # noqa: F821
# @login_required
def get(conversation_id):
def get_conversation(conversation_id):
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
@@ -548,6 +548,31 @@ def list_chunks():

    return get_json_result(data=res)

@manager.route('/get_chunk/<chunk_id>', methods=['GET'])  # noqa: F821
# @login_required
def get_chunk(chunk_id):
    from rag.nlp import search
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
        tenant_id = objs[0].tenant_id
        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
        if chunk is None:
            return server_error_response(Exception("Chunk not found"))
        k = []
        for n in chunk.keys():
            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                k.append(n)
        for n in k:
            del chunk[n]

        return get_json_result(data=chunk)
    except Exception as e:
        return server_error_response(e)

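A hypothetical client call against the new endpoint; the base URL and API key are placeholders, while the route and `Authorization` header shape come from the handler above:

```python
import requests

BASE_URL = "http://localhost:9380/v1/api"   # assumption: adjust to your deployment
API_KEY = "your_api_key"                    # placeholder
chunk_id = "some_chunk_id"                  # placeholder

resp = requests.get(
    f"{BASE_URL}/get_chunk/{chunk_id}",
    headers={"Authorization": f"Bearer {API_KEY}"},
    timeout=10,
)
# Vector/token fields (_vec, _sm_, _tks, _ltks) are stripped server-side.
print(resp.json())
```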
@manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
# @login_required
@@ -577,10 +602,23 @@ def list_kb_docs():
    orderby = req.get("orderby", "create_time")
    desc = req.get("desc", True)
    keywords = req.get("keywords", "")

    status = req.get("status", [])
    if status:
        invalid_status = {s for s in status if s not in VALID_TASK_STATUS}
        if invalid_status:
            return get_data_error_result(
                message=f"Invalid filter status conditions: {', '.join(invalid_status)}"
            )
    types = req.get("types", [])
    if types:
        invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
        if invalid_types:
            return get_data_error_result(
                message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}"
            )
    try:
        docs, tol = DocumentService.get_by_kb_id(
            kb_id, page_number, items_per_page, orderby, desc, keywords)
            kb_id, page_number, items_per_page, orderby, desc, keywords, status, types)
        docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]

        return get_json_result(data={"total": tol, "docs": docs})
@@ -615,7 +653,7 @@ def document_rm():
    tenant_id = objs[0].tenant_id
    req = request.json
    try:
        doc_ids = [DocumentService.get_doc_id_by_doc_name(doc_name) for doc_name in req.get("doc_names", [])]
        doc_ids = DocumentService.get_doc_ids_by_doc_names(req.get("doc_names", []))
        for doc_id in req.get("doc_ids", []):
            if doc_id not in doc_ids:
                doc_ids.append(doc_id)
@@ -633,11 +671,16 @@ def document_rm():
    FileService.init_knowledgebase_docs(pf_id, tenant_id)

    errors = ""
    docs = DocumentService.get_by_ids(doc_ids)
    doc_dic = {}
    for doc in docs:
        doc_dic[doc.id] = doc

    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
            if doc_id not in doc_dic:
                return get_data_error_result(message="Document not found!")
            doc = doc_dic[doc_id]
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_data_error_result(message="Tenant not found!")
@@ -818,10 +861,11 @@ def retrieval():
    doc_ids = req.get("doc_ids", [])
    question = req.get("question")
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    size = int(req.get("page_size", 30))
    similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))
    highlight = bool(req.get("highlight", False))

    try:
        kbs = KnowledgebaseService.get_by_ids(kb_ids)
@@ -831,18 +875,16 @@
                data=False, message='Knowledge bases use different embedding models or do not exist.',
                code=settings.RetCode.AUTHENTICATION_ERROR)

        embd_mdl = TenantLLMService.model_instance(
            kbs[0].tenant_id, LLMType.EMBEDDING.value, llm_name=kbs[0].embd_id)
        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, llm_name=kbs[0].embd_id)
        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = TenantLLMService.model_instance(
                kbs[0].tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])
        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kbs[0].tenant_id, LLMType.CHAT)
            chat_mdl = LLMBundle(kbs[0].tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)
        ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
                                               similarity_threshold, vector_similarity_weight, top,
                                               doc_ids, rerank_mdl=rerank_mdl,
                                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight,
                                               rank_feature=label_question(question, kbs))
        for c in ranks["chunks"]:
            c.pop("vector", None)

api/apps/auth/README.md  (new file, 76 lines)
@@ -0,0 +1,76 @@
# Auth

The Auth module provides implementations of OAuth2 and OpenID Connect (OIDC) authentication for integration with third-party identity providers.

**Features**

- Supports both OAuth2 and OIDC authentication protocols
- Automatic OIDC configuration discovery (via `/.well-known/openid-configuration`)
- JWT token validation
- Unified user information handling

## Usage

```python
# OAuth2 configuration
oauth_config = {
    "type": "oauth2",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "authorization_url": "https://your-oauth-provider.com/oauth/authorize",
    "token_url": "https://your-oauth-provider.com/oauth/token",
    "userinfo_url": "https://your-oauth-provider.com/oauth/userinfo",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# OIDC configuration
oidc_config = {
    "type": "oidc",
    "issuer": "https://your-oauth-provider.com/oidc",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# GitHub OAuth configuration
github_config = {
    "type": "github",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# Get a client instance
client = get_auth_client(oauth_config)
```

### Authentication Flow

1. Get the authorization URL:
```python
auth_url = client.get_authorization_url()
```

2. After the user authorizes, exchange the authorization code for a token:
```python
token_response = client.exchange_code_for_token(authorization_code)
access_token = token_response["access_token"]
```

3. Fetch user information:
```python
user_info = client.fetch_user_info(access_token)
```

## User Information Structure

All authentication methods return user information following this structure:

```python
{
    "email": "user@example.com",
    "username": "username",
    "nickname": "User Name",
    "avatar_url": "https://example.com/avatar.jpg"
}
```
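Putting the three steps together, a sketch of a callback handler might look like this (`get_auth_client` and `oauth_config` are as defined above; the surrounding web-framework wiring is illustrative and omitted):

```python
# Sketch only: assumes get_auth_client and oauth_config from the examples above.
auth_client = get_auth_client(oauth_config)

# Step 1: send the user to the provider.
login_url = auth_client.get_authorization_url(state="random_csrf_token")

# Steps 2 and 3: in the callback view, trade the code for a token and a profile.
def oauth_callback(code: str) -> dict:
    token_response = auth_client.exchange_code_for_token(code)
    user_info = auth_client.fetch_user_info(token_response["access_token"])
    return user_info.to_dict()
```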
api/apps/auth/__init__.py  (new file, 40 lines)
@@ -0,0 +1,40 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from .oauth import OAuthClient
from .oidc import OIDCClient
from .github import GithubOAuthClient


CLIENT_TYPES = {
    "oauth2": OAuthClient,
    "oidc": OIDCClient,
    "github": GithubOAuthClient
}


def get_auth_client(config) -> OAuthClient:
    channel_type = str(config.get("type", "")).lower()
    if channel_type == "":
        if config.get("issuer"):
            channel_type = "oidc"
        else:
            channel_type = "oauth2"
    client_class = CLIENT_TYPES.get(channel_type)
    if not client_class:
        raise ValueError(f"Unsupported type: {channel_type}")

    return client_class(config)
api/apps/auth/github.py  (new file, 63 lines)
@@ -0,0 +1,63 @@
# (Apache-2.0 license header, identical to api/apps/auth/__init__.py)

import requests
from .oauth import OAuthClient, UserInfo


class GithubOAuthClient(OAuthClient):
    def __init__(self, config):
        """
        Initialize the GithubOAuthClient with the provider's configuration.
        """
        config.update({
            "authorization_url": "https://github.com/login/oauth/authorize",
            "token_url": "https://github.com/login/oauth/access_token",
            "userinfo_url": "https://api.github.com/user",
            "scope": "user:email"
        })
        super().__init__(config)


    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch GitHub user info.
        """
        user_info = {}
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            # user info
            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info.update(response.json())
            # email info
            response = requests.get(self.userinfo_url + "/emails", headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            email_info = response.json()
            user_info["email"] = next(
                (email for email in email_info if email["primary"]), None
            )["email"]
            return self.normalize_user_info(user_info)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch GitHub user info: {e}")


    def normalize_user_info(self, user_info):
        email = user_info.get("email")
        username = user_info.get("login", str(email).split("@")[0])
        nickname = user_info.get("name", username)
        avatar_url = user_info.get("avatar_url", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
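One sharp edge worth noting in `fetch_user_info` above: if a GitHub account exposes no primary email, `next(..., None)["email"]` raises `TypeError`. A defensive variant of that lookup (a sketch, not the shipped code):

```python
def primary_email(email_info: list[dict]) -> str | None:
    # Return the primary address if one exists, otherwise None
    # instead of subscripting None and raising TypeError.
    primary = next((e for e in email_info if e.get("primary")), None)
    return primary["email"] if primary else None

print(primary_email([{"email": "a@x.io", "primary": False},
                     {"email": "b@x.io", "primary": True}]))   # b@x.io
print(primary_email([]))                                       # None
```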
api/apps/auth/oauth.py  (new file, 110 lines)
@@ -0,0 +1,110 @@
# (Apache-2.0 license header, identical to api/apps/auth/__init__.py)

import requests
import urllib.parse


class UserInfo:
    def __init__(self, email, username, nickname, avatar_url):
        self.email = email
        self.username = username
        self.nickname = nickname
        self.avatar_url = avatar_url

    def to_dict(self):
        return {key: value for key, value in self.__dict__.items()}


class OAuthClient:
    def __init__(self, config):
        """
        Initialize the OAuthClient with the provider's configuration.
        """
        self.client_id = config["client_id"]
        self.client_secret = config["client_secret"]
        self.authorization_url = config["authorization_url"]
        self.token_url = config["token_url"]
        self.userinfo_url = config["userinfo_url"]
        self.redirect_uri = config["redirect_uri"]
        self.scope = config.get("scope", None)

        self.http_request_timeout = 7


    def get_authorization_url(self, state=None):
        """
        Generate the authorization URL for user login.
        """
        params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
        }
        if self.scope:
            params["scope"] = self.scope
        if state:
            params["state"] = state
        authorization_url = f"{self.authorization_url}?{urllib.parse.urlencode(params)}"
        return authorization_url


    def exchange_code_for_token(self, code):
        """
        Exchange authorization code for access token.
        """
        try:
            payload = {
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "code": code,
                "redirect_uri": self.redirect_uri,
                "grant_type": "authorization_code"
            }
            response = requests.post(
                self.token_url,
                data=payload,
                headers={"Accept": "application/json"},
                timeout=self.http_request_timeout
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to exchange authorization code for token: {e}")


    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch user information using access token.
        """
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info = response.json()
            return self.normalize_user_info(user_info)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch user info: {e}")


    def normalize_user_info(self, user_info):
        email = user_info.get("email")
        username = user_info.get("username", str(email).split("@")[0])
        nickname = user_info.get("nickname", username)
        avatar_url = user_info.get("avatar_url", None)
        if avatar_url is None:
            avatar_url = user_info.get("picture", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
api/apps/auth/oidc.py  (new file, 99 lines)
@@ -0,0 +1,99 @@
# (Apache-2.0 license header, identical to api/apps/auth/__init__.py)

import jwt
import requests
from .oauth import OAuthClient


class OIDCClient(OAuthClient):
    def __init__(self, config):
        """
        Initialize the OIDCClient with the provider's configuration.
        Use `issuer` as the single source of truth for configuration discovery.
        """
        self.issuer = config.get("issuer")
        if not self.issuer:
            raise ValueError("Missing issuer in configuration.")

        oidc_metadata = self._load_oidc_metadata(self.issuer)
        config.update({
            'issuer': oidc_metadata['issuer'],
            'jwks_uri': oidc_metadata['jwks_uri'],
            'authorization_url': oidc_metadata['authorization_endpoint'],
            'token_url': oidc_metadata['token_endpoint'],
            'userinfo_url': oidc_metadata['userinfo_endpoint']
        })

        super().__init__(config)
        self.issuer = config['issuer']
        self.jwks_uri = config['jwks_uri']


    def _load_oidc_metadata(self, issuer):
        """
        Load OIDC metadata from `/.well-known/openid-configuration`.
        """
        try:
            metadata_url = f"{issuer}/.well-known/openid-configuration"
            response = requests.get(metadata_url, timeout=7)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch OIDC metadata: {e}")


    def parse_id_token(self, id_token):
        """
        Parse and validate an OIDC ID token (JWT format) with signature verification.
        """
        try:
            # Decode the JWT header without verifying the signature
            headers = jwt.get_unverified_header(id_token)

            # OIDC usually uses `RS256` for signing
            alg = headers.get("alg", "RS256")

            # Use PyJWT's PyJWKClient to fetch the JWKS and find the signing key
            jwks_cli = jwt.PyJWKClient(self.jwks_uri)
            signing_key = jwks_cli.get_signing_key_from_jwt(id_token).key

            # Decode and verify the signature
            decoded_token = jwt.decode(
                id_token,
                key=signing_key,
                algorithms=[alg],
                audience=str(self.client_id),
                issuer=self.issuer,
            )
            return decoded_token
        except Exception as e:
            raise ValueError(f"Error parsing ID token: {e}")


    def fetch_user_info(self, access_token, id_token=None, **kwargs):
        """
        Fetch user info.
        """
        user_info = {}
        if id_token:
            user_info = self.parse_id_token(id_token)
        user_info.update(super().fetch_user_info(access_token).to_dict())
        return self.normalize_user_info(user_info)


    def normalize_user_info(self, user_info):
        return super().normalize_user_info(user_info)
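The verification path in `parse_id_token` can be read as a standalone recipe. A sketch using the same PyJWT calls, with placeholder issuer/client values (fetching the signing key requires network access to the provider's JWKS endpoint, whose URL normally comes from discovery metadata):

```python
import jwt

ISSUER = "https://your-oauth-provider.com/oidc"   # placeholder
CLIENT_ID = "your_client_id"                      # placeholder
JWKS_URI = f"{ISSUER}/jwks"                       # normally taken from discovery metadata

def verify_id_token(id_token: str) -> dict:
    # Read the algorithm from the unverified header, then verify properly.
    alg = jwt.get_unverified_header(id_token).get("alg", "RS256")
    signing_key = jwt.PyJWKClient(JWKS_URI).get_signing_key_from_jwt(id_token).key
    return jwt.decode(id_token, key=signing_key, algorithms=[alg],
                      audience=CLIENT_ID, issuer=ISSUER)
```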
@@ -26,7 +26,6 @@ from api.utils.api_utils import get_json_result, server_error_response, validate_request
from agent.canvas import Canvas
from peewee import MySQLDatabase, PostgresqlDatabase
from api.db.db_models import APIToken
import logging
import time

@manager.route('/templates', methods=['GET'])  # noqa: F821
@@ -89,7 +88,6 @@ def save():
@login_required
def get(canvas_id):
    e, c = UserCanvasService.get_by_tenant_id(canvas_id)
    logging.info(f"get canvas_id: {canvas_id} c: {c}")
    if not e:
        return get_data_error_result(message="canvas not found.")
    return get_json_result(data=c)
@@ -115,6 +113,7 @@ def getsse(canvas_id):
def run():
    req = request.json
    stream = req.get("stream", True)
    running_hint_text = req.get("running_hint_text", "")
    e, cvs = UserCanvasService.get_by_id(req["id"])
    if not e:
        return get_data_error_result(message="canvas not found.")
@@ -140,7 +139,7 @@ def run():
    def sse():
        nonlocal answer, cvs
        try:
            for ans in canvas.run(stream=True):
            for ans in canvas.run(running_hint_text=running_hint_text, stream=True):
                if ans.get("running_status"):
                    yield "data:" + json.dumps({"code": 0, "message": "",
                                                "data": {"answer": ans["content"],
@@ -178,7 +177,7 @@ def run():
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in canvas.run(stream=False):
    for answer in canvas.run(running_hint_text=running_hint_text, stream=False):
        if answer.get("running_status"):
            continue
        final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
@@ -250,7 +249,9 @@ def debug():
                code=RetCode.OPERATING_ERROR)

        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
        canvas.get_component(req["component_id"])["obj"]._param.debug_inputs = req["params"]
        component = canvas.get_component(req["component_id"])["obj"]
        component.reset()
        component._param.debug_inputs = req["params"]
        df = canvas.get_component(req["component_id"])["obj"].debug()
        return get_json_result(data=df.to_dict(orient="records"))
    except Exception as e:

@@ -22,7 +22,7 @@ from flask_login import login_required, current_user
from rag.app.qa import rmPrefix, beAdoc
from rag.app.tag import label_question
from rag.nlp import search, rag_tokenizer
from rag.prompts import keyword_extraction
from rag.prompts import keyword_extraction, cross_languages
from rag.settings import PAGERANK_FLD
from rag.utils import rmSpace
from api.db import LLMType, ParserType
@@ -37,6 +37,7 @@ import xxhash
import re


@manager.route('/list', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id")
@@ -194,6 +195,7 @@ def switch():
@login_required
@validate_request("chunk_ids", "doc_id")
def rm():
    from rag.utils.storage_factory import STORAGE_IMPL
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
@@ -204,6 +206,9 @@ def rm():
        deleted_chunk_ids = req["chunk_ids"]
        chunk_number = len(deleted_chunk_ids)
        DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
        for cid in deleted_chunk_ids:
            if STORAGE_IMPL.obj_exist(doc.kb_id, cid):
                STORAGE_IMPL.rm(doc.kb_id, cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
@@ -275,6 +280,7 @@ def retrieval_test():
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    use_kg = req.get("use_kg", False)
    top = int(req.get("top_k", 1024))
    langs = req.get("cross_languages", [])
    tenant_ids = []

    try:
@@ -294,6 +300,9 @@ def retrieval_test():
        if not e:
            return get_data_error_result(message="Knowledgebase not found!")

        if langs:
            question = cross_languages(kb.tenant_id, None, question, langs)

        embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

        rerank_mdl = None

@@ -41,6 +41,12 @@ def set_conversation():
    req = request.json
    conv_id = req.get("conversation_id")
    is_new = req.get("is_new")
    name = req.get("name", "New conversation")
    req["user_id"] = current_user.id

    if len(name) > 255:
        name = name[0:255]

    del req["is_new"]
    if not is_new:
        del req["conversation_id"]
@@ -59,7 +65,7 @@ def set_conversation():
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(message="Dialog not found")
        conv = {"id": conv_id, "dialog_id": req["dialog_id"], "name": req.get("name", "New conversation"), "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]}
        conv = {"id": conv_id, "dialog_id": req["dialog_id"], "name": name, "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}], "user_id": current_user.id}
        ConversationService.save(**conv)
        return get_json_result(data=conv)
    except Exception as e:
@@ -99,6 +105,7 @@ def get():
                "dataset_id": get_value(ck, "kb_id", "dataset_id"),
                "image_id": get_value(ck, "image_id", "img_id"),
                "positions": get_value(ck, "positions", "position_int"),
                "doc_type": get_value(ck, "doc_type", "doc_type_kwd"),
            }
            for ck in ref.get("chunks", [])
        ]
@@ -210,6 +217,7 @@ def completion():
                "dataset_id": get_value(ck, "kb_id", "dataset_id"),
                "image_id": get_value(ck, "image_id", "img_id"),
                "positions": get_value(ck, "positions", "position_int"),
                "doc_type": get_value(ck, "doc_type_kwd", "doc_type_kwd"),
            }
            for ck in ref.get("chunks", [])
        ]
@@ -241,7 +249,7 @@ def completion():
        else:
            answer = None
            for ans in chat(dia, msg, **req):
                answer = structure_answer(conv, ans, message_id, req["conversation_id"])
                answer = structure_answer(conv, ans, message_id, conv.id)
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)

@@ -28,6 +28,7 @@ from api.utils.api_utils import get_json_result


@manager.route('/set', methods=['POST'])  # noqa: F821
@validate_request("prompt_config")
@login_required
def set_dialog():
    req = request.json
@@ -43,21 +44,10 @@ def set_dialog():
    similarity_threshold = req.get("similarity_threshold", 0.1)
    vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
    llm_setting = req.get("llm_setting", {})
    default_prompt = {
        "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括"知识库中未找到您要的答案!"这句话。回答需要考虑聊天历史。
以下是知识库:
{knowledge}
以上是知识库。""",
        "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
        "parameters": [
            {"key": "knowledge", "optional": False}
        ],
        "empty_response": "Sorry! 知识库中未找到相关内容!"
    }
    prompt_config = req.get("prompt_config", default_prompt)

    if not prompt_config["system"]:
        prompt_config["system"] = default_prompt["system"]
    prompt_config = req["prompt_config"]

    if not req.get("kb_ids", []) and not prompt_config.get("tavily_api_key") and "{knowledge}" in prompt_config['system']:
        return get_data_error_result(message="Please remove `{knowledge}` in system prompt since no knowledge base/Tavily used here.")

    for p in prompt_config["parameters"]:
        if p["optional"]:
@@ -20,79 +20,76 @@ import re

import flask
from flask import request
from flask_login import login_required, current_user
from flask_login import current_user, login_required

from deepdoc.parser.html_parser import RAGFlowHtmlParser
from rag.nlp import search

from api.db import FileType, TaskStatus, ParserType, FileSource
from api import settings
from api.constants import FILE_NAME_LEN_LIMIT, IMG_BASE64_PREFIX
from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileSource, FileType, ParserType, TaskStatus
from api.db.db_models import File, Task
from api.db.services import duplicate_name
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import queue_tasks
from api.db.services.user_service import UserTenantService
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.task_service import TaskService
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import UserTenantService
from api.utils import get_uuid
from api.utils.api_utils import (
    server_error_response,
    get_data_error_result,
    get_json_result,
    server_error_response,
    validate_request,
)
from api.utils import get_uuid
from api import settings
from api.utils.api_utils import get_json_result
from rag.utils.storage_factory import STORAGE_IMPL
from api.utils.file_utils import filename_type, thumbnail, get_project_base_directory
from api.utils.file_utils import filename_type, get_project_base_directory, thumbnail
from api.utils.web_utils import html2pdf, is_valid_url
from api.constants import IMG_BASE64_PREFIX
from deepdoc.parser.html_parser import RAGFlowHtmlParser
from rag.nlp import search
from rag.utils.storage_factory import STORAGE_IMPL


@manager.route('/upload', methods=['POST'])  # noqa: F821
@manager.route("/upload", methods=["POST"])  # noqa: F821
@login_required
@validate_request("kb_id")
def upload():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
    if 'file' not in request.files:
        return get_json_result(
            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
    if "file" not in request.files:
        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    file_objs = request.files.getlist("file")
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
        if file_obj.filename == "":
            return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)
        if len(file_obj.filename.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
            return get_json_result(data=False, message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)

    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    err, files = FileService.upload_document(kb, file_objs, current_user.id)
    files = [f[0] for f in files]  # remove the blob

    if err:
        return get_json_result(
            data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
        return get_json_result(data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)

    if not files:
        return get_json_result(data=files, message="There seems to be an issue with your file format. Please verify it is correct and not corrupted.", code=settings.RetCode.DATA_ERROR)
    files = [f[0] for f in files]  # remove the blob

    return get_json_result(data=files)


@manager.route('/web_crawl', methods=['POST'])  # noqa: F821
@manager.route("/web_crawl", methods=["POST"])  # noqa: F821
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
    name = request.form.get("name")
    url = request.form.get("url")
    if not is_valid_url(url):
        return get_json_result(
            data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
        return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")
@@ -108,10 +105,7 @@ def web_crawl():
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    try:
        filename = duplicate_name(
            DocumentService.query,
            name=name + ".pdf",
            kb_id=kb.id)
        filename = duplicate_name(DocumentService.query, name=name + ".pdf", kb_id=kb.id)
        filetype = filename_type(filename)
        if filetype == FileType.OTHER.value:
            raise RuntimeError("This type of file has not been supported yet!")
@@ -130,7 +124,7 @@ def web_crawl():
            "name": filename,
            "location": location,
            "size": len(blob),
            "thumbnail": thumbnail(filename, blob)
            "thumbnail": thumbnail(filename, blob),
        }
        if doc["type"] == FileType.VISUAL:
            doc["parser_id"] = ParserType.PICTURE.value
@@ -147,129 +141,136 @@ def web_crawl():
        return get_json_result(data=True)

@manager.route('/create', methods=['POST']) # noqa: F821
|
||||
@manager.route("/create", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("name", "kb_id")
|
||||
def create():
|
||||
req = request.json
|
||||
kb_id = req["kb_id"]
|
||||
if not kb_id:
|
||||
return get_json_result(
|
||||
data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if len(req["name"].encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_json_result(data=False, message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if req["name"].strip() == "":
|
||||
return get_json_result(data=False, message="File name can't be empty.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
req["name"] = req["name"].strip()
|
||||
|
||||
try:
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
return get_data_error_result(message="Can't find this knowledgebase!")
|
||||
|
||||
if DocumentService.query(name=req["name"], kb_id=kb_id):
|
||||
return get_data_error_result(
|
||||
message="Duplicated document name in the same knowledgebase.")
|
||||
return get_data_error_result(message="Duplicated document name in the same knowledgebase.")
|
||||
|
||||
doc = DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": FileType.VIRTUAL,
|
||||
"name": req["name"],
|
||||
"location": "",
|
||||
"size": 0
|
||||
})
|
||||
doc = DocumentService.insert(
|
||||
{
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": FileType.VIRTUAL,
|
||||
"name": req["name"],
|
||||
"location": "",
|
||||
"size": 0,
|
||||
}
|
||||
)
|
||||
return get_json_result(data=doc.to_json())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@manager.route("/list", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def list_docs():
|
||||
kb_id = request.args.get("kb_id")
|
||||
if not kb_id:
|
||||
return get_json_result(
|
||||
data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if KnowledgebaseService.query(
|
||||
tenant_id=tenant.tenant_id, id=kb_id):
|
||||
if KnowledgebaseService.query(tenant_id=tenant.tenant_id, id=kb_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of knowledgebase authorized for this operation.',
|
||||
code=settings.RetCode.OPERATING_ERROR)
|
||||
return get_json_result(data=False, message="Only owner of knowledgebase authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
keywords = request.args.get("keywords", "")
|
||||
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 15))
|
||||
page_number = int(request.args.get("page", 0))
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
req = request.get_json()
|
||||
|
||||
run_status = req.get("run_status", [])
|
||||
if run_status:
|
||||
invalid_status = {s for s in run_status if s not in VALID_TASK_STATUS}
|
||||
if invalid_status:
|
||||
return get_data_error_result(message=f"Invalid filter run status conditions: {', '.join(invalid_status)}")
|
||||
|
||||
types = req.get("types", [])
|
||||
if types:
|
||||
invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
|
||||
if invalid_types:
|
||||
return get_data_error_result(message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}")
|
||||
|
||||
try:
|
||||
docs, tol = DocumentService.get_by_kb_id(
|
||||
kb_id, page_number, items_per_page, orderby, desc, keywords)
|
||||
docs, tol = DocumentService.get_by_kb_id(kb_id, page_number, items_per_page, orderby, desc, keywords, run_status, types)
|
||||
|
||||
for doc_item in docs:
|
||||
if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
|
||||
doc_item['thumbnail'] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"
|
||||
if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
|
||||
doc_item["thumbnail"] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"
|
||||
|
||||
return get_json_result(data={"total": tol, "docs": docs})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/infos', methods=['POST']) # noqa: F821
|
||||
@manager.route("/infos", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def docinfos():
|
||||
req = request.json
|
||||
doc_ids = req["doc_ids"]
|
||||
for doc_id in doc_ids:
|
||||
if not DocumentService.accessible(doc_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
docs = DocumentService.get_by_ids(doc_ids)
|
||||
return get_json_result(data=list(docs.dicts()))
|
||||
|
||||
|
||||
@manager.route('/thumbnails', methods=['GET']) # noqa: F821
|
||||
@manager.route("/thumbnails", methods=["GET"]) # noqa: F821
|
||||
# @login_required
|
||||
def thumbnails():
|
||||
doc_ids = request.args.get("doc_ids").split(",")
|
||||
if not doc_ids:
|
||||
return get_json_result(
|
||||
data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
try:
|
||||
docs = DocumentService.get_thumbnails(doc_ids)
|
||||
|
||||
for doc_item in docs:
|
||||
if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
|
||||
doc_item['thumbnail'] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"
|
||||
if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
|
||||
doc_item["thumbnail"] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"
|
||||
|
||||
return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/change_status', methods=['POST']) # noqa: F821
|
||||
@manager.route("/change_status", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "status")
|
||||
def change_status():
|
||||
req = request.json
|
||||
if str(req["status"]) not in ["0", "1"]:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='"Status" must be either 0 or 1!',
|
||||
code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message='"Status" must be either 0 or 1!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if not DocumentService.accessible(req["doc_id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
@ -277,23 +278,19 @@ def change_status():
|
||||
return get_data_error_result(message="Document not found!")
|
||||
e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
return get_data_error_result(message="Can't find this knowledgebase!")
|
||||
|
||||
if not DocumentService.update_by_id(
|
||||
req["doc_id"], {"status": str(req["status"])}):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document update)!")
|
||||
if not DocumentService.update_by_id(req["doc_id"], {"status": str(req["status"])}):
|
||||
return get_data_error_result(message="Database error (Document update)!")
|
||||
|
||||
status = int(req["status"])
|
||||
settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status},
|
||||
search.index_name(kb.tenant_id), doc.kb_id)
|
||||
settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status}, search.index_name(kb.tenant_id), doc.kb_id)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
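A minimal sketch of toggling a document's availability through the endpoint above; the path and host are assumptions, and status must be the string "0" or "1".
import requests

session = requests.Session()  # assumes an authenticated session
resp = session.post(
    "http://localhost:9380/v1/document/change_status",  # assumed route for this handler
    json={"doc_id": "<doc_id>", "status": "1"},          # "1" enables, "0" disables
)
print(resp.json())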
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST']) # noqa: F821
|
||||
@manager.route("/rm", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id")
|
||||
def rm():
|
||||
@ -304,16 +301,13 @@ def rm():
|
||||
|
||||
for doc_id in doc_ids:
|
||||
if not DocumentService.accessible4deletion(doc_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
errors = ""
|
||||
kb_table_num_map = {}
|
||||
for doc_id in doc_ids:
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
@ -327,14 +321,25 @@ def rm():
|
||||
|
||||
TaskService.filter_delete([Task.doc_id == doc_id])
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
return get_data_error_result(message="Database error (Document removal)!")
|
||||
|
||||
f2d = File2DocumentService.get_by_document_id(doc_id)
|
||||
deleted_file_count = FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
deleted_file_count = 0
|
||||
if f2d:
|
||||
deleted_file_count = FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc_id)
|
||||
if deleted_file_count > 0:
|
||||
STORAGE_IMPL.rm(b, n)
|
||||
|
||||
doc_parser = doc.parser_id
|
||||
if doc_parser == ParserType.TABLE:
|
||||
kb_id = doc.kb_id
|
||||
if kb_id not in kb_table_num_map:
|
||||
counts = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
|
||||
kb_table_num_map[kb_id] = counts
|
||||
kb_table_num_map[kb_id] -= 1
|
||||
if kb_table_num_map[kb_id] <= 0:
|
||||
KnowledgebaseService.delete_field_map(kb_id)
|
||||
except Exception as e:
|
||||
errors += str(e)
|
||||
|
||||
@ -344,19 +349,16 @@ def rm():
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/run', methods=['POST']) # noqa: F821
|
||||
@manager.route("/run", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_ids", "run")
|
||||
def run():
|
||||
def run():
|
||||
req = request.json
|
||||
for doc_id in req["doc_ids"]:
|
||||
if not DocumentService.accessible(doc_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
kb_table_num_map = {}
|
||||
for id in req["doc_ids"]:
|
||||
info = {"run": str(req["run"]), "progress": 0}
|
||||
if str(req["run"]) == TaskStatus.RUNNING.value and req.get("delete", False):
|
||||
@ -379,6 +381,17 @@ def run():
|
||||
e, doc = DocumentService.get_by_id(id)
|
||||
doc = doc.to_dict()
|
||||
doc["tenant_id"] = tenant_id
|
||||
|
||||
doc_parser = doc.get("parser_id", ParserType.NAIVE)
|
||||
if doc_parser == ParserType.TABLE:
|
||||
kb_id = doc.get("kb_id")
|
||||
if not kb_id:
|
||||
continue
|
||||
if kb_id not in kb_table_num_map:
|
||||
count = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
|
||||
kb_table_num_map[kb_id] = count
|
||||
if kb_table_num_map[kb_id] <= 0:
|
||||
KnowledgebaseService.delete_field_map(kb_id)
|
||||
bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
|
||||
queue_tasks(doc, bucket, name, 0)
|
||||
|
||||
@ -387,36 +400,28 @@ def run():
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rename', methods=['POST']) # noqa: F821
|
||||
@manager.route("/rename", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "name")
|
||||
def rename():
|
||||
req = request.json
|
||||
if not DocumentService.accessible(req["doc_id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
|
||||
doc.name.lower()).suffix:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message="The extension of file can't be changed",
|
||||
code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
|
||||
return get_json_result(data=False, message="The extension of file can't be changed", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if len(req["name"].encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_json_result(data=False, message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
|
||||
if d.name == req["name"]:
|
||||
return get_data_error_result(
|
||||
message="Duplicated document name in the same knowledgebase.")
|
||||
return get_data_error_result(message="Duplicated document name in the same knowledgebase.")
|
||||
|
||||
if not DocumentService.update_by_id(
|
||||
req["doc_id"], {"name": req["name"]}):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document rename)!")
|
||||
if not DocumentService.update_by_id(req["doc_id"], {"name": req["name"]}):
|
||||
return get_data_error_result(message="Database error (Document rename)!")
|
||||
|
||||
informs = File2DocumentService.get_by_document_id(req["doc_id"])
|
||||
if informs:
|
||||
@ -428,7 +433,7 @@ def rename():
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/get/<doc_id>', methods=['GET']) # noqa: F821
|
||||
@manager.route("/get/<doc_id>", methods=["GET"]) # noqa: F821
|
||||
# @login_required
|
||||
def get(doc_id):
|
||||
try:
|
||||
@ -442,29 +447,22 @@ def get(doc_id):
|
||||
ext = re.search(r"\.([^.]+)$", doc.name)
|
||||
if ext:
|
||||
if doc.type == FileType.VISUAL.value:
|
||||
response.headers.set('Content-Type', 'image/%s' % ext.group(1))
|
||||
response.headers.set("Content-Type", "image/%s" % ext.group(1))
|
||||
else:
|
||||
response.headers.set(
|
||||
'Content-Type',
|
||||
'application/%s' %
|
||||
ext.group(1))
|
||||
response.headers.set("Content-Type", "application/%s" % ext.group(1))
|
||||
return response
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/change_parser', methods=['POST']) # noqa: F821
|
||||
@manager.route("/change_parser", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "parser_id")
|
||||
def change_parser():
|
||||
req = request.json
|
||||
|
||||
if not DocumentService.accessible(req["doc_id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
@ -476,21 +474,16 @@ def change_parser():
|
||||
else:
|
||||
return get_json_result(data=True)
|
||||
|
||||
if ((doc.type == FileType.VISUAL and req["parser_id"] != "picture")
|
||||
or (re.search(
|
||||
r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation")):
|
||||
if (doc.type == FileType.VISUAL and req["parser_id"] != "picture") or (re.search(r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation"):
|
||||
return get_data_error_result(message="Not supported yet!")
|
||||
|
||||
e = DocumentService.update_by_id(doc.id,
|
||||
{"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
|
||||
"run": TaskStatus.UNSTART.value})
|
||||
e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "", "run": TaskStatus.UNSTART.value})
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
if "parser_config" in req:
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if doc.token_num > 0:
|
||||
e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
|
||||
doc.process_duation * -1)
|
||||
e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, doc.process_duation * -1)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
||||
@ -504,7 +497,7 @@ def change_parser():
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/image/<image_id>', methods=['GET']) # noqa: F821
|
||||
@manager.route("/image/<image_id>", methods=["GET"]) # noqa: F821
|
||||
# @login_required
|
||||
def get_image(image_id):
|
||||
try:
|
||||
@ -513,53 +506,46 @@ def get_image(image_id):
|
||||
return get_data_error_result(message="Image not found.")
|
||||
bkt, nm = image_id.split("-")
|
||||
response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
|
||||
response.headers.set('Content-Type', 'image/JPEG')
|
||||
response.headers.set("Content-Type", "image/JPEG")
|
||||
return response
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/upload_and_parse', methods=['POST']) # noqa: F821
|
||||
@manager.route("/upload_and_parse", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("conversation_id")
|
||||
def upload_and_parse():
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if "file" not in request.files:
|
||||
return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
file_objs = request.files.getlist('file')
|
||||
file_objs = request.files.getlist("file")
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == '':
|
||||
return get_json_result(
|
||||
data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if file_obj.filename == "":
|
||||
return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)
|
||||
|
||||
return get_json_result(data=doc_ids)
|
||||
|
||||
|
||||
@manager.route('/parse', methods=['POST']) # noqa: F821
|
||||
@manager.route("/parse", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def parse():
|
||||
url = request.json.get("url") if request.json else ""
|
||||
if url:
|
||||
if not is_valid_url(url):
|
||||
return get_json_result(
|
||||
data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
download_path = os.path.join(get_project_base_directory(), "logs/downloads")
|
||||
os.makedirs(download_path, exist_ok=True)
|
||||
from seleniumwire.webdriver import Chrome, ChromeOptions
|
||||
|
||||
options = ChromeOptions()
|
||||
options.add_argument('--headless')
|
||||
options.add_argument('--disable-gpu')
|
||||
options.add_argument('--no-sandbox')
|
||||
options.add_argument('--disable-dev-shm-usage')
|
||||
options.add_experimental_option('prefs', {
|
||||
'download.default_directory': download_path,
|
||||
'download.prompt_for_download': False,
|
||||
'download.directory_upgrade': True,
|
||||
'safebrowsing.enabled': True
|
||||
})
|
||||
options.add_argument("--headless")
|
||||
options.add_argument("--disable-gpu")
|
||||
options.add_argument("--no-sandbox")
|
||||
options.add_argument("--disable-dev-shm-usage")
|
||||
options.add_experimental_option("prefs", {"download.default_directory": download_path, "download.prompt_for_download": False, "download.directory_upgrade": True, "safebrowsing.enabled": True})
|
||||
driver = Chrome(options=options)
|
||||
driver.get(url)
|
||||
res_headers = [r.response.headers for r in driver.requests if r and r.response]
|
||||
@ -582,51 +568,41 @@ def parse():
|
||||
|
||||
r = re.search(r"filename=\"([^\"]+)\"", str(res_headers))
|
||||
if not r or not r.group(1):
|
||||
return get_json_result(
|
||||
data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
f = File(r.group(1), os.path.join(download_path, r.group(1)))
|
||||
txt = FileService.parse_docs([f], current_user.id)
|
||||
return get_json_result(data=txt)
|
||||
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(
|
||||
data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if "file" not in request.files:
|
||||
return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
file_objs = request.files.getlist('file')
|
||||
file_objs = request.files.getlist("file")
|
||||
txt = FileService.parse_docs(file_objs, current_user.id)
|
||||
|
||||
return get_json_result(data=txt)
|
||||
|
||||
|
||||
@manager.route('/set_meta', methods=['POST']) # noqa: F821
|
||||
@manager.route("/set_meta", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "meta")
|
||||
def set_meta():
|
||||
req = request.json
|
||||
if not DocumentService.accessible(req["doc_id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
meta = json.loads(req["meta"])
|
||||
except Exception as e:
|
||||
return get_json_result(
|
||||
data=False, message=f'Json syntax error: {e}', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message=f"Json syntax error: {e}", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if not isinstance(meta, dict):
|
||||
return get_json_result(
|
||||
data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
return get_json_result(data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)
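As the checks above require, meta is passed as a JSON-encoded string that decodes to a plain object; a small sketch of a valid payload, with illustrative field values.
import json

payload = {
    "doc_id": "<doc_id>",
    # json.loads(payload["meta"]) must yield a dict, otherwise the request is rejected above.
    "meta": json.dumps({"author": "Alice", "year": "2024"}),
}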
|
||||
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
if not DocumentService.update_by_id(
|
||||
req["doc_id"], {"meta_fields": meta}):
|
||||
return get_data_error_result(
|
||||
message="Database error (meta updates)!")
|
||||
if not DocumentService.update_by_id(req["doc_id"], {"meta_fields": meta}):
|
||||
return get_data_error_result(message="Database error (meta updates)!")
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
|
||||
@ -257,6 +257,7 @@ def rm():
|
||||
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
FileService.delete_folder_by_pf_id(current_user.id, file_id)
|
||||
else:
|
||||
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
if not FileService.delete(file):
|
||||
return get_data_error_result(
|
||||
message="Database error (File removal)!")
|
||||
|
||||
@ -34,6 +34,7 @@ from api import settings
|
||||
from rag.nlp import search
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
from rag.settings import PAGERANK_FLD
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
|
||||
@manager.route('/create', methods=['post']) # noqa: F821
|
||||
@ -44,11 +45,11 @@ def create():
|
||||
dataset_name = req["name"]
|
||||
if not isinstance(dataset_name, str):
|
||||
return get_data_error_result(message="Dataset name must be string.")
|
||||
if dataset_name == "":
|
||||
if dataset_name.strip() == "":
|
||||
return get_data_error_result(message="Dataset name can't be empty.")
|
||||
if len(dataset_name) >= DATASET_NAME_LIMIT:
|
||||
if len(dataset_name.encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(
|
||||
message=f"Dataset name length is {len(dataset_name)} which is large than {DATASET_NAME_LIMIT}")
|
||||
message=f"Dataset name length is {len(dataset_name)} which is larger than {DATASET_NAME_LIMIT}")
|
||||
|
||||
dataset_name = dataset_name.strip()
|
||||
dataset_name = duplicate_name(
|
||||
@ -58,6 +59,7 @@ def create():
|
||||
status=StatusEnum.VALID.value)
|
||||
try:
|
||||
req["id"] = get_uuid()
|
||||
req["name"] = dataset_name
|
||||
req["tenant_id"] = current_user.id
|
||||
req["created_by"] = current_user.id
|
||||
e, t = TenantService.get_by_id(current_user.id)
|
||||
@ -77,7 +79,15 @@ def create():
|
||||
@not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
|
||||
def update():
|
||||
req = request.json
|
||||
if not isinstance(req["name"], str):
|
||||
return get_data_error_result(message="Dataset name must be string.")
|
||||
if req["name"].strip() == "":
|
||||
return get_data_error_result(message="Dataset name can't be empty.")
|
||||
if len(req["name"].encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(
|
||||
message=f"Dataset name length is {len(req['name'])} which is large than {DATASET_NAME_LIMIT}")
|
||||
req["name"] = req["name"].strip()
|
||||
|
||||
if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
@ -99,13 +109,13 @@ def update():
|
||||
if req.get("parser_id", "") == "tag" and os.environ.get('DOC_ENGINE', "elasticsearch") == "infinity":
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='The chunk method Tag has not been supported by Infinity yet.',
|
||||
message='The chunking method Tag has not been supported by Infinity yet.',
|
||||
code=settings.RetCode.OPERATING_ERROR
|
||||
)
|
||||
|
||||
if req["name"].lower() != kb.name.lower() \
|
||||
and len(
|
||||
KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
|
||||
KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) >= 1:
|
||||
return get_data_error_result(
|
||||
message="Duplicated knowledgebase name.")
|
||||
|
||||
@ -114,6 +124,9 @@ def update():
|
||||
return get_data_error_result()
|
||||
|
||||
if kb.pagerank != req.get("pagerank", 0):
|
||||
if os.environ.get("DOC_ENGINE", "elasticsearch") != "elasticsearch":
|
||||
return get_data_error_result(message="'pagerank' can only be set when doc_engine is elasticsearch")
|
||||
|
||||
if req.get("pagerank", 0) > 0:
|
||||
settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]},
|
||||
search.index_name(kb.tenant_id), kb.id)
|
||||
@ -152,6 +165,7 @@ def detail():
|
||||
if not kb:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
kb["size"] = DocumentService.get_total_size_by_kb_id(kb_id=kb["id"],keywords="", run_status=[], types=[])
|
||||
return get_json_result(data=kb)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@ -165,7 +179,10 @@ def list_kbs():
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
parser_id = request.args.get("parser_id")
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
req = request.get_json()
|
||||
owner_ids = req.get("owner_ids", [])
|
||||
@ -182,9 +199,9 @@ def list_kbs():
|
||||
tenants, current_user.id, 0,
|
||||
0, orderby, desc, keywords, parser_id)
|
||||
kbs = [kb for kb in kbs if kb["tenant_id"] in tenants]
|
||||
total = len(kbs)
|
||||
if page_number and items_per_page:
|
||||
kbs = kbs[(page_number-1)*items_per_page:page_number*items_per_page]
|
||||
total = len(kbs)
|
||||
return get_json_result(data={"kbs": kbs, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
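The reordering in this hunk computes the total before slicing, so the reported count covers every accessible knowledgebase rather than just the current page; a standalone sketch of that behavior, with an illustrative helper name.
def paginate(items, page_number, items_per_page):
    total = len(items)  # count first, so "total" reflects all matches
    if page_number and items_per_page:
        items = items[(page_number - 1) * items_per_page : page_number * items_per_page]
    return items, total

docs, total = paginate(list(range(95)), page_number=2, items_per_page=30)
assert total == 95 and len(docs) == 30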
|
||||
@ -224,6 +241,8 @@ def rm():
|
||||
for kb in kbs:
|
||||
settings.docStoreConn.delete({"kb_id": kb.id}, search.index_name(kb.tenant_id), kb.id)
|
||||
settings.docStoreConn.deleteIdx(search.index_name(kb.tenant_id), kb.id)
|
||||
if hasattr(STORAGE_IMPL, 'remove_bucket'):
|
||||
STORAGE_IMPL.remove_bucket(kb.id)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
12
api/apps/plugin_app.py
Normal file
@ -0,0 +1,12 @@
|
||||
from flask import Response
|
||||
from flask_login import login_required
|
||||
from api.utils.api_utils import get_json_result
|
||||
from plugin import GlobalPluginManager
|
||||
|
||||
@manager.route('/llm_tools', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def llm_tools() -> Response:
|
||||
tools = GlobalPluginManager.get_llm_tools()
|
||||
tools_metadata = [t.get_metadata() for t in tools]
|
||||
|
||||
return get_json_result(data=tools_metadata)
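A usage sketch for the new plugin listing endpoint; the /v1/plugin prefix and host are assumptions about how this blueprint is mounted.
import requests

session = requests.Session()  # assumes an authenticated session cookie
resp = session.get("http://localhost:9380/v1/plugin/llm_tools")  # assumed mount point for plugin_app
for tool in resp.json()["data"]:
    print(tool)  # each entry is the metadata dict returned by get_metadata()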
|
||||
@ -14,8 +14,14 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import json
|
||||
import time
|
||||
from typing import Any, cast
|
||||
from api.db.services.canvas_service import UserCanvasService
|
||||
from api.utils.api_utils import get_error_data_result, token_required
|
||||
from api.db.services.user_canvas_version import UserCanvasVersionService
|
||||
from api.settings import RetCode
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_data_error_result, get_error_data_result, get_json_result, token_required
|
||||
from api.utils.api_utils import get_result
|
||||
from flask import request
|
||||
|
||||
@ -37,3 +43,86 @@ def list_agents(tenant_id):
|
||||
desc = True
|
||||
canvas = UserCanvasService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,title)
|
||||
return get_result(data=canvas)
|
||||
|
||||
|
||||
@manager.route("/agents", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def create_agent(tenant_id: str):
|
||||
req: dict[str, Any] = cast(dict[str, Any], request.json)
|
||||
req["user_id"] = tenant_id
|
||||
|
||||
if req.get("dsl") is not None:
|
||||
if not isinstance(req["dsl"], str):
|
||||
req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
|
||||
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
else:
|
||||
return get_json_result(data=False, message="No DSL data in request.", code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if req.get("title") is not None:
|
||||
req["title"] = req["title"].strip()
|
||||
else:
|
||||
return get_json_result(data=False, message="No title in request.", code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if UserCanvasService.query(user_id=tenant_id, title=req["title"]):
|
||||
return get_data_error_result(message=f"Agent with title {req['title']} already exists.")
|
||||
|
||||
agent_id = get_uuid()
|
||||
req["id"] = agent_id
|
||||
|
||||
if not UserCanvasService.save(**req):
|
||||
return get_data_error_result(message="Fail to create agent.")
|
||||
|
||||
UserCanvasVersionService.insert(
|
||||
user_canvas_id=agent_id,
|
||||
title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
|
||||
dsl=req["dsl"]
|
||||
)
|
||||
|
||||
return get_json_result(data=True)
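A hedged client sketch for the new agent-creation endpoint; the /api/v1 prefix, the API key header, and the dsl keys shown are assumptions, and dsl may be passed as either an object or a JSON string, per the handler above.
import requests

headers = {"Authorization": "Bearer <API_KEY>"}  # token_required endpoints expect an API token
resp = requests.post(
    "http://localhost:9380/api/v1/agents",       # assumed prefix for the SDK blueprint
    headers=headers,
    json={
        "title": "demo agent",
        "dsl": {"components": {}, "graph": {}},  # placeholder DSL; a JSON string also works
    },
)
print(resp.json())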
|
||||
|
||||
|
||||
@manager.route("/agents/<agent_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update_agent(tenant_id: str, agent_id: str):
|
||||
req: dict[str, Any] = {k: v for k, v in cast(dict[str, Any], request.json).items() if v is not None}
|
||||
req["user_id"] = tenant_id
|
||||
|
||||
if req.get("dsl") is not None:
|
||||
if not isinstance(req["dsl"], str):
|
||||
req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
|
||||
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
|
||||
if req.get("title") is not None:
|
||||
req["title"] = req["title"].strip()
|
||||
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_json_result(
|
||||
data=False, message="Only owner of canvas authorized for this operation.",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
UserCanvasService.update_by_id(agent_id, req)
|
||||
|
||||
if req.get("dsl") is not None:
|
||||
UserCanvasVersionService.insert(
|
||||
user_canvas_id=agent_id,
|
||||
title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
|
||||
dsl=req["dsl"]
|
||||
)
|
||||
|
||||
UserCanvasVersionService.delete_all_versions(agent_id)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route("/agents/<agent_id>", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete_agent(tenant_id: str, agent_id: str):
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_json_result(
|
||||
data=False, message="Only owner of canvas authorized for this operation.",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
UserCanvasService.delete_by_id(agent_id)
|
||||
return get_json_result(data=True)
|
||||
|
||||
@ -16,6 +16,7 @@
|
||||
import logging
|
||||
|
||||
from flask import request
|
||||
|
||||
from api import settings
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.dialog_service import DialogService
|
||||
@ -23,15 +24,14 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import TenantLLMService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_error_data_result, token_required, get_result, check_duplicate_ids
|
||||
from api.utils.api_utils import check_duplicate_ids, get_error_data_result, get_result, token_required
|
||||
|
||||
|
||||
|
||||
@manager.route('/chats', methods=['POST']) # noqa: F821
|
||||
@manager.route("/chats", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def create(tenant_id):
|
||||
req = request.json
|
||||
ids = [i for i in req.get("dataset_ids", []) if i]
|
||||
ids = [i for i in req.get("dataset_ids", []) if i]
|
||||
for kb_id in ids:
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
|
||||
if not kbs:
|
||||
@ -40,34 +40,30 @@ def create(tenant_id):
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) > 1:
|
||||
return get_result(message='Datasets use different embedding models."',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
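The comparison above relies on split_model_name_and_factory() stripping the vendor suffix before the set comparison; a tiny sketch of that idea, assuming an "@factory" style suffix (the exact separator is an assumption).
def base_model_name(embd_id: str) -> str:
    # Illustrative only: drop a trailing "@Factory" marker before comparing models.
    return embd_id.split("@", 1)[0]

ids = ["BAAI/bge-large-zh-v1.5@BAAI", "BAAI/bge-large-zh-v1.5"]
assert len({base_model_name(i) for i in ids}) == 1  # same underlying model, so no error is raised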
|
||||
req["kb_ids"] = ids
|
||||
# llm
|
||||
llm = req.get("llm")
|
||||
if llm:
|
||||
if "model_name" in llm:
|
||||
req["llm_id"] = llm.pop("model_name")
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
if req.get("llm_id") is not None:
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
req["llm_setting"] = req.pop("llm")
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
return get_error_data_result(message="Tenant not found!")
|
||||
# prompt
|
||||
prompt = req.get("prompt")
|
||||
key_mapping = {"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
|
||||
key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
|
||||
if prompt:
|
||||
for new_key, old_key in key_mapping.items():
|
||||
if old_key in prompt:
|
||||
@ -85,9 +81,7 @@ def create(tenant_id):
|
||||
req["rerank_id"] = req.get("rerank_id", "")
|
||||
if req.get("rerank_id"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
|
||||
llm_name=req.get("rerank_id"),
|
||||
model_type="rerank"):
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
|
||||
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
|
||||
if not req.get("llm_id"):
|
||||
req["llm_id"] = tenant.llm_id
|
||||
@ -106,27 +100,24 @@ def create(tenant_id):
|
||||
{knowledge}
|
||||
The above is the knowledge base.""",
|
||||
"prologue": "Hi! I'm your assistant, what can I do for you?",
|
||||
"parameters": [
|
||||
{"key": "knowledge", "optional": False}
|
||||
],
|
||||
"parameters": [{"key": "knowledge", "optional": False}],
|
||||
"empty_response": "Sorry! No relevant content was found in the knowledge base!",
|
||||
"quote": True,
|
||||
"tts": False,
|
||||
"refine_multiturn": True
|
||||
"refine_multiturn": True,
|
||||
}
|
||||
key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
|
||||
if "prompt_config" not in req:
|
||||
req['prompt_config'] = {}
|
||||
req["prompt_config"] = {}
|
||||
for key in key_list_2:
|
||||
temp = req['prompt_config'].get(key)
|
||||
if (not temp and key == 'system') or (key not in req["prompt_config"]):
|
||||
req['prompt_config'][key] = default_prompt[key]
|
||||
for p in req['prompt_config']["parameters"]:
|
||||
temp = req["prompt_config"].get(key)
|
||||
if (not temp and key == "system") or (key not in req["prompt_config"]):
|
||||
req["prompt_config"][key] = default_prompt[key]
|
||||
for p in req["prompt_config"]["parameters"]:
|
||||
if p["optional"]:
|
||||
continue
|
||||
if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_error_data_result(
|
||||
message="Parameter '{}' is not used".format(p["key"]))
|
||||
if req["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
|
||||
# save
|
||||
if not DialogService.save(**req):
|
||||
return get_error_data_result(message="Fail to new a chat!")
|
||||
@ -141,10 +132,7 @@ def create(tenant_id):
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
@ -156,55 +144,47 @@ def create(tenant_id):
|
||||
return get_result(data=res)
|
||||
|
||||
|
||||
@manager.route('/chats/<chat_id>', methods=['PUT']) # noqa: F821
|
||||
@manager.route("/chats/<chat_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update(tenant_id, chat_id):
|
||||
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(message='You do not own the chat')
|
||||
return get_error_data_result(message="You do not own the chat")
|
||||
req = request.json
|
||||
ids = req.get("dataset_ids")
|
||||
if "show_quotation" in req:
|
||||
req["do_refer"] = req.pop("show_quotation")
|
||||
if "dataset_ids" in req:
|
||||
if not ids:
|
||||
return get_error_data_result("`dataset_ids` can't be empty")
|
||||
if ids:
|
||||
for kb_id in ids:
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {kb_id}")
|
||||
kbs = KnowledgebaseService.query(id=kb_id)
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(ids)
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) != 1:
|
||||
return get_result(
|
||||
message='Datasets use different embedding models."',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req["kb_ids"] = ids
|
||||
if ids is not None:
|
||||
for kb_id in ids:
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {kb_id}")
|
||||
kbs = KnowledgebaseService.query(id=kb_id)
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(ids)
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) != 1:
|
||||
return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req["kb_ids"] = ids
|
||||
llm = req.get("llm")
|
||||
if llm:
|
||||
if "model_name" in llm:
|
||||
req["llm_id"] = llm.pop("model_name")
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
if req.get("llm_id") is not None:
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
req["llm_setting"] = req.pop("llm")
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
return get_error_data_result(message="Tenant not found!")
|
||||
# prompt
|
||||
prompt = req.get("prompt")
|
||||
key_mapping = {"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
|
||||
key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
|
||||
if prompt:
|
||||
for new_key, old_key in key_mapping.items():
|
||||
if old_key in prompt:
|
||||
@ -217,16 +197,12 @@ def update(tenant_id, chat_id):
|
||||
res = res.to_json()
|
||||
if req.get("rerank_id"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
|
||||
llm_name=req.get("rerank_id"),
|
||||
model_type="rerank"):
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
|
||||
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
|
||||
if "name" in req:
|
||||
if not req.get("name"):
|
||||
return get_error_data_result(message="`name` cannot be empty.")
|
||||
if req["name"].lower() != res["name"].lower() \
|
||||
and len(
|
||||
DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
|
||||
if req["name"].lower() != res["name"].lower() and len(DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
|
||||
return get_error_data_result(message="Duplicated chat name in updating chat.")
|
||||
if "prompt_config" in req:
|
||||
res["prompt_config"].update(req["prompt_config"])
|
||||
@ -249,7 +225,7 @@ def update(tenant_id, chat_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route('/chats', methods=['DELETE']) # noqa: F821
|
||||
@manager.route("/chats", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id):
|
||||
errors = []
|
||||
@ -276,30 +252,23 @@ def delete(tenant_id):
|
||||
temp_dict = {"status": StatusEnum.INVALID.value}
|
||||
DialogService.update_by_id(id, temp_dict)
|
||||
success_count += 1
|
||||
|
||||
|
||||
if errors:
|
||||
if success_count > 0:
|
||||
return get_result(
|
||||
data={"success_count": success_count, "errors": errors},
|
||||
message=f"Partially deleted {success_count} chats with {len(errors)} errors"
|
||||
)
|
||||
return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} chats with {len(errors)} errors")
|
||||
else:
|
||||
return get_error_data_result(message="; ".join(errors))
|
||||
|
||||
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(
|
||||
message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors",
|
||||
data={"success_count": success_count, "errors": duplicate_messages}
|
||||
)
|
||||
return get_result(message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
|
||||
|
||||
return get_result()
|
||||
|
||||
|
||||
|
||||
@manager.route('/chats', methods=['GET']) # noqa: F821
|
||||
@manager.route("/chats", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_chat(tenant_id):
|
||||
id = request.args.get("id")
|
||||
@ -319,13 +288,15 @@ def list_chat(tenant_id):
|
||||
if not chats:
|
||||
return get_result(data=[])
|
||||
list_assts = []
|
||||
key_mapping = {"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight",
|
||||
"do_refer": "show_quotation"}
|
||||
key_mapping = {
|
||||
"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight",
|
||||
"do_refer": "show_quotation",
|
||||
}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
for res in chats:
|
||||
renamed_dict = {}
|
||||
@ -334,10 +305,7 @@ def list_chat(tenant_id):
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
|
||||
@ -14,24 +14,44 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from flask import request
|
||||
from api.db import StatusEnum, FileSource
|
||||
from peewee import OperationalError
|
||||
|
||||
from api import settings
|
||||
from api.db import FileSource, StatusEnum
|
||||
from api.db.db_models import File
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import TenantLLMService, LLMService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api import settings
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import (
|
||||
get_result,
|
||||
token_required,
|
||||
deep_merge,
|
||||
get_error_argument_result,
|
||||
get_error_data_result,
|
||||
valid,
|
||||
get_parser_config, valid_parser_config, dataset_readonly_fields,check_duplicate_ids
|
||||
get_error_operating_result,
|
||||
get_error_permission_result,
|
||||
get_parser_config,
|
||||
get_result,
|
||||
remap_dictionary_keys,
|
||||
token_required,
|
||||
verify_embedding_availability,
|
||||
)
|
||||
from api.utils.validation_utils import (
|
||||
CreateDatasetReq,
|
||||
DeleteDatasetReq,
|
||||
ListDatasetReq,
|
||||
UpdateDatasetReq,
|
||||
validate_and_parse_json_request,
|
||||
validate_and_parse_request_args,
|
||||
)
|
||||
from rag.nlp import search
|
||||
from rag.settings import PAGERANK_FLD
|
||||
|
||||
|
||||
@manager.route("/datasets", methods=["POST"]) # noqa: F821
|
||||
@ -62,14 +82,23 @@ def create(tenant_id):
|
||||
name:
|
||||
type: string
|
||||
description: Name of the dataset.
|
||||
avatar:
|
||||
type: string
|
||||
description: Base64 encoding of the avatar.
|
||||
description:
|
||||
type: string
|
||||
description: Description of the dataset.
|
||||
embedding_model:
|
||||
type: string
|
||||
description: Embedding model Name.
|
||||
permission:
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Dataset permission.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
|
||||
"presentation", "picture", "one", "email", "tag"
|
||||
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
|
||||
"picture", "presentation", "qa", "table", "tag"
|
||||
]
|
||||
description: Chunking method.
|
||||
parser_config:
|
||||
@ -84,106 +113,47 @@ def create(tenant_id):
|
||||
data:
|
||||
type: object
|
||||
"""
|
||||
req = request.json
|
||||
for k in req.keys():
|
||||
if dataset_readonly_fields(k):
|
||||
return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
|
||||
e, t = TenantService.get_by_id(tenant_id)
|
||||
permission = req.get("permission")
|
||||
chunk_method = req.get("chunk_method")
|
||||
parser_config = req.get("parser_config")
|
||||
valid_parser_config(parser_config)
|
||||
valid_permission = ["me", "team"]
|
||||
valid_chunk_method = [
|
||||
"naive",
|
||||
"manual",
|
||||
"qa",
|
||||
"table",
|
||||
"paper",
|
||||
"book",
|
||||
"laws",
|
||||
"presentation",
|
||||
"picture",
|
||||
"one",
|
||||
"email",
|
||||
"tag"
|
||||
]
|
||||
check_validation = valid(
|
||||
permission,
|
||||
valid_permission,
|
||||
chunk_method,
|
||||
valid_chunk_method,
|
||||
)
|
||||
if check_validation:
|
||||
return check_validation
|
||||
req["parser_config"] = get_parser_config(chunk_method, parser_config)
|
||||
if "tenant_id" in req:
|
||||
return get_error_data_result(message="`tenant_id` must not be provided")
|
||||
if "chunk_count" in req or "document_count" in req:
|
||||
return get_error_data_result(
|
||||
message="`chunk_count` or `document_count` must not be provided"
|
||||
)
|
||||
if "name" not in req:
|
||||
return get_error_data_result(message="`name` is not empty!")
|
||||
req["id"] = get_uuid()
|
||||
req["name"] = req["name"].strip()
|
||||
if req["name"] == "":
|
||||
return get_error_data_result(message="`name` is not empty string!")
|
||||
if len(req["name"]) >= 128:
|
||||
return get_error_data_result(
|
||||
message="Dataset name should not be longer than 128 characters."
|
||||
)
|
||||
if KnowledgebaseService.query(
|
||||
name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
|
||||
):
|
||||
return get_error_data_result(
|
||||
message="Duplicated dataset name in creating dataset."
|
||||
)
|
||||
req["tenant_id"] = tenant_id
|
||||
req["created_by"] = tenant_id
|
||||
if not req.get("embedding_model"):
|
||||
req["embedding_model"] = t.embd_id
|
||||
else:
|
||||
valid_embedding_models = [
|
||||
"BAAI/bge-large-zh-v1.5",
|
||||
"maidalun1020/bce-embedding-base_v1",
|
||||
]
|
||||
embd_model = LLMService.query(
|
||||
llm_name=req["embedding_model"], model_type="embedding"
|
||||
)
|
||||
if embd_model:
|
||||
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding",llm_name=req.get("embedding_model"),):
|
||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
||||
if not embd_model:
|
||||
embd_model=TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model"))
|
||||
if not embd_model:
|
||||
return get_error_data_result(
|
||||
f"`embedding_model` {req.get('embedding_model')} doesn't exist"
|
||||
)
|
||||
key_mapping = {
|
||||
"chunk_num": "chunk_count",
|
||||
"doc_num": "document_count",
|
||||
"parser_id": "chunk_method",
|
||||
"embd_id": "embedding_model",
|
||||
}
|
||||
mapped_keys = {
|
||||
new_key: req[old_key]
|
||||
for new_key, old_key in key_mapping.items()
|
||||
if old_key in req
|
||||
}
|
||||
req.update(mapped_keys)
|
||||
flds = list(req.keys())
|
||||
for f in flds:
|
||||
if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
|
||||
del req[f]
|
||||
if not KnowledgebaseService.save(**req):
|
||||
return get_error_data_result(message="Create dataset error.(Database error)")
|
||||
renamed_data = {}
|
||||
e, k = KnowledgebaseService.get_by_id(req["id"])
|
||||
for key, value in k.to_dict().items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_data[new_key] = value
|
||||
return get_result(data=renamed_data)
|
||||
# Field name transformations during model dump:
|
||||
# | Original | Dump Output |
|
||||
# |----------------|-------------|
|
||||
# | embedding_model| embd_id |
|
||||
# | chunk_method | parser_id |
|
||||
req, err = validate_and_parse_json_request(request, CreateDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
try:
|
||||
if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||
return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")
|
||||
|
||||
req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
|
||||
req["id"] = get_uuid()
|
||||
req["tenant_id"] = tenant_id
|
||||
req["created_by"] = tenant_id
|
||||
|
||||
ok, t = TenantService.get_by_id(tenant_id)
|
||||
if not ok:
|
||||
return get_error_permission_result(message="Tenant not found")
|
||||
|
||||
if not req.get("embd_id"):
|
||||
req["embd_id"] = t.embd_id
|
||||
else:
|
||||
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
|
||||
if not ok:
|
||||
return err
|
||||
|
||||
if not KnowledgebaseService.save(**req):
|
||||
return get_error_data_result(message="Create dataset error.(Database error)")
|
||||
|
||||
ok, k = KnowledgebaseService.get_by_id(req["id"])
|
||||
if not ok:
|
||||
return get_error_data_result(message="Dataset created failed")
|
||||
|
||||
response_data = remap_dictionary_keys(k.to_dict())
|
||||
return get_result(data=response_data)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
|
||||
@manager.route("/datasets", methods=["DELETE"]) # noqa: F821
|
||||
@ -208,75 +178,81 @@ def delete(tenant_id):
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- ids
|
||||
properties:
|
||||
ids:
|
||||
type: array
|
||||
type: array or null
|
||||
items:
|
||||
type: string
|
||||
description: List of dataset IDs to delete.
|
||||
description: |
|
||||
Specifies the datasets to delete:
|
||||
- If `null`, all datasets will be deleted.
|
||||
- If an array of IDs, only the specified datasets will be deleted.
|
||||
- If an empty array, no datasets will be deleted.
|
||||
responses:
|
||||
200:
|
||||
description: Successful operation.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
errors = []
|
||||
success_count = 0
|
||||
req = request.json
|
||||
if not req:
|
||||
ids = None
|
||||
else:
|
||||
ids = req.get("ids")
|
||||
if not ids:
|
||||
id_list = []
|
||||
kbs = KnowledgebaseService.query(tenant_id=tenant_id)
|
||||
for kb in kbs:
|
||||
id_list.append(kb.id)
|
||||
else:
|
||||
id_list = ids
|
||||
unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "dataset")
|
||||
id_list = unique_id_list
|
||||
req, err = validate_and_parse_json_request(request, DeleteDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
for id in id_list:
|
||||
kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
|
||||
if not kbs:
|
||||
errors.append(f"You don't own the dataset {id}")
|
||||
continue
|
||||
for doc in DocumentService.query(kb_id=id):
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
errors.append(f"Remove document error for dataset {id}")
|
||||
try:
|
||||
kb_id_instance_pairs = []
|
||||
if req["ids"] is None:
|
||||
kbs = KnowledgebaseService.query(tenant_id=tenant_id)
|
||||
for kb in kbs:
|
||||
kb_id_instance_pairs.append((kb.id, kb))
|
||||
|
||||
else:
|
||||
error_kb_ids = []
|
||||
for kb_id in req["ids"]:
|
||||
kb = KnowledgebaseService.get_or_none(id=kb_id, tenant_id=tenant_id)
|
||||
if kb is None:
|
||||
error_kb_ids.append(kb_id)
|
||||
continue
|
||||
kb_id_instance_pairs.append((kb_id, kb))
|
||||
if len(error_kb_ids) > 0:
|
||||
return get_error_permission_result(message=f"""User '{tenant_id}' lacks permission for datasets: '{", ".join(error_kb_ids)}'""")
|
||||
|
||||
errors = []
|
||||
success_count = 0
|
||||
for kb_id, kb in kb_id_instance_pairs:
|
||||
for doc in DocumentService.query(kb_id=kb_id):
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
errors.append(f"Remove document '{doc.id}' error for dataset '{kb_id}'")
|
||||
continue
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[
|
||||
File.source_type == FileSource.KNOWLEDGEBASE,
|
||||
File.id == f2d[0].file_id,
|
||||
]
|
||||
)
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kb.name])
|
||||
if not KnowledgebaseService.delete_by_id(kb_id):
|
||||
errors.append(f"Delete dataset error for {kb_id}")
|
||||
continue
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[
|
||||
File.source_type == FileSource.KNOWLEDGEBASE,
|
||||
File.id == f2d[0].file_id,
|
||||
]
|
||||
)
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
|
||||
if not KnowledgebaseService.delete_by_id(id):
|
||||
errors.append(f"Delete dataset error for {id}")
|
||||
continue
|
||||
success_count += 1
|
||||
if errors:
|
||||
if success_count > 0:
|
||||
return get_result(
|
||||
data={"success_count": success_count, "errors": errors},
|
||||
message=f"Partially deleted {success_count} datasets with {len(errors)} errors"
|
||||
)
|
||||
else:
|
||||
return get_error_data_result(message="; ".join(errors))
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
return get_result(code=settings.RetCode.SUCCESS)
|
||||
success_count += 1
|
||||
|
||||
if not errors:
|
||||
return get_result()
|
||||
|
||||
error_message = f"Successfully deleted {success_count} datasets, {len(errors)} failed. Details: {'; '.join(errors)[:128]}..."
|
||||
if success_count == 0:
|
||||
return get_error_data_result(message=error_message)
|
||||
|
||||
return get_result(data={"success_count": success_count, "errors": errors[:5]}, message=error_message)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>", methods=["PUT"]) # noqa: F821
|
||||
@manager.route("/datasets/<dataset_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -307,16 +283,28 @@ def update(tenant_id, dataset_id):
|
||||
name:
|
||||
type: string
|
||||
description: New name of the dataset.
|
||||
avatar:
|
||||
type: string
|
||||
description: Updated base64 encoding of the avatar.
|
||||
description:
|
||||
type: string
|
||||
description: Updated description of the dataset.
|
||||
embedding_model:
|
||||
type: string
|
||||
description: Updated embedding model name.
|
||||
permission:
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Updated permission.
|
||||
description: Updated dataset permission.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
|
||||
"presentation", "picture", "one", "email", "tag"
|
||||
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
|
||||
"picture", "presentation", "qa", "table", "tag"
|
||||
]
|
||||
description: Updated chunking method.
|
||||
pagerank:
|
||||
type: integer
|
||||
description: Updated page rank.
|
||||
parser_config:
|
||||
type: object
|
||||
description: Updated parser configuration.
|
||||
@ -326,128 +314,67 @@ def update(tenant_id, dataset_id):
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||
return get_error_data_result(message="You don't own the dataset")
|
||||
req = request.json
|
||||
for k in req.keys():
|
||||
if dataset_readonly_fields(k):
|
||||
return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
|
||||
e, t = TenantService.get_by_id(tenant_id)
|
||||
invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id", "create_date", "create_time", "created_by", "status","token_num","update_date","update_time"}
|
||||
if any(key in req for key in invalid_keys):
|
||||
return get_error_data_result(message="The input parameters are invalid.")
|
||||
permission = req.get("permission")
|
||||
chunk_method = req.get("chunk_method")
|
||||
parser_config = req.get("parser_config")
|
||||
valid_parser_config(parser_config)
|
||||
valid_permission = ["me", "team"]
|
||||
valid_chunk_method = [
|
||||
"naive",
|
||||
"manual",
|
||||
"qa",
|
||||
"table",
|
||||
"paper",
|
||||
"book",
|
||||
"laws",
|
||||
"presentation",
|
||||
"picture",
|
||||
"one",
|
||||
"email",
|
||||
"tag"
|
||||
]
|
||||
check_validation = valid(
|
||||
permission,
|
||||
valid_permission,
|
||||
chunk_method,
|
||||
valid_chunk_method,
|
||||
)
|
||||
if check_validation:
|
||||
return check_validation
|
||||
if "tenant_id" in req:
|
||||
if req["tenant_id"] != tenant_id:
|
||||
return get_error_data_result(message="Can't change `tenant_id`.")
|
||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if "parser_config" in req:
|
||||
temp_dict = kb.parser_config
|
||||
temp_dict.update(req["parser_config"])
|
||||
req["parser_config"] = temp_dict
|
||||
if "chunk_count" in req:
|
||||
if req["chunk_count"] != kb.chunk_num:
|
||||
return get_error_data_result(message="Can't change `chunk_count`.")
|
||||
req.pop("chunk_count")
|
||||
if "document_count" in req:
|
||||
if req["document_count"] != kb.doc_num:
|
||||
return get_error_data_result(message="Can't change `document_count`.")
|
||||
req.pop("document_count")
|
||||
if req.get("chunk_method"):
|
||||
if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
|
||||
return get_error_data_result(
|
||||
message="If `chunk_count` is not 0, `chunk_method` is not changeable."
|
||||
)
|
||||
req["parser_id"] = req.pop("chunk_method")
|
||||
if req["parser_id"] != kb.parser_id:
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(chunk_method, parser_config)
|
||||
if "embedding_model" in req:
|
||||
if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
|
||||
return get_error_data_result(
|
||||
message="If `chunk_count` is not 0, `embedding_model` is not changeable."
|
||||
)
|
||||
if not req.get("embedding_model"):
|
||||
return get_error_data_result("`embedding_model` can't be empty")
|
||||
valid_embedding_models = [
|
||||
"BAAI/bge-large-zh-v1.5",
|
||||
"BAAI/bge-base-en-v1.5",
|
||||
"BAAI/bge-large-en-v1.5",
|
||||
"BAAI/bge-small-en-v1.5",
|
||||
"BAAI/bge-small-zh-v1.5",
|
||||
"jinaai/jina-embeddings-v2-base-en",
|
||||
"jinaai/jina-embeddings-v2-small-en",
|
||||
"nomic-ai/nomic-embed-text-v1.5",
|
||||
"sentence-transformers/all-MiniLM-L6-v2",
|
||||
"text-embedding-v2",
|
||||
"text-embedding-v3",
|
||||
"maidalun1020/bce-embedding-base_v1",
|
||||
]
|
||||
embd_model = LLMService.query(
|
||||
llm_name=req["embedding_model"], model_type="embedding"
|
||||
)
|
||||
if embd_model:
|
||||
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding",llm_name=req.get("embedding_model"),):
|
||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
||||
if not embd_model:
|
||||
embd_model=TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model"))
|
||||
# Field name transformations during model dump:
|
||||
# | Original | Dump Output |
|
||||
# |----------------|-------------|
|
||||
# | embedding_model| embd_id |
|
||||
# | chunk_method | parser_id |
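# A minimal illustration (not the project's actual helper): remap_dictionary_keys(),
# used below to build the response, presumably applies the inverse of this table so
# that stored column names are turned back into the public API field names. The
# helper name _example_remap and the literal mapping are assumptions for this sketch only.
#
#   _EXAMPLE_KEY_MAP = {"embd_id": "embedding_model", "parser_id": "chunk_method",
#                       "chunk_num": "chunk_count", "doc_num": "document_count"}
#   def _example_remap(record: dict) -> dict:
#       return {_EXAMPLE_KEY_MAP.get(k, k): v for k, v in record.items()}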
|
||||
extras = {"dataset_id": dataset_id}
|
||||
req, err = validate_and_parse_json_request(request, UpdateDatasetReq, extras=extras, exclude_unset=True)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
if not embd_model:
|
||||
return get_error_data_result(
|
||||
f"`embedding_model` {req.get('embedding_model')} doesn't exist"
|
||||
)
|
||||
req["embd_id"] = req.pop("embedding_model")
|
||||
if "name" in req:
|
||||
req["name"] = req["name"].strip()
|
||||
if len(req["name"]) >= 128:
|
||||
return get_error_data_result(
|
||||
message="Dataset name should not be longer than 128 characters."
|
||||
)
|
||||
if (
|
||||
req["name"].lower() != kb.name.lower()
|
||||
and len(
|
||||
KnowledgebaseService.query(
|
||||
name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
|
||||
)
|
||||
)
|
||||
> 0
|
||||
):
|
||||
return get_error_data_result(
|
||||
message="Duplicated dataset name in updating dataset."
|
||||
)
|
||||
flds = list(req.keys())
|
||||
for f in flds:
|
||||
if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
|
||||
del req[f]
|
||||
if not KnowledgebaseService.update_by_id(kb.id, req):
|
||||
return get_error_data_result(message="Update dataset error.(Database error)")
|
||||
return get_result(code=settings.RetCode.SUCCESS)
|
||||
if not req:
|
||||
return get_error_argument_result(message="No properties were modified")
|
||||
|
||||
try:
|
||||
kb = KnowledgebaseService.get_or_none(id=dataset_id, tenant_id=tenant_id)
|
||||
if kb is None:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{dataset_id}'")
|
||||
|
||||
if req.get("parser_config"):
|
||||
req["parser_config"] = deep_merge(kb.parser_config, req["parser_config"])
|
||||
|
||||
if (chunk_method := req.get("parser_id")) and chunk_method != kb.parser_id:
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(chunk_method, None)
|
||||
elif "parser_config" in req and not req["parser_config"]:
|
||||
del req["parser_config"]
|
||||
|
||||
if "name" in req and req["name"].lower() != kb.name.lower():
|
||||
exists = KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
if exists:
|
||||
return get_error_data_result(message=f"Dataset name '{req['name']}' already exists")
|
||||
|
||||
if "embd_id" in req:
|
||||
if kb.chunk_num != 0 and req["embd_id"] != kb.embd_id:
|
||||
return get_error_data_result(message=f"When chunk_num ({kb.chunk_num}) > 0, embedding_model must remain {kb.embd_id}")
|
||||
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
|
||||
if not ok:
|
||||
return err
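# verify_embedding_availability() is expected to cover the same check as the older
# inline code above: the model must either be one of the built-in embedding models
# or an embedding model this tenant has configured (TenantLLMService); otherwise it
# returns a ready-made error response. This is an assumption based on the code it
# replaces, not a description of its exact implementation.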
|
||||
|
||||
if "pagerank" in req and req["pagerank"] != kb.pagerank:
|
||||
if os.environ.get("DOC_ENGINE", "elasticsearch") == "infinity":
|
||||
return get_error_argument_result(message="'pagerank' can only be set when doc_engine is elasticsearch")
|
||||
|
||||
if req["pagerank"] > 0:
|
||||
settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]}, search.index_name(kb.tenant_id), kb.id)
|
||||
else:
|
||||
# Elasticsearch requires PAGERANK_FLD be non-zero!
|
||||
settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD}, search.index_name(kb.tenant_id), kb.id)
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, req):
|
||||
return get_error_data_result(message="Update dataset error.(Database error)")
|
||||
|
||||
ok, k = KnowledgebaseService.get_by_id(kb.id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Dataset created failed")
|
||||
|
||||
response_data = remap_dictionary_keys(k.to_dict())
|
||||
return get_result(data=response_data)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
|
||||
@manager.route("/datasets", methods=["GET"]) # noqa: F821
|
||||
@ -481,7 +408,7 @@ def list_datasets(tenant_id):
|
||||
name: page_size
|
||||
type: integer
|
||||
required: false
|
||||
default: 1024
|
||||
default: 30
|
||||
description: Number of items per page.
|
||||
- in: query
|
||||
name: orderby
|
||||
@ -508,47 +435,39 @@ def list_datasets(tenant_id):
|
||||
items:
|
||||
type: object
|
||||
"""
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
if id:
|
||||
kbs = KnowledgebaseService.get_kb_by_id(id,tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {id}")
|
||||
if name:
|
||||
kbs = KnowledgebaseService.get_kb_by_name(name,tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {name}")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc", "false").lower() not in ["true", "false"]:
|
||||
return get_error_data_result("desc should be true or false")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
|
||||
kbs = KnowledgebaseService.get_list(
|
||||
[m["tenant_id"] for m in tenants],
|
||||
tenant_id,
|
||||
page_number,
|
||||
items_per_page,
|
||||
orderby,
|
||||
desc,
|
||||
id,
|
||||
name,
|
||||
)
|
||||
renamed_list = []
|
||||
for kb in kbs:
|
||||
key_mapping = {
|
||||
"chunk_num": "chunk_count",
|
||||
"doc_num": "document_count",
|
||||
"parser_id": "chunk_method",
|
||||
"embd_id": "embedding_model",
|
||||
}
|
||||
renamed_data = {}
|
||||
for key, value in kb.items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_data[new_key] = value
|
||||
renamed_list.append(renamed_data)
|
||||
return get_result(data=renamed_list)
|
||||
args, err = validate_and_parse_request_args(request, ListDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
try:
|
||||
kb_id = request.args.get("id")
|
||||
name = args.get("name")
|
||||
if kb_id:
|
||||
kbs = KnowledgebaseService.get_kb_by_id(kb_id, tenant_id)
|
||||
|
||||
if not kbs:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{kb_id}'")
|
||||
if name:
|
||||
kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
|
||||
if not kbs:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{name}'")
|
||||
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
|
||||
kbs = KnowledgebaseService.get_list(
|
||||
[m["tenant_id"] for m in tenants],
|
||||
tenant_id,
|
||||
args["page"],
|
||||
args["page_size"],
|
||||
args["orderby"],
|
||||
args["desc"],
|
||||
kb_id,
|
||||
name,
|
||||
)
|
||||
|
||||
response_data_list = []
|
||||
for kb in kbs:
|
||||
response_data_list.append(remap_dictionary_keys(kb))
|
||||
return get_result(data=response_data_list)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
@ -16,6 +16,7 @@
|
||||
from flask import request, jsonify
|
||||
|
||||
from api.db import LLMType
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api import settings
|
||||
@ -70,12 +71,13 @@ def retrieval(tenant_id):
|
||||
|
||||
records = []
|
||||
for c in ranks["chunks"]:
|
||||
e, doc = DocumentService.get_by_id(c["doc_id"])
|
||||
c.pop("vector", None)
|
||||
records.append({
|
||||
"content": c["content_with_weight"],
|
||||
"score": c["similarity"],
|
||||
"title": c["docnm_kwd"],
|
||||
"metadata": {}
|
||||
"metadata": doc.meta_fields
|
||||
})
|
||||
|
||||
return jsonify({"records": records})
|
||||
|
||||
@ -13,38 +13,35 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import pathlib
|
||||
import datetime
|
||||
|
||||
from rag.app.qa import rmPrefix, beAdoc
|
||||
from rag.nlp import rag_tokenizer
|
||||
from api.db import LLMType, ParserType
|
||||
from api.db.services.llm_service import TenantLLMService, LLMBundle
|
||||
from api import settings
|
||||
import xxhash
|
||||
import logging
|
||||
import pathlib
|
||||
import re
|
||||
from api.utils.api_utils import token_required
|
||||
from api.db.db_models import Task
|
||||
from api.db.services.task_service import TaskService, queue_tasks
|
||||
from api.utils.api_utils import server_error_response
|
||||
from api.utils.api_utils import get_result, get_error_data_result
|
||||
from io import BytesIO
|
||||
|
||||
import xxhash
|
||||
from flask import request, send_file
|
||||
from api.db import FileSource, TaskStatus, FileType
|
||||
from api.db.db_models import File
|
||||
from peewee import OperationalError
|
||||
from pydantic import BaseModel, Field, validator
|
||||
|
||||
from api import settings
|
||||
from api.constants import FILE_NAME_LEN_LIMIT
|
||||
from api.db import FileSource, FileType, LLMType, ParserType, TaskStatus
|
||||
from api.db.db_models import File, Task
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import construct_json_result, get_parser_config, check_duplicate_ids
|
||||
from rag.nlp import search
|
||||
from rag.prompts import keyword_extraction
|
||||
from api.db.services.llm_service import LLMBundle, TenantLLMService
|
||||
from api.db.services.task_service import TaskService, queue_tasks
|
||||
from api.utils.api_utils import check_duplicate_ids, construct_json_result, get_error_data_result, get_parser_config, get_result, server_error_response, token_required
|
||||
from rag.app.qa import beAdoc, rmPrefix
|
||||
from rag.app.tag import label_question
|
||||
from rag.nlp import rag_tokenizer, search
|
||||
from rag.prompts import keyword_extraction
|
||||
from rag.utils import rmSpace
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
from pydantic import BaseModel, Field, validator
|
||||
|
||||
MAXIMUM_OF_UPLOADING_FILES = 256
|
||||
|
||||
|
||||
@ -60,7 +57,7 @@ class Chunk(BaseModel):
|
||||
available: bool = True
|
||||
positions: list[list[int]] = Field(default_factory=list)
|
||||
|
||||
@validator('positions')
|
||||
@validator("positions")
|
||||
def validate_positions(cls, value):
|
||||
for sublist in value:
|
||||
if len(sublist) != 5:
|
||||
@ -128,20 +125,14 @@ def upload(dataset_id, tenant_id):
|
||||
description: Processing status.
|
||||
"""
|
||||
if "file" not in request.files:
|
||||
return get_error_data_result(
|
||||
message="No file part!", code=settings.RetCode.ARGUMENT_ERROR
|
||||
)
|
||||
return get_error_data_result(message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
file_objs = request.files.getlist("file")
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == "":
|
||||
return get_result(
|
||||
message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR
|
||||
)
|
||||
if len(file_obj.filename.encode("utf-8")) >= 128:
|
||||
return get_result(
|
||||
message="File name should be less than 128 bytes.", code=settings.RetCode.ARGUMENT_ERROR
|
||||
)
|
||||
'''
|
||||
return get_result(message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if len(file_obj.filename.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_result(message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
"""
|
||||
# total size
|
||||
total_size = 0
|
||||
for file_obj in file_objs:
|
||||
@ -154,7 +145,7 @@ def upload(dataset_id, tenant_id):
|
||||
message=f"Total file size exceeds 10MB limit! ({total_size / (1024 * 1024):.2f} MB)",
|
||||
code=settings.RetCode.ARGUMENT_ERROR,
|
||||
)
|
||||
'''
|
||||
"""
|
||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not e:
|
||||
raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
|
||||
@ -222,6 +213,9 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
chunk_method:
|
||||
type: string
|
||||
description: Chunking method.
|
||||
enabled:
|
||||
type: boolean
|
||||
description: Document status.
|
||||
responses:
|
||||
200:
|
||||
description: Document updated successfully.
|
||||
@ -231,6 +225,9 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
req = request.json
|
||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||
return get_error_data_result(message="You don't own the dataset.")
|
||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not e:
|
||||
return get_error_data_result(message="Can't find this knowledgebase!")
|
||||
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
|
||||
if not doc:
|
||||
return get_error_data_result(message="The dataset doesn't own the document.")
|
||||
@ -251,24 +248,19 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
DocumentService.update_meta_fields(document_id, req["meta_fields"])
|
||||
|
||||
if "name" in req and req["name"] != doc.name:
|
||||
if len(req["name"].encode("utf-8")) >= 128:
|
||||
if len(req["name"].encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_result(
|
||||
message="The name should be less than 128 bytes.",
|
||||
message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.",
|
||||
code=settings.RetCode.ARGUMENT_ERROR,
|
||||
)
|
||||
if (
|
||||
pathlib.Path(req["name"].lower()).suffix
|
||||
!= pathlib.Path(doc.name.lower()).suffix
|
||||
):
|
||||
if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
|
||||
return get_result(
|
||||
message="The extension of file can't be changed",
|
||||
code=settings.RetCode.ARGUMENT_ERROR,
|
||||
)
|
||||
for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
|
||||
if d.name == req["name"]:
|
||||
return get_error_data_result(
|
||||
message="Duplicated document name in the same dataset."
|
||||
)
|
||||
return get_error_data_result(message="Duplicated document name in the same dataset.")
|
||||
if not DocumentService.update_by_id(document_id, {"name": req["name"]}):
|
||||
return get_error_data_result(message="Database error (Document rename)!")
|
||||
|
||||
@ -280,46 +272,28 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if "parser_config" in req:
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if "chunk_method" in req:
|
||||
valid_chunk_method = {
|
||||
"naive",
|
||||
"manual",
|
||||
"qa",
|
||||
"table",
|
||||
"paper",
|
||||
"book",
|
||||
"laws",
|
||||
"presentation",
|
||||
"picture",
|
||||
"one",
|
||||
"knowledge_graph",
|
||||
"email",
|
||||
"tag"
|
||||
}
|
||||
valid_chunk_method = {"naive", "manual", "qa", "table", "paper", "book", "laws", "presentation", "picture", "one", "knowledge_graph", "email", "tag"}
|
||||
if req.get("chunk_method") not in valid_chunk_method:
|
||||
return get_error_data_result(
|
||||
f"`chunk_method` {req['chunk_method']} doesn't exist"
|
||||
)
|
||||
if doc.parser_id.lower() == req["chunk_method"].lower():
|
||||
return get_result()
|
||||
return get_error_data_result(f"`chunk_method` {req['chunk_method']} doesn't exist")
|
||||
|
||||
if doc.type == FileType.VISUAL or re.search(r"\.(ppt|pptx|pages)$", doc.name):
|
||||
return get_error_data_result(message="Not supported yet!")
|
||||
|
||||
e = DocumentService.update_by_id(
|
||||
doc.id,
|
||||
{
|
||||
"parser_id": req["chunk_method"],
|
||||
"progress": 0,
|
||||
"progress_msg": "",
|
||||
"run": TaskStatus.UNSTART.value,
|
||||
},
|
||||
)
|
||||
if not e:
|
||||
return get_error_data_result(message="Document not found!")
|
||||
req["parser_config"] = get_parser_config(
|
||||
req["chunk_method"], req.get("parser_config")
|
||||
)
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if doc.parser_id.lower() != req["chunk_method"].lower():
|
||||
e = DocumentService.update_by_id(
|
||||
doc.id,
|
||||
{
|
||||
"parser_id": req["chunk_method"],
|
||||
"progress": 0,
|
||||
"progress_msg": "",
|
||||
"run": TaskStatus.UNSTART.value,
|
||||
},
|
||||
)
|
||||
if not e:
|
||||
return get_error_data_result(message="Document not found!")
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(req["chunk_method"], req.get("parser_config"))
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if doc.token_num > 0:
|
||||
e = DocumentService.increment_chunk_num(
|
||||
doc.id,
|
||||
@ -332,7 +306,49 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
return get_error_data_result(message="Document not found!")
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
|
||||
|
||||
return get_result()
|
||||
if "enabled" in req:
|
||||
status = int(req["enabled"])
|
||||
if doc.status != req["enabled"]:
|
||||
try:
|
||||
if not DocumentService.update_by_id(doc.id, {"status": str(status)}):
|
||||
return get_error_data_result(message="Database error (Document update)!")
|
||||
|
||||
settings.docStoreConn.update({"doc_id": doc.id}, {"available_int": status}, search.index_name(kb.tenant_id), doc.kb_id)
|
||||
return get_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
try:
|
||||
ok, doc = DocumentService.get_by_id(doc.id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Dataset created failed")
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
key_mapping = {
|
||||
"chunk_num": "chunk_count",
|
||||
"kb_id": "dataset_id",
|
||||
"token_num": "token_count",
|
||||
"parser_id": "chunk_method",
|
||||
}
|
||||
run_mapping = {
|
||||
"0": "UNSTART",
|
||||
"1": "RUNNING",
|
||||
"2": "CANCEL",
|
||||
"3": "DONE",
|
||||
"4": "FAIL",
|
||||
}
|
||||
renamed_doc = {}
|
||||
for key, value in doc.to_dict().items():
|
||||
if key == "run":
|
||||
renamed_doc["run"] = run_mapping.get(str(value))
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_doc[new_key] = value
|
||||
if key == "run":
|
||||
renamed_doc["run"] = run_mapping.get(value)
|
||||
|
||||
return get_result(data=renamed_doc)
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"]) # noqa: F821
|
||||
@ -374,25 +390,17 @@ def download(tenant_id, dataset_id, document_id):
|
||||
type: object
|
||||
"""
|
||||
if not document_id:
|
||||
return get_error_data_result(
|
||||
message="Specify document_id please."
|
||||
)
|
||||
return get_error_data_result(message="Specify document_id please.")
|
||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||
return get_error_data_result(message=f"You do not own the dataset {dataset_id}.")
|
||||
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
|
||||
if not doc:
|
||||
return get_error_data_result(
|
||||
message=f"The dataset not own the document {document_id}."
|
||||
)
|
||||
return get_error_data_result(message=f"The dataset not own the document {document_id}.")
|
||||
# The process of downloading
|
||||
doc_id, doc_location = File2DocumentService.get_storage_address(
|
||||
doc_id=document_id
|
||||
) # minio address
|
||||
doc_id, doc_location = File2DocumentService.get_storage_address(doc_id=document_id) # minio address
|
||||
file_stream = STORAGE_IMPL.get(doc_id, doc_location)
|
||||
if not file_stream:
|
||||
return construct_json_result(
|
||||
message="This file is empty.", code=settings.RetCode.DATA_ERROR
|
||||
)
|
||||
return construct_json_result(message="This file is empty.", code=settings.RetCode.DATA_ERROR)
|
||||
file = BytesIO(file_stream)
|
||||
# Use send_file with a proper filename and MIME type
|
||||
return send_file(
|
||||
@ -507,9 +515,7 @@ def list_docs(dataset_id, tenant_id):
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
docs, tol = DocumentService.get_list(
|
||||
dataset_id, page, page_size, orderby, desc, keywords, id, name
|
||||
)
|
||||
docs, tol = DocumentService.get_list(dataset_id, page, page_size, orderby, desc, keywords, id, name)
|
||||
|
||||
# rename key's name
|
||||
renamed_doc_list = []
|
||||
@ -615,9 +621,7 @@ def delete(tenant_id, dataset_id):
|
||||
b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
|
||||
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_error_data_result(
|
||||
message="Database error (Document removal)!"
|
||||
)
|
||||
return get_error_data_result(message="Database error (Document removal)!")
|
||||
|
||||
f2d = File2DocumentService.get_by_document_id(doc_id)
|
||||
FileService.filter_delete(
|
||||
@ -641,7 +645,10 @@ def delete(tenant_id, dataset_id):
|
||||
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
|
||||
return get_result(
|
||||
message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors",
|
||||
data={"success_count": success_count, "errors": duplicate_messages},
|
||||
)
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
|
||||
@ -706,9 +713,7 @@ def parse(tenant_id, dataset_id):
|
||||
if not doc:
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if 0.0 < doc[0].progress < 1.0:
|
||||
return get_error_data_result(
|
||||
"Can't parse document that is currently being processed"
|
||||
)
|
||||
return get_error_data_result("Can't parse document that is currently being processed")
|
||||
info = {"run": "1", "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
|
||||
DocumentService.update_by_id(id, info)
|
||||
settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), dataset_id)
|
||||
@ -723,7 +728,10 @@ def parse(tenant_id, dataset_id):
|
||||
return get_result(message=f"Documents not found: {not_found}", code=settings.RetCode.DATA_ERROR)
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(message=f"Partially parsed {success_count} documents with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
|
||||
return get_result(
|
||||
message=f"Partially parsed {success_count} documents with {len(duplicate_messages)} errors",
|
||||
data={"success_count": success_count, "errors": duplicate_messages},
|
||||
)
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
|
||||
@ -785,16 +793,17 @@ def stop_parsing(tenant_id, dataset_id):
|
||||
if not doc:
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if int(doc[0].progress) == 1 or doc[0].progress == 0:
|
||||
return get_error_data_result(
|
||||
"Can't stop parsing document with progress at 0 or 1"
|
||||
)
|
||||
return get_error_data_result("Can't stop parsing document with progress at 0 or 1")
|
||||
info = {"run": "2", "progress": 0, "chunk_num": 0}
|
||||
DocumentService.update_by_id(id, info)
|
||||
settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
|
||||
success_count += 1
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(message=f"Partially stopped {success_count} documents with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
|
||||
return get_result(
|
||||
message=f"Partially stopped {success_count} documents with {len(duplicate_messages)} errors",
|
||||
data={"success_count": success_count, "errors": duplicate_messages},
|
||||
)
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
return get_result()
|
||||
@ -833,6 +842,12 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
required: false
|
||||
default: 30
|
||||
description: Number of items per page.
|
||||
- in: query
|
||||
name: id
|
||||
type: string
|
||||
required: false
|
||||
default: ""
|
||||
description: Chunk Id.
|
||||
- in: header
|
||||
name: Authorization
|
||||
type: string
|
||||
@ -877,9 +892,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
|
||||
doc = DocumentService.query(id=document_id, kb_id=dataset_id)
|
||||
if not doc:
|
||||
return get_error_data_result(
|
||||
message=f"You don't own the document {document_id}."
|
||||
)
|
||||
return get_error_data_result(message=f"You don't own the document {document_id}.")
|
||||
doc = doc[0]
|
||||
req = request.args
|
||||
doc_id = document_id
|
||||
@ -927,34 +940,29 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
del chunk[n]
|
||||
if not chunk:
|
||||
return get_error_data_result(f"Chunk `{req.get('id')}` not found.")
|
||||
res['total'] = 1
|
||||
res["total"] = 1
|
||||
final_chunk = {
|
||||
"id":chunk.get("id",chunk.get("chunk_id")),
|
||||
"content":chunk["content_with_weight"],
|
||||
"document_id":chunk.get("doc_id",chunk.get("document_id")),
|
||||
"docnm_kwd":chunk["docnm_kwd"],
|
||||
"important_keywords":chunk.get("important_kwd",[]),
|
||||
"questions":chunk.get("question_kwd",[]),
|
||||
"dataset_id":chunk.get("kb_id",chunk.get("dataset_id")),
|
||||
"image_id":chunk.get("img_id", ""),
|
||||
"available":bool(chunk.get("available_int",1)),
|
||||
"positions":chunk.get("position_int",[]),
|
||||
"id": chunk.get("id", chunk.get("chunk_id")),
|
||||
"content": chunk["content_with_weight"],
|
||||
"document_id": chunk.get("doc_id", chunk.get("document_id")),
|
||||
"docnm_kwd": chunk["docnm_kwd"],
|
||||
"important_keywords": chunk.get("important_kwd", []),
|
||||
"questions": chunk.get("question_kwd", []),
|
||||
"dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
|
||||
"image_id": chunk.get("img_id", ""),
|
||||
"available": bool(chunk.get("available_int", 1)),
|
||||
"positions": chunk.get("position_int", []),
|
||||
}
|
||||
res["chunks"].append(final_chunk)
|
||||
_ = Chunk(**final_chunk)
|
||||
|
||||
elif settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
|
||||
sres = settings.retrievaler.search(query, search.index_name(tenant_id), [dataset_id], emb_mdl=None,
|
||||
highlight=True)
|
||||
sres = settings.retrievaler.search(query, search.index_name(tenant_id), [dataset_id], emb_mdl=None, highlight=True)
|
||||
res["total"] = sres.total
|
||||
for id in sres.ids:
|
||||
d = {
|
||||
"id": id,
|
||||
"content": (
|
||||
rmSpace(sres.highlight[id])
|
||||
if question and id in sres.highlight
|
||||
else sres.field[id].get("content_with_weight", "")
|
||||
),
|
||||
"content": (rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[id].get("content_with_weight", "")),
|
||||
"document_id": sres.field[id]["doc_id"],
|
||||
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
||||
"important_keywords": sres.field[id].get("important_kwd", []),
|
||||
@ -962,10 +970,10 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
"dataset_id": sres.field[id].get("kb_id", sres.field[id].get("dataset_id")),
|
||||
"image_id": sres.field[id].get("img_id", ""),
|
||||
"available": bool(int(sres.field[id].get("available_int", "1"))),
|
||||
"positions": sres.field[id].get("position_int",[]),
|
||||
"positions": sres.field[id].get("position_int", []),
|
||||
}
|
||||
res["chunks"].append(d)
|
||||
_ = Chunk(**d) # validate the chunk
|
||||
_ = Chunk(**d) # validate the chunk
|
||||
return get_result(data=res)
|
||||
|
||||
|
||||
@ -1041,23 +1049,17 @@ def add_chunk(tenant_id, dataset_id, document_id):
|
||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
|
||||
doc = DocumentService.query(id=document_id, kb_id=dataset_id)
|
||||
if not doc:
|
||||
return get_error_data_result(
|
||||
message=f"You don't own the document {document_id}."
|
||||
)
|
||||
return get_error_data_result(message=f"You don't own the document {document_id}.")
|
||||
doc = doc[0]
|
||||
req = request.json
|
||||
if not str(req.get("content", "")).strip():
|
||||
return get_error_data_result(message="`content` is required")
|
||||
if "important_keywords" in req:
|
||||
if not isinstance(req["important_keywords"], list):
|
||||
return get_error_data_result(
|
||||
"`important_keywords` is required to be a list"
|
||||
)
|
||||
return get_error_data_result("`important_keywords` is required to be a list")
|
||||
if "questions" in req:
|
||||
if not isinstance(req["questions"], list):
|
||||
return get_error_data_result(
|
||||
"`questions` is required to be a list"
|
||||
)
|
||||
return get_error_data_result("`questions` is required to be a list")
|
||||
chunk_id = xxhash.xxh64((req["content"] + document_id).encode("utf-8")).hexdigest()
|
||||
d = {
|
||||
"id": chunk_id,
|
||||
@ -1066,22 +1068,16 @@ def add_chunk(tenant_id, dataset_id, document_id):
|
||||
}
|
||||
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
|
||||
d["important_kwd"] = req.get("important_keywords", [])
|
||||
d["important_tks"] = rag_tokenizer.tokenize(
|
||||
" ".join(req.get("important_keywords", []))
|
||||
)
|
||||
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_keywords", [])))
|
||||
d["question_kwd"] = [str(q).strip() for q in req.get("questions", []) if str(q).strip()]
|
||||
d["question_tks"] = rag_tokenizer.tokenize(
|
||||
"\n".join(req.get("questions", []))
|
||||
)
|
||||
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req.get("questions", [])))
|
||||
d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
|
||||
d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
|
||||
d["kb_id"] = dataset_id
|
||||
d["docnm_kwd"] = doc.name
|
||||
d["doc_id"] = document_id
|
||||
embd_id = DocumentService.get_embd_id(document_id)
|
||||
embd_mdl = TenantLLMService.model_instance(
|
||||
tenant_id, LLMType.EMBEDDING.value, embd_id
|
||||
)
|
||||
embd_mdl = TenantLLMService.model_instance(tenant_id, LLMType.EMBEDDING.value, embd_id)
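# The stored vector below mixes two embeddings: the document name and the chunk content
# (or its attached questions, when present), weighted 10% / 90%; the field name encodes
# the vector dimension (q_<dim>_vec).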
|
||||
v, c = embd_mdl.encode([doc.name, req["content"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
|
||||
v = 0.1 * v[0] + 0.9 * v[1]
|
||||
d["q_%d_vec" % len(v)] = v.tolist()
|
||||
@ -1174,7 +1170,10 @@ def rm_chunk(tenant_id, dataset_id, document_id):
|
||||
return get_result(message=f"deleted {chunk_number} chunks")
|
||||
return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(unique_chunk_ids)}")
|
||||
if duplicate_messages:
|
||||
return get_result(message=f"Partially deleted {chunk_number} chunks with {len(duplicate_messages)} errors", data={"success_count": chunk_number, "errors": duplicate_messages},)
|
||||
return get_result(
|
||||
message=f"Partially deleted {chunk_number} chunks with {len(duplicate_messages)} errors",
|
||||
data={"success_count": chunk_number, "errors": duplicate_messages},
|
||||
)
|
||||
return get_result(message=f"deleted {chunk_number} chunks")
|
||||
|
||||
|
||||
@ -1242,9 +1241,7 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
|
||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
|
||||
doc = DocumentService.query(id=document_id, kb_id=dataset_id)
|
||||
if not doc:
|
||||
return get_error_data_result(
|
||||
message=f"You don't own the document {document_id}."
|
||||
)
|
||||
return get_error_data_result(message=f"You don't own the document {document_id}.")
|
||||
doc = doc[0]
|
||||
req = request.json
|
||||
if "content" in req:
|
||||
@ -1267,19 +1264,13 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
|
||||
if "available" in req:
|
||||
d["available_int"] = int(req["available"])
|
||||
embd_id = DocumentService.get_embd_id(document_id)
|
||||
embd_mdl = TenantLLMService.model_instance(
|
||||
tenant_id, LLMType.EMBEDDING.value, embd_id
|
||||
)
|
||||
embd_mdl = TenantLLMService.model_instance(tenant_id, LLMType.EMBEDDING.value, embd_id)
|
||||
if doc.parser_id == ParserType.QA:
|
||||
arr = [t for t in re.split(r"[\n\t]", d["content_with_weight"]) if len(t) > 1]
|
||||
if len(arr) != 2:
|
||||
return get_error_data_result(
|
||||
message="Q&A must be separated by TAB/ENTER key."
|
||||
)
|
||||
return get_error_data_result(message="Q&A must be separated by TAB/ENTER key.")
|
||||
q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
|
||||
d = beAdoc(
|
||||
d, arr[0], arr[1], not any([rag_tokenizer.is_chinese(t) for t in q + a])
|
||||
)
|
||||
d = beAdoc(d, arr[0], arr[1], not any([rag_tokenizer.is_chinese(t) for t in q + a]))
|
||||
|
||||
v, c = embd_mdl.encode([doc.name, d["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
|
||||
v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
|
||||
@ -1396,9 +1387,7 @@ def retrieval_test(tenant_id):
|
||||
doc_ids_list = KnowledgebaseService.list_documents_by_ids(kb_ids)
|
||||
for doc_id in doc_ids:
|
||||
if doc_id not in doc_ids_list:
|
||||
return get_error_data_result(
|
||||
f"The datasets don't own the document {doc_id}"
|
||||
)
|
||||
return get_error_data_result(f"The datasets don't own the document {doc_id}")
|
||||
similarity_threshold = float(req.get("similarity_threshold", 0.2))
|
||||
vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
|
||||
top = int(req.get("top_k", 1024))
|
||||
@ -1407,6 +1396,7 @@ def retrieval_test(tenant_id):
|
||||
else:
|
||||
highlight = True
|
||||
try:
|
||||
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
|
||||
if not e:
|
||||
return get_error_data_result(message="Dataset not found!")
|
||||
@ -1423,7 +1413,7 @@ def retrieval_test(tenant_id):
|
||||
ranks = settings.retrievaler.retrieval(
|
||||
question,
|
||||
embd_mdl,
|
||||
kb.tenant_id,
|
||||
tenant_ids,
|
||||
kb_ids,
|
||||
page,
|
||||
size,
|
||||
@ -1433,14 +1423,10 @@ def retrieval_test(tenant_id):
|
||||
doc_ids,
|
||||
rerank_mdl=rerank_mdl,
|
||||
highlight=highlight,
|
||||
rank_feature=label_question(question, kbs)
|
||||
rank_feature=label_question(question, kbs),
|
||||
)
|
||||
if use_kg:
|
||||
ck = settings.kg_retrievaler.retrieval(question,
|
||||
[k.tenant_id for k in kbs],
|
||||
kb_ids,
|
||||
embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
ck = settings.kg_retrievaler.retrieval(question, [k.tenant_id for k in kbs], kb_ids, embd_mdl, LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
@ -1457,7 +1443,7 @@ def retrieval_test(tenant_id):
|
||||
"important_kwd": "important_keywords",
|
||||
"question_kwd": "questions",
|
||||
"docnm_kwd": "document_keyword",
|
||||
"kb_id":"dataset_id"
|
||||
"kb_id": "dataset_id",
|
||||
}
|
||||
rename_chunk = {}
|
||||
for key, value in chunk.items():
|
||||
|
||||
@ -388,10 +388,10 @@ def agents_completion_openai_compatibility (tenant_id, agent_id):
|
||||
question = next((m["content"] for m in reversed(messages) if m["role"] == "user"), "")
|
||||
|
||||
if req.get("stream", True):
|
||||
return Response(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", ""), stream=True), mimetype="text/event-stream")
|
||||
return Response(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", req.get("metadata", {}).get("id","")), stream=True), mimetype="text/event-stream")
|
||||
else:
|
||||
# For non-streaming, just return the response directly
|
||||
response = next(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", ""), stream=False))
|
||||
response = next(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", req.get("metadata", {}).get("id","")), stream=False))
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
@ -464,12 +464,11 @@ def list_session(tenant_id, chat_id):
|
||||
if conv["reference"]:
|
||||
messages = conv["messages"]
|
||||
message_num = 0
|
||||
chunk_num = 0
|
||||
while message_num < len(messages):
|
||||
while message_num < len(messages) and message_num < len(conv["reference"]):
|
||||
if message_num != 0 and messages[message_num]["role"] != "user":
|
||||
chunk_list = []
|
||||
if "chunks" in conv["reference"][chunk_num]:
|
||||
chunks = conv["reference"][chunk_num]["chunks"]
|
||||
if "chunks" in conv["reference"][message_num]:
|
||||
chunks = conv["reference"][message_num]["chunks"]
|
||||
for chunk in chunks:
|
||||
new_chunk = {
|
||||
"id": chunk.get("chunk_id", chunk.get("id")),
|
||||
@ -482,7 +481,6 @@ def list_session(tenant_id, chat_id):
|
||||
}
|
||||
|
||||
chunk_list.append(new_chunk)
|
||||
chunk_num += 1
|
||||
messages[message_num]["reference"] = chunk_list
|
||||
message_num += 1
|
||||
del conv["reference"]
|
||||
|
||||
api/apps/search_app.py (new file, 188 lines)
@ -0,0 +1,188 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from flask import request
|
||||
from flask_login import current_user, login_required
|
||||
|
||||
from api import settings
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import DB
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.search_service import SearchService
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_data_error_result, get_json_result, not_allowed_parameters, server_error_response, validate_request
|
||||
|
||||
|
||||
@manager.route("/create", methods=["post"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("name")
|
||||
def create():
|
||||
req = request.get_json()
|
||||
search_name = req["name"]
|
||||
description = req.get("description", "")
|
||||
if not isinstance(search_name, str):
|
||||
return get_data_error_result(message="Search name must be string.")
|
||||
if search_name.strip() == "":
|
||||
return get_data_error_result(message="Search name can't be empty.")
|
||||
if len(search_name.encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(message=f"Search name length is {len(search_name)} which is large than {DATASET_NAME_LIMIT}")
|
||||
e, _ = TenantService.get_by_id(current_user.id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Authorizationd identity.")
|
||||
|
||||
search_name = search_name.strip()
|
||||
search_name = duplicate_name(KnowledgebaseService.query, name=search_name, tenant_id=current_user.id, status=StatusEnum.VALID.value)
|
||||
|
||||
req["id"] = get_uuid()
|
||||
req["name"] = search_name
|
||||
req["description"] = description
|
||||
req["tenant_id"] = current_user.id
|
||||
req["created_by"] = current_user.id
|
||||
with DB.atomic():
|
||||
try:
|
||||
if not SearchService.save(**req):
|
||||
return get_data_error_result()
|
||||
return get_json_result(data={"search_id": req["id"]})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
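# Example request for this endpoint (illustrative; the exact path prefix depends on how
# this blueprint is mounted, and authentication comes from the @login_required session):
#
#   POST /v1/search/create
#   {"name": "my search app", "description": "search over my datasets"}
#
# On success, the response data carries {"search_id": "<uuid>"}.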
|
||||
|
||||
|
||||
@manager.route("/update", methods=["post"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("search_id", "name", "search_config", "tenant_id")
|
||||
@not_allowed_parameters("id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
|
||||
def update():
|
||||
req = request.get_json()
|
||||
if not isinstance(req["name"], str):
|
||||
return get_data_error_result(message="Search name must be string.")
|
||||
if req["name"].strip() == "":
|
||||
return get_data_error_result(message="Search name can't be empty.")
|
||||
if len(req["name"].encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(message=f"Search name length is {len(req['name'])} which is large than {DATASET_NAME_LIMIT}")
|
||||
req["name"] = req["name"].strip()
|
||||
tenant_id = req["tenant_id"]
|
||||
e, _ = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Authorizationd identity.")
|
||||
|
||||
search_id = req["search_id"]
|
||||
if not SearchService.accessible4deletion(search_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
search_app = SearchService.query(tenant_id=tenant_id, id=search_id)[0]
|
||||
if not search_app:
|
||||
return get_json_result(data=False, message=f"Cannot find search {search_id}", code=settings.RetCode.DATA_ERROR)
|
||||
|
||||
if req["name"].lower() != search_app.name.lower() and len(SearchService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) >= 1:
|
||||
return get_data_error_result(message="Duplicated search name.")
|
||||
|
||||
if "search_config" in req:
|
||||
current_config = search_app.search_config or {}
|
||||
new_config = req["search_config"]
|
||||
|
||||
if not isinstance(new_config, dict):
|
||||
return get_data_error_result(message="search_config must be a JSON object")
|
||||
|
||||
updated_config = {**current_config, **new_config}
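# Note: this is a shallow (top-level) merge of search_config, unlike the recursive
# deep_merge used for dataset parser_config elsewhere; a nested key present in
# new_config replaces the whole nested object rather than being merged into it.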
|
||||
req["search_config"] = updated_config
|
||||
|
||||
req.pop("search_id", None)
|
||||
req.pop("tenant_id", None)
|
||||
|
||||
updated = SearchService.update_by_id(search_id, req)
|
||||
if not updated:
|
||||
return get_data_error_result(message="Failed to update search")
|
||||
|
||||
e, updated_search = SearchService.get_by_id(search_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Failed to fetch updated search")
|
||||
|
||||
return get_json_result(data=updated_search.to_dict())
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/detail", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def detail():
|
||||
search_id = request.args["search_id"]
|
||||
try:
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if SearchService.query(tenant_id=tenant.tenant_id, id=search_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(data=False, message="Has no permission for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
search = SearchService.get_detail(search_id)
|
||||
if not search:
|
||||
return get_data_error_result(message="Can't find this Search App!")
|
||||
return get_json_result(data=search)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/list", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def list_search_app():
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 0))
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
req = request.get_json()
|
||||
owner_ids = req.get("owner_ids", [])
|
||||
try:
|
||||
if not owner_ids:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
tenants = [m["tenant_id"] for m in tenants]
|
||||
search_apps, total = SearchService.get_by_tenant_ids(tenants, current_user.id, page_number, items_per_page, orderby, desc, keywords)
|
||||
else:
|
||||
tenants = owner_ids
|
||||
search_apps, total = SearchService.get_by_tenant_ids(tenants, current_user.id, 0, 0, orderby, desc, keywords)
|
||||
search_apps = [search_app for search_app in search_apps if search_app["tenant_id"] in tenants]
|
||||
total = len(search_apps)
|
||||
if page_number and items_per_page:
|
||||
search_apps = search_apps[(page_number - 1) * items_per_page : page_number * items_per_page]
|
||||
return get_json_result(data={"search_apps": search_apps, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/rm", methods=["post"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("search_id")
|
||||
def rm():
|
||||
req = request.get_json()
|
||||
search_id = req["search_id"]
|
||||
if not SearchService.accessible4deletion(search_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
if not SearchService.delete_by_id(search_id):
|
||||
return get_data_error_result(message=f"Failed to delete search App {search_id}")
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
@ -13,35 +13,38 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import secrets
|
||||
from datetime import datetime
|
||||
|
||||
from flask import request, session, redirect
|
||||
from werkzeug.security import generate_password_hash, check_password_hash
|
||||
from flask_login import login_required, current_user, login_user, logout_user
|
||||
from flask import redirect, request, session
|
||||
from flask_login import current_user, login_required, login_user, logout_user
|
||||
from werkzeug.security import check_password_hash, generate_password_hash
|
||||
|
||||
from api import settings
|
||||
from api.apps.auth import get_auth_client
|
||||
from api.db import FileType, UserTenantRole
|
||||
from api.db.db_models import TenantLLM
|
||||
from api.db.services.llm_service import TenantLLMService, LLMService
|
||||
from api.utils.api_utils import (
|
||||
server_error_response,
|
||||
validate_request,
|
||||
get_data_error_result,
|
||||
)
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.llm_service import LLMService, TenantLLMService
|
||||
from api.db.services.user_service import TenantService, UserService, UserTenantService
|
||||
from api.utils import (
|
||||
get_uuid,
|
||||
get_format_time,
|
||||
decrypt,
|
||||
download_img,
|
||||
current_timestamp,
|
||||
datetime_format,
|
||||
decrypt,
|
||||
download_img,
|
||||
get_format_time,
|
||||
get_uuid,
|
||||
)
|
||||
from api.utils.api_utils import (
|
||||
construct_response,
|
||||
get_data_error_result,
|
||||
get_json_result,
|
||||
server_error_response,
|
||||
validate_request,
|
||||
)
|
||||
from api.db import UserTenantRole, FileType
|
||||
from api import settings
|
||||
from api.db.services.user_service import UserService, TenantService, UserTenantService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.utils.api_utils import get_json_result, construct_response
|
||||
|
||||
|
||||
@manager.route("/login", methods=["POST", "GET"]) # noqa: F821
|
||||
@ -76,9 +79,7 @@ def login():
|
||||
type: object
|
||||
"""
|
||||
if not request.json:
|
||||
return get_json_result(
|
||||
data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!"
|
||||
)
|
||||
return get_json_result(data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!")
|
||||
|
||||
email = request.json.get("email", "")
|
||||
users = UserService.query(email=email)
|
||||
@ -93,9 +94,7 @@ def login():
|
||||
try:
|
||||
password = decrypt(password)
|
||||
except BaseException:
|
||||
return get_json_result(
|
||||
data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password"
|
||||
)
|
||||
return get_json_result(data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password")
|
||||
|
||||
user = UserService.query_user(email, password)
|
||||
if user:
|
||||
@ -115,9 +114,131 @@ def login():
|
||||
)
|
||||
|
||||
|
||||
@manager.route("/login/channels", methods=["GET"]) # noqa: F821
|
||||
def get_login_channels():
|
||||
"""
|
||||
Get all supported authentication channels.
|
||||
"""
|
||||
try:
|
||||
channels = []
|
||||
for channel, config in settings.OAUTH_CONFIG.items():
|
||||
channels.append(
|
||||
{
|
||||
"channel": channel,
|
||||
"display_name": config.get("display_name", channel.title()),
|
||||
"icon": config.get("icon", "sso"),
|
||||
}
|
||||
)
|
||||
return get_json_result(data=channels)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return get_json_result(data=[], message=f"Load channels failure, error: {str(e)}", code=settings.RetCode.EXCEPTION_ERROR)
|
||||
|
||||
|
||||
@manager.route("/login/<channel>", methods=["GET"]) # noqa: F821
|
||||
def oauth_login(channel):
|
||||
channel_config = settings.OAUTH_CONFIG.get(channel)
|
||||
if not channel_config:
|
||||
raise ValueError(f"Invalid channel name: {channel}")
|
||||
auth_cli = get_auth_client(channel_config)
|
||||
|
||||
state = get_uuid()
|
||||
session["oauth_state"] = state
|
||||
auth_url = auth_cli.get_authorization_url(state)
|
||||
return redirect(auth_url)
|
||||
|
||||
|
||||
@manager.route("/oauth/callback/<channel>", methods=["GET"]) # noqa: F821
|
||||
def oauth_callback(channel):
|
||||
"""
|
||||
Handle the OAuth/OIDC callback for various channels dynamically.
|
||||
"""
|
||||
try:
|
||||
channel_config = settings.OAUTH_CONFIG.get(channel)
|
||||
if not channel_config:
|
||||
raise ValueError(f"Invalid channel name: {channel}")
|
||||
auth_cli = get_auth_client(channel_config)
|
||||
|
||||
# Check the state
|
||||
state = request.args.get("state")
|
||||
if not state or state != session.get("oauth_state"):
|
||||
return redirect("/?error=invalid_state")
|
||||
session.pop("oauth_state", None)
|
||||
|
||||
# Obtain the authorization code
|
||||
code = request.args.get("code")
|
||||
if not code:
|
||||
return redirect("/?error=missing_code")
|
||||
|
||||
# Exchange authorization code for access token
|
||||
token_info = auth_cli.exchange_code_for_token(code)
|
||||
access_token = token_info.get("access_token")
|
||||
if not access_token:
|
||||
return redirect("/?error=token_failed")
|
||||
|
||||
id_token = token_info.get("id_token")
|
||||
|
||||
# Fetch user info
|
||||
user_info = auth_cli.fetch_user_info(access_token, id_token=id_token)
|
||||
if not user_info.email:
|
||||
return redirect("/?error=email_missing")
|
||||
|
||||
# Login or register
|
||||
users = UserService.query(email=user_info.email)
|
||||
user_id = get_uuid()
|
||||
|
||||
if not users:
|
||||
try:
|
||||
try:
|
||||
avatar = download_img(user_info.avatar_url)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
avatar = ""
|
||||
|
||||
users = user_register(
|
||||
user_id,
|
||||
{
|
||||
"access_token": get_uuid(),
|
||||
"email": user_info.email,
|
||||
"avatar": avatar,
|
||||
"nickname": user_info.nickname,
|
||||
"login_channel": channel,
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
|
||||
if not users:
|
||||
raise Exception(f"Failed to register {user_info.email}")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {user_info.email} exists!")
|
||||
|
||||
# Try to log in
|
||||
user = users[0]
|
||||
login_user(user)
|
||||
return redirect(f"/?auth={user.get_id()}")
|
||||
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
return redirect(f"/?error={str(e)}")
|
||||
|
||||
# User exists, try to log in
|
||||
user = users[0]
|
||||
user.access_token = get_uuid()
|
||||
login_user(user)
|
||||
user.save()
|
||||
return redirect(f"/?auth={user.get_id()}")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return redirect(f"/?error={str(e)}")
|
||||
|
||||
|
||||
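The callback handler above only dictates two config keys per channel (`display_name` and `icon`, surfaced by the channels endpoint) and defers everything else to `get_auth_client`. A hypothetical `OAUTH_CONFIG` entry consistent with these handlers; every key other than `display_name` and `icon` is an assumption about what the auth client expects:

OAUTH_CONFIG = {
    "github": {
        "display_name": "GitHub",
        "icon": "github",
        # The keys below are illustrative; get_auth_client defines the real contract.
        "client_id": "<client-id>",
        "client_secret": "<client-secret>",
        "redirect_uri": "https://your-host/v1/user/oauth/callback/github",
    },
}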
@manager.route("/github_callback", methods=["GET"]) # noqa: F821
|
||||
def github_callback():
|
||||
"""
|
||||
**Deprecated**, Use `/oauth/callback/<channel>` instead.
|
||||
|
||||
GitHub OAuth callback endpoint.
|
||||
---
|
||||
tags:
|
||||
@ -309,9 +430,7 @@ def user_info_from_feishu(access_token):
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
}
|
||||
res = requests.get(
|
||||
"https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers
|
||||
)
|
||||
res = requests.get("https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers)
|
||||
user_info = res.json()["data"]
|
||||
user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
|
||||
return user_info
|
||||
@ -321,17 +440,13 @@ def user_info_from_github(access_token):
|
||||
import requests
|
||||
|
||||
headers = {"Accept": "application/json", "Authorization": f"token {access_token}"}
|
||||
res = requests.get(
|
||||
f"https://api.github.com/user?access_token={access_token}", headers=headers
|
||||
)
|
||||
res = requests.get(f"https://api.github.com/user?access_token={access_token}", headers=headers)
|
||||
user_info = res.json()
|
||||
email_info = requests.get(
|
||||
f"https://api.github.com/user/emails?access_token={access_token}",
|
||||
headers=headers,
|
||||
).json()
|
||||
user_info["email"] = next(
|
||||
(email for email in email_info if email["primary"]), None
|
||||
)["email"]
|
||||
user_info["email"] = next((email for email in email_info if email["primary"]), None)["email"]
|
||||
return user_info
|
||||
|
||||
|
||||
@ -351,7 +466,7 @@ def log_out():
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
current_user.access_token = ""
|
||||
current_user.access_token = f"INVALID_{secrets.token_hex(16)}"
|
||||
current_user.save()
|
||||
logout_user()
|
||||
return get_json_result(data=True)
|
||||
@ -391,9 +506,7 @@ def setting_user():
|
||||
request_data = request.json
|
||||
if request_data.get("password"):
|
||||
new_password = request_data.get("new_password")
|
||||
if not check_password_hash(
|
||||
current_user.password, decrypt(request_data["password"])
|
||||
):
|
||||
if not check_password_hash(current_user.password, decrypt(request_data["password"])):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR,
|
||||
@ -424,9 +537,7 @@ def setting_user():
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return get_json_result(
|
||||
data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR
|
||||
)
|
||||
return get_json_result(data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR)
|
||||
|
||||
|
||||
@manager.route("/info", methods=["GET"]) # noqa: F821
|
||||
@ -518,9 +629,23 @@ def user_register(user_id, user):
|
||||
"model_type": llm.model_type,
|
||||
"api_key": settings.API_KEY,
|
||||
"api_base": settings.LLM_BASE_URL,
|
||||
"max_tokens": llm.max_tokens if llm.max_tokens else 8192
|
||||
"max_tokens": llm.max_tokens if llm.max_tokens else 8192,
|
||||
}
|
||||
)
|
||||
if settings.LIGHTEN != 1:
|
||||
for buildin_embedding_model in settings.BUILTIN_EMBEDDING_MODELS:
|
||||
mdlnm, fid = TenantLLMService.split_model_name_and_factory(buildin_embedding_model)
|
||||
tenant_llm.append(
|
||||
{
|
||||
"tenant_id": user_id,
|
||||
"llm_factory": fid,
|
||||
"llm_name": mdlnm,
|
||||
"model_type": "embedding",
|
||||
"api_key": "",
|
||||
"api_base": "",
|
||||
"max_tokens": 1024 if buildin_embedding_model == "BAAI/bge-large-zh-v1.5@BAAI" else 512,
|
||||
}
|
||||
)
|
||||
|
||||
if not UserService.save(**user):
|
||||
return
|
||||
|
||||
@ -13,9 +13,9 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
NAME_LENGTH_LIMIT = 2 ** 10
|
||||
NAME_LENGTH_LIMIT = 2**10
|
||||
|
||||
IMG_BASE64_PREFIX = 'data:image/png;base64,'
|
||||
IMG_BASE64_PREFIX = "data:image/png;base64,"
|
||||
|
||||
SERVICE_CONF = "service_conf.yaml"
|
||||
|
||||
@ -25,3 +25,4 @@ REQUEST_WAIT_SEC = 2
|
||||
REQUEST_MAX_WAIT_SEC = 300
|
||||
|
||||
DATASET_NAME_LIMIT = 128
|
||||
FILE_NAME_LEN_LIMIT = 255
|
||||
|
||||
@ -49,6 +49,7 @@ class FileType(StrEnum):
|
||||
FOLDER = 'folder'
|
||||
OTHER = "other"
|
||||
|
||||
VALID_FILE_TYPES = {FileType.PDF, FileType.DOC, FileType.VISUAL, FileType.AURAL, FileType.VIRTUAL, FileType.FOLDER, FileType.OTHER}
|
||||
|
||||
class LLMType(StrEnum):
|
||||
CHAT = 'chat'
|
||||
@ -73,6 +74,7 @@ class TaskStatus(StrEnum):
|
||||
DONE = "3"
|
||||
FAIL = "4"
|
||||
|
||||
VALID_TASK_STATUS = {TaskStatus.UNSTART, TaskStatus.RUNNING, TaskStatus.CANCEL, TaskStatus.DONE, TaskStatus.FAIL}
|
||||
|
||||
class ParserType(StrEnum):
|
||||
PRESENTATION = "presentation"
|
||||
|
||||
@ -13,16 +13,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import hashlib
|
||||
import inspect
|
||||
import logging
|
||||
import operator
|
||||
import os
|
||||
import sys
|
||||
import typing
|
||||
import time
|
||||
import typing
|
||||
from enum import Enum
|
||||
from functools import wraps
|
||||
import hashlib
|
||||
|
||||
from flask_login import UserMixin
|
||||
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
|
||||
@ -264,14 +264,15 @@ class BaseDataBase:
|
||||
|
||||
def with_retry(max_retries=3, retry_delay=1.0):
|
||||
"""Decorator: Add retry mechanism to database operations
|
||||
|
||||
|
||||
Args:
|
||||
max_retries (int): maximum number of retries
|
||||
retry_delay (float): initial retry delay (seconds), will increase exponentially
|
||||
|
||||
|
||||
Returns:
|
||||
decorated function
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
@ -284,26 +285,28 @@ def with_retry(max_retries=3, retry_delay=1.0):
|
||||
# get self and method name for logging
|
||||
self_obj = args[0] if args else None
|
||||
func_name = func.__name__
|
||||
lock_name = getattr(self_obj, 'lock_name', 'unknown') if self_obj else 'unknown'
|
||||
|
||||
lock_name = getattr(self_obj, "lock_name", "unknown") if self_obj else "unknown"
|
||||
|
||||
if retry < max_retries - 1:
|
||||
current_delay = retry_delay * (2 ** retry)
|
||||
logging.warning(f"{func_name} {lock_name} failed: {str(e)}, retrying ({retry+1}/{max_retries})")
|
||||
current_delay = retry_delay * (2**retry)
|
||||
logging.warning(f"{func_name} {lock_name} failed: {str(e)}, retrying ({retry + 1}/{max_retries})")
|
||||
time.sleep(current_delay)
|
||||
else:
|
||||
logging.error(f"{func_name} {lock_name} failed after all attempts: {str(e)}")
|
||||
|
||||
|
||||
if last_exception:
|
||||
raise last_exception
|
||||
return False
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
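With `retry_delay=1.0` the decorator above sleeps 1s after the first failure and 2s after the second (`retry_delay * 2**retry`), then logs and gives up. A minimal usage sketch; the class is hypothetical and stands in for the lock objects the decorator is written for:

class DemoLock:
    lock_name = "demo_lock"  # read by with_retry for its log messages

    @with_retry(max_retries=3, retry_delay=1.0)
    def acquire(self):
        ...  # flaky operation standing in for a real database call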
class PostgresDatabaseLock:
    def __init__(self, lock_name, timeout=10, db=None):
        self.lock_name = lock_name
        self.lock_id = int(hashlib.md5(lock_name.encode()).hexdigest(), 16) % (2**31-1)
        self.lock_id = int(hashlib.md5(lock_name.encode()).hexdigest(), 16) % (2**31 - 1)
        self.timeout = int(timeout)
        self.db = db if db else DB

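The `lock_id` derivation folds the hashed name into the signed 32-bit range (`2**31 - 1` is 2147483647), which fits PostgreSQL's advisory-lock key types. The arithmetic in isolation:

import hashlib

# Same derivation as PostgresDatabaseLock.lock_id, for a sample name.
lock_id = int(hashlib.md5(b"demo_lock").hexdigest(), 16) % (2**31 - 1)
assert 0 <= lock_id < 2**31 - 1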
@@ -542,7 +545,7 @@ class LLM(DataBaseModel):
    max_tokens = IntegerField(default=0)

    tags = CharField(max_length=255, null=False, help_text="LLM, Text Embedding, Image2Text, Chat, 32k...", index=True)
    is_tools = BooleanField(null=False, help_text="support tools", default=False)
    is_tools = BooleanField(null=False, help_text="support tools", default=False)
    status = CharField(max_length=1, null=True, help_text="is it validate(0: wasted, 1: validate)", default="1", index=True)

    def __str__(self):
@@ -796,6 +799,50 @@ class UserCanvasVersion(DataBaseModel):
        db_table = "user_canvas_version"


class Search(DataBaseModel):
    id = CharField(max_length=32, primary_key=True)
    avatar = TextField(null=True, help_text="avatar base64 string")
    tenant_id = CharField(max_length=32, null=False, index=True)
    name = CharField(max_length=128, null=False, help_text="Search name", index=True)
    description = TextField(null=True, help_text="KB description")
    created_by = CharField(max_length=32, null=False, index=True)
    search_config = JSONField(
        null=False,
        default={
            "kb_ids": [],
            "doc_ids": [],
            "similarity_threshold": 0.0,
            "vector_similarity_weight": 0.3,
            "use_kg": False,
            # rerank settings
            "rerank_id": "",
            "top_k": 1024,
            # chat settings
            "summary": False,
            "chat_id": "",
            "llm_setting": {
                "temperature": 0.1,
                "top_p": 0.3,
                "frequency_penalty": 0.7,
                "presence_penalty": 0.4,
            },
            "chat_settingcross_languages": [],
            "highlight": False,
            "keyword": False,
            "web_search": False,
            "related_search": False,
            "query_mindmap": False,
        },
    )
    status = CharField(max_length=1, null=True, help_text="is it validate(0: wasted, 1: validate)", default="1", index=True)

    def __str__(self):
        return self.name

    class Meta:
        db_table = "search"


def migrate_db():
    migrator = DatabaseMigrator[settings.DATABASE_TYPE.upper()].value(DB)
    try:

@@ -84,14 +84,14 @@ def init_superuser():
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        logging.error(
            "'{}' dosen't work. {}".format(
            "'{}' doesn't work. {}".format(
                tenant["llm_id"],
                msg))
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        logging.error(
            "'{}' dosen't work!".format(
            "'{}' doesn't work!".format(
                tenant["embd_id"]))


@@ -119,7 +119,7 @@ def init_llm_factory():
        except Exception:
            pass

    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMFactoriesService.filter_delete([(LLMFactories.name == "Local") | (LLMFactories.name == "novita.ai")])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.llm_name == "qwen-vl-max"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])

@@ -13,27 +13,87 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import re
from pathlib import PurePath

from .user_service import UserService as UserService


def duplicate_name(query_func, **kwargs):
    fnm = kwargs["name"]
    objs = query_func(**kwargs)
    if not objs:
        return fnm
    ext = pathlib.Path(fnm).suffix  # .jpg
    nm = re.sub(r"%s$" % ext, "", fnm)
    r = re.search(r"\(([0-9]+)\)$", nm)
    c = 0
    if r:
        c = int(r.group(1))
        nm = re.sub(r"\([0-9]+\)$", "", nm)
    c += 1
    nm = f"{nm}({c})"
    if ext:
        nm += f"{ext}"
def split_name_counter(filename: str) -> tuple[str, int | None]:
    """
    Splits a filename into main part and counter (if present in parentheses).

    kwargs["name"] = nm
    return duplicate_name(query_func, **kwargs)
    Args:
        filename: Input filename string to be parsed

    Returns:
        A tuple containing:
        - The main filename part (string)
        - The counter from parentheses (integer) or None if no counter exists
    """
    pattern = re.compile(r"^(.*?)\((\d+)\)$")

    match = pattern.search(filename)
    if match:
        main_part = match.group(1).rstrip()
        bracket_part = match.group(2)
        return main_part, int(bracket_part)

    return filename, None


def duplicate_name(query_func, **kwargs) -> str:
    """
    Generates a unique filename by appending/incrementing a counter when duplicates exist.

    Continuously checks for name availability using the provided query function,
    automatically appending (1), (2), etc. until finding an available name or
    reaching maximum retries.

    Args:
        query_func: Callable that accepts keyword arguments and returns:
            - True if name exists (should be modified)
            - False if name is available
        **kwargs: Must contain 'name' key with original filename to check

    Returns:
        str: Available filename, either:
            - Original name (if available)
            - Modified name with counter (e.g., "file(1).txt")

    Raises:
        KeyError: If 'name' key not provided in kwargs
        RuntimeError: If unable to generate unique name after maximum retries

    Example:
        >>> def name_exists(name): return name in existing_files
        >>> duplicate_name(name_exists, name="document.pdf")
        'document(1).pdf'  # If original exists
    """
    MAX_RETRIES = 1000

    if "name" not in kwargs:
        raise KeyError("Arguments must contain 'name' key")

    original_name = kwargs["name"]
    current_name = original_name
    retries = 0

    while retries < MAX_RETRIES:
        if not query_func(**kwargs):
            return current_name

        path = PurePath(current_name)
        stem = path.stem
        suffix = path.suffix

        main_part, counter = split_name_counter(stem)
        counter = counter + 1 if counter else 1

        new_name = f"{main_part}({counter}){suffix}"

        kwargs["name"] = new_name
        current_name = new_name
        retries += 1

    raise RuntimeError(f"Failed to generate unique name within {MAX_RETRIES} attempts. Original: {original_name}")

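The rewrite replaces the old recursive `duplicate_name` with a bounded loop over `split_name_counter`. Its behavior against an in-memory name set, where the query function is hypothetical:

existing = {"report.pdf", "report(1).pdf"}

def name_taken(**kwargs):
    # Truthy while the candidate name is already in use.
    return kwargs["name"] in existing

print(duplicate_name(name_taken, name="report.pdf"))  # -> report(2).pdf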
@@ -73,11 +73,11 @@ class UserCanvasService(CommonService):
            User.nickname,
            User.avatar.alias('tenant_avatar'),
        ]
        angents = cls.model.select(*fields) \
        agents = cls.model.select(*fields) \
            .join(User, on=(cls.model.user_id == User.id)) \
            .where(cls.model.id == pid)
        # obj = cls.model.query(id=pid)[0]
        return True, angents.dicts()[0]
        return True, agents.dicts()[0]
    except Exception as e:
        print(e)
        return False, None
@@ -100,25 +100,25 @@ class UserCanvasService(CommonService):
            cls.model.update_time
        ]
        if keywords:
            angents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
            agents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
                ((cls.model.user_id.in_(joined_tenant_ids) & (cls.model.permission ==
                    TenantPermission.TEAM.value)) | (
                    cls.model.user_id == user_id)),
                (fn.LOWER(cls.model.title).contains(keywords.lower()))
            )
        else:
            angents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
            agents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
                ((cls.model.user_id.in_(joined_tenant_ids) & (cls.model.permission ==
                    TenantPermission.TEAM.value)) | (
                    cls.model.user_id == user_id))
            )
        if desc:
            angents = angents.order_by(cls.model.getter_by(orderby).desc())
            agents = agents.order_by(cls.model.getter_by(orderby).desc())
        else:
            angents = angents.order_by(cls.model.getter_by(orderby).asc())
        count = angents.count()
        angents = angents.paginate(page_number, items_per_page)
        return list(angents.dicts()), count
            agents = agents.order_by(cls.model.getter_by(orderby).asc())
        count = agents.count()
        agents = agents.paginate(page_number, items_per_page)
        return list(agents.dicts()), count


def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
@@ -173,6 +173,19 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
        conv.reference = []
        conv.reference.append({"chunks": [], "doc_aggs": []})

    kwargs_changed = False
    if kwargs:
        query = canvas.get_preset_param()
        if query:
            for ele in query:
                if ele["key"] in kwargs:
                    if ele["value"] != kwargs[ele["key"]]:
                        ele["value"] = kwargs[ele["key"]]
                        kwargs_changed = True
        if kwargs_changed:
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.update_by_id(session_id, {"dsl": conv.dsl})

    final_ans = {"reference": [], "content": ""}
    if stream:
        try:
@@ -281,8 +294,22 @@ def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True
            "source": "agent",
            "dsl": cvs.dsl
        }
        canvas.messages.append({"role": "user", "content": question, "id": message_id})
        canvas.add_user_input(question)

        API4ConversationService.save(**conv)
        conv = API4Conversation(**conv)
        if not conv.message:
            conv.message = []
        conv.message.append({
            "role": "user",
            "content": question,
            "id": message_id
        })

        if not conv.reference:
            conv.reference = []
            conv.reference.append({"chunks": [], "doc_aggs": []})

    # Handle existing session
    else:
@@ -318,7 +345,7 @@ def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True
    if stream:
        try:
            completion_tokens = 0
            for ans in canvas.run(stream=True):
            for ans in canvas.run(stream=True, bypass_begin=True):
                if ans.get("running_status"):
                    completion_tokens += len(tiktokenenc.encode(ans.get("content", "")))
                    yield "data: " + json.dumps(
@@ -381,7 +408,7 @@ def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True
    else:  # Non-streaming mode
        try:
            all_answer_content = ""
            for answer in canvas.run(stream=False):
            for answer in canvas.run(stream=False, bypass_begin=True):
                if answer.get("running_status"):
                    continue


@@ -18,57 +18,57 @@ from datetime import datetime
import peewee

from api.db.db_models import DB
from api.utils import datetime_format, current_timestamp, get_uuid
from api.utils import current_timestamp, datetime_format, get_uuid


class CommonService:
    """Base service class that provides common database operations.


    This class serves as a foundation for all service classes in the application,
    implementing standard CRUD operations and common database query patterns.
    It uses the Peewee ORM for database interactions and provides a consistent
    interface for database operations across all derived service classes.


    Attributes:
        model: The Peewee model class that this service operates on. Must be set by subclasses.
    """

    model = None

    @classmethod
    @DB.connection_context()
    def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
        """Execute a database query with optional column selection and ordering.


        This method provides a flexible way to query the database with various filters
        and sorting options. It supports column selection, sort order control, and
        additional filter conditions.


        Args:
            cols (list, optional): List of column names to select. If None, selects all columns.
            reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
            order_by (str, optional): Column name to sort results by.
            **kwargs: Additional filter conditions passed as keyword arguments.


        Returns:
            peewee.ModelSelect: A query result containing matching records.
        """
        return cls.model.query(cols=cols, reverse=reverse,
                               order_by=order_by, **kwargs)
        return cls.model.query(cols=cols, reverse=reverse, order_by=order_by, **kwargs)

    @classmethod
    @DB.connection_context()
    def get_all(cls, cols=None, reverse=None, order_by=None):
        """Retrieve all records from the database with optional column selection and ordering.


        This method fetches all records from the model's table with support for
        column selection and result ordering. If no order_by is specified and reverse
        is True, it defaults to ordering by create_time.


        Args:
            cols (list, optional): List of column names to select. If None, selects all columns.
            reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
            order_by (str, optional): Column name to sort results by. Defaults to 'create_time' if reverse is specified.


        Returns:
            peewee.ModelSelect: A query containing all matching records.
        """
@@ -80,27 +80,25 @@ class CommonService:
        if not order_by or not hasattr(cls, order_by):
            order_by = "create_time"
        if reverse is True:
            query_records = query_records.order_by(
                cls.model.getter_by(order_by).desc())
            query_records = query_records.order_by(cls.model.getter_by(order_by).desc())
        elif reverse is False:
            query_records = query_records.order_by(
                cls.model.getter_by(order_by).asc())
            query_records = query_records.order_by(cls.model.getter_by(order_by).asc())
        return query_records

    @classmethod
    @DB.connection_context()
    def get(cls, **kwargs):
        """Get a single record matching the given criteria.


        This method retrieves a single record from the database that matches
        the specified filter conditions.


        Args:
            **kwargs: Filter conditions as keyword arguments.


        Returns:
            Model instance: Single matching record.


        Raises:
            peewee.DoesNotExist: If no matching record is found.
        """
@@ -110,13 +108,13 @@ class CommonService:
    @DB.connection_context()
    def get_or_none(cls, **kwargs):
        """Get a single record or None if not found.


        This method attempts to retrieve a single record matching the given criteria,
        returning None if no match is found instead of raising an exception.


        Args:
            **kwargs: Filter conditions as keyword arguments.


        Returns:
            Model instance or None: Matching record if found, None otherwise.
        """
@@ -129,13 +127,13 @@ class CommonService:
    @DB.connection_context()
    def save(cls, **kwargs):
        """Save a new record to database.


        This method creates a new record in the database with the provided field values,
        forcing an insert operation rather than an update.


        Args:
            **kwargs: Record field values as keyword arguments.


        Returns:
            Model instance: The created record object.
        """
@@ -146,13 +144,13 @@ class CommonService:
    @DB.connection_context()
    def insert(cls, **kwargs):
        """Insert a new record with automatic ID and timestamps.


        This method creates a new record with automatically generated ID and timestamp fields.
        It handles the creation of create_time, create_date, update_time, and update_date fields.


        Args:
            **kwargs: Record field values as keyword arguments.


        Returns:
            Model instance: The newly created record object.
        """
@@ -169,10 +167,10 @@ class CommonService:
    @DB.connection_context()
    def insert_many(cls, data_list, batch_size=100):
        """Insert multiple records in batches.


        This method efficiently inserts multiple records into the database using batch processing.
        It automatically sets creation timestamps for all records.


        Args:
            data_list (list): List of dictionaries containing record data to insert.
            batch_size (int, optional): Number of records to insert in each batch. Defaults to 100.
@@ -182,16 +180,16 @@ class CommonService:
            d["create_time"] = current_timestamp()
            d["create_date"] = datetime_format(datetime.now())
        for i in range(0, len(data_list), batch_size):
            cls.model.insert_many(data_list[i:i + batch_size]).execute()
            cls.model.insert_many(data_list[i : i + batch_size]).execute()

    @classmethod
    @DB.connection_context()
    def update_many_by_id(cls, data_list):
        """Update multiple records by their IDs.


        This method updates multiple records in the database, identified by their IDs.
        It automatically updates the update_time and update_date fields for each record.


        Args:
            data_list (list): List of dictionaries containing record data to update.
                Each dictionary must include an 'id' field.
@@ -200,8 +198,7 @@ class CommonService:
        for data in data_list:
            data["update_time"] = current_timestamp()
            data["update_date"] = datetime_format(datetime.now())
            cls.model.update(data).where(
                cls.model.id == data["id"]).execute()
            cls.model.update(data).where(cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
@@ -257,6 +254,18 @@ class CommonService:
        # Returns:
        #     Number of records deleted
        return cls.model.delete().where(cls.model.id == pid).execute()

    @classmethod
    @DB.connection_context()
    def delete_by_ids(cls, pids):
        # Delete multiple records by their IDs
        # Args:
        #     pids: List of record IDs
        # Returns:
        #     Number of records deleted
        with DB.atomic():
            res = cls.model.delete().where(cls.model.id.in_(pids)).execute()
            return res

    @classmethod
    @DB.connection_context()
@@ -292,13 +301,12 @@ class CommonService:
        #     List of tuples containing chunks
        length = len(tar_list)
        arr = range(length)
        result = [tuple(tar_list[x:(x + n)]) for x in arr[::n]]
        result = [tuple(tar_list[x : (x + n)]) for x in arr[::n]]
        return result

    @classmethod
    @DB.connection_context()
    def filter_scope_list(cls, in_key, in_filters_list,
                          filters=None, cols=None):
    def filter_scope_list(cls, in_key, in_filters_list, filters=None, cols=None):
        # Get records matching IN clause filters with optional column selection
        # Args:
        #     in_key: Field name for IN clause
@@ -313,22 +321,12 @@ class CommonService:
        res_list = []
        if cols:
            for i in in_filters_tuple_list:
                query_records = cls.model.select(
                    *
                    cols).where(
                    getattr(
                        cls.model,
                        in_key).in_(i),
                    *
                    filters)
                query_records = cls.model.select(*cols).where(getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
                    res_list.extend([query_record for query_record in query_records])
        else:
            for i in in_filters_tuple_list:
                query_records = cls.model.select().where(
                    getattr(cls.model, in_key).in_(i), *filters)
                query_records = cls.model.select().where(getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
                    res_list.extend([query_record for query_record in query_records])
        return res_list

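The chunk helper reformatted in the hunk above slices `tar_list` into `n`-sized tuples by stepping the index range; evaluated on sample data:

tar_list, n = [1, 2, 3, 4, 5], 2
arr = range(len(tar_list))
print([tuple(tar_list[x:(x + n)]) for x in arr[::n]])  # [(1, 2), (3, 4), (5,)]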
@@ -90,17 +90,18 @@ def completion(tenant_id, chat_id, question, name="New session", session_id=None
            "user_id": kwargs.get("user_id", "")
        }
        ConversationService.save(**conv)
        yield "data:" + json.dumps({"code": 0, "message": "",
                                    "data": {
                                        "answer": conv["message"][0]["content"],
                                        "reference": {},
                                        "audio_binary": None,
                                        "id": None,
                                        "session_id": session_id
                                    }},
                                   ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
        return
        if stream:
            yield "data:" + json.dumps({"code": 0, "message": "",
                                        "data": {
                                            "answer": conv["message"][0]["content"],
                                            "reference": {},
                                            "audio_binary": None,
                                            "id": None,
                                            "session_id": session_id
                                        }},
                                       ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
        return

    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
    if not conv:
@@ -123,6 +124,8 @@ def completion(tenant_id, chat_id, question, name="New session", session_id=None
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)

    kb_ids = kwargs.get("kb_ids", [])
    dia.kb_ids = list(set(dia.kb_ids + kb_ids))
    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})

@@ -14,11 +14,11 @@
# limitations under the License.
#
import binascii
from datetime import datetime
import logging
import re
import time
from copy import deepcopy
from datetime import datetime
from functools import partial
from timeit import default_timer as timer

@@ -36,7 +36,7 @@ from api.utils import current_timestamp, datetime_format
from rag.app.resume import forbidden_select_fields4resume
from rag.app.tag import label_question
from rag.nlp.search import index_name
from rag.prompts import chunks_format, citation_prompt, full_question, kb_prompt, keyword_extraction, llm_id2llm_type, message_fit_in
from rag.prompts import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, llm_id2llm_type, message_fit_in
from rag.utils import num_tokens_from_string, rmSpace
from rag.utils.tavily_conn import Tavily

@@ -109,6 +109,7 @@ def chat_solo(dialog, messages, stream=True):
    msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"]
    if stream:
        last_ans = ""
        delta_ans = ""
        for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
            answer = ans
            delta_ans = ans[len(last_ans) :]
@@ -116,6 +117,7 @@ def chat_solo(dialog, messages, stream=True):
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
            delta_ans = ""
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
    else:
@@ -125,9 +127,71 @@ def chat_solo(dialog, messages, stream=True):
        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}


def get_models(dialog):
    embd_mdl, chat_mdl, rerank_mdl, tts_mdl = None, None, None, None
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    if len(embedding_list) > 1:
        raise Exception("**ERROR**: Knowledge bases use different embedding models.")

    if embedding_list:
        embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_list[0])
        if not embd_mdl:
            raise LookupError("Embedding model(%s) not found" % embedding_list[0])

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    if dialog.prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    return kbs, embd_mdl, rerank_mdl, chat_mdl, tts_mdl


BAD_CITATION_PATTERNS = [
    re.compile(r"\(\s*ID\s*[: ]*\s*(\d+)\s*\)"),  # (ID: 12)
    re.compile(r"\[\s*ID\s*[: ]*\s*(\d+)\s*\]"),  # [ID: 12]
    re.compile(r"【\s*ID\s*[: ]*\s*(\d+)\s*】"),  # 【ID: 12】
    re.compile(r"ref\s*(\d+)", flags=re.IGNORECASE),  # ref12, REF 12
]


def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: set):
    max_index = len(kbinfos["chunks"])

    def safe_add(i):
        if 0 <= i < max_index:
            idx.add(i)
            return True
        return False

    def find_and_replace(pattern, group_index=1, repl=lambda i: f"ID:{i}", flags=0):
        nonlocal answer

        def replacement(match):
            try:
                i = int(match.group(group_index))
                if safe_add(i):
                    return f"[{repl(i)}]"
            except Exception:
                pass
            return match.group(0)

        answer = re.sub(pattern, replacement, answer, flags=flags)

    for pattern in BAD_CITATION_PATTERNS:
        find_and_replace(pattern)

    return answer, idx


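`repair_bad_citation_formats` rewrites stray citation spellings into the canonical `[ID:n]` form and records each in-range chunk index it saw. A worked example with made-up retrieval data:

kbinfos = {"chunks": [{"doc_id": "d0"}, {"doc_id": "d1"}, {"doc_id": "d2"}]}
answer = "Cited once (ID: 1) and once more as ref 2"
answer, idx = repair_bad_citation_formats(answer, kbinfos, set())
print(answer)  # Cited once [ID:1] and once more as [ID:2]
print(idx)     # {1, 2}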
def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    if not dialog.kb_ids:
    if not dialog.kb_ids and not dialog.prompt_config.get("tavily_api_key"):
        for ans in chat_solo(dialog, messages, stream):
            yield ans
        return
@@ -152,45 +216,19 @@ def chat(dialog, messages, stream=True, **kwargs):
        langfuse.trace = langfuse_tracer.trace(name=f"{dialog.name}-{llm_model_config['llm_name']}")

    check_langfuse_tracer_ts = timer()

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    if len(embedding_list) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    embedding_model_name = embedding_list[0]
    kbs, embd_mdl, rerank_mdl, chat_mdl, tts_mdl = get_models(dialog)
    toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
    if toolcall_session and tools:
        chat_mdl.bind_tools(toolcall_session, tools)
    bind_models_ts = timer()

    retriever = settings.retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]

    create_retriever_ts = timer()

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_model_name)
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embedding_model_name)

    bind_embedding_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
    toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
    if toolcall_session and tools:
        chat_mdl.bind_tools(toolcall_session, tools)

    bind_llm_ts = timer()

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # try to use sql if field mapping is good to go
    if field_map:
        logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
@@ -212,26 +250,21 @@ def chat(dialog, messages, stream=True, **kwargs):
    else:
        questions = questions[-1:]

    if prompt_config.get("cross_languages"):
        questions = [cross_languages(dialog.tenant_id, dialog.llm_id, questions[0], prompt_config["cross_languages"])]

    if prompt_config.get("keyword", False):
        questions[-1] += keyword_extraction(chat_mdl, questions[-1])

    refine_question_ts = timer()

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    bind_reranker_ts = timer()
    generate_keyword_ts = bind_reranker_ts
    thought = ""
    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        knowledges = []
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            generate_keyword_ts = timer()

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))

        knowledges = []
        if prompt_config.get("reasoning", False):
            reasoner = DeepResearcher(
@@ -247,21 +280,22 @@ def chat(dialog, messages, stream=True, **kwargs):
            elif stream:
                yield think
        else:
            kbinfos = retriever.retrieval(
                " ".join(questions),
                embd_mdl,
                tenant_ids,
                dialog.kb_ids,
                1,
                dialog.top_n,
                dialog.similarity_threshold,
                dialog.vector_similarity_weight,
                doc_ids=attachments,
                top=dialog.top_k,
                aggs=False,
                rerank_mdl=rerank_mdl,
                rank_feature=label_question(" ".join(questions), kbs),
            )
            if embd_mdl:
                kbinfos = retriever.retrieval(
                    " ".join(questions),
                    embd_mdl,
                    tenant_ids,
                    dialog.kb_ids,
                    1,
                    dialog.top_n,
                    dialog.similarity_threshold,
                    dialog.vector_similarity_weight,
                    doc_ids=attachments,
                    top=dialog.top_k,
                    aggs=False,
                    rerank_mdl=rerank_mdl,
                    rank_feature=label_question(" ".join(questions), kbs),
                )
            if prompt_config.get("tavily_api_key"):
                tav = Tavily(prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(" ".join(questions))
@@ -298,7 +332,7 @@ def chat(dialog, messages, stream=True, **kwargs):
        gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)

    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer
        nonlocal embd_mdl, prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer

        refs = []
        ans = answer.split("</think>")
@@ -308,9 +342,8 @@ def chat(dialog, messages, stream=True, **kwargs):
            answer = ans[1]

        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer = re.sub(r"##[ij]\$\$", "", answer, flags=re.DOTALL)
            idx = set([])
            if not re.search(r"##[0-9]+\$\$", answer):
            if embd_mdl and not re.search(r"\[ID:([0-9]+)\]", answer):
                answer, idx = retriever.insert_citations(
                    answer,
                    [ck["content_ltks"] for ck in kbinfos["chunks"]],
@@ -320,20 +353,12 @@ def chat(dialog, messages, stream=True, **kwargs):
                    vtweight=dialog.vector_similarity_weight,
                )
            else:
                for match in re.finditer(r"##([0-9]+)\$\$", answer):
                for match in re.finditer(r"\[ID:([0-9]+)\]", answer):
                    i = int(match.group(1))
                    if i < len(kbinfos["chunks"]):
                        idx.add(i)

            # handle (ID: 1), ID: 2 etc.
            for match in re.finditer(r"\(\s*ID:\s*(\d+)\s*\)|ID[: ]+\s*(\d+)", answer):
                full_match = match.group(0)
                id = match.group(1) or match.group(2)
                if id:
                    i = int(id)
                    if i < len(kbinfos["chunks"]):
                        idx.add(i)
                        answer = answer.replace(full_match, f"##{i}$$")
            answer, idx = repair_bad_citation_formats(answer, kbinfos, idx)

            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
@@ -353,13 +378,9 @@ def chat(dialog, messages, stream=True, **kwargs):
        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        check_langfuse_tracer_cost = (check_langfuse_tracer_ts - check_llm_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_langfuse_tracer_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
        bind_reranker_time_cost = (bind_reranker_ts - refine_question_ts) * 1000
        generate_keyword_time_cost = (generate_keyword_ts - bind_reranker_ts) * 1000
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        bind_embedding_time_cost = (bind_models_ts - check_langfuse_tracer_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_models_ts) * 1000
        retrieval_time_cost = (retrieval_ts - refine_question_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        tk_num = num_tokens_from_string(think + answer)
@@ -370,12 +391,8 @@ def chat(dialog, messages, stream=True, **kwargs):
            f"  - Total: {total_time_cost:.1f}ms\n"
            f"  - Check LLM: {check_llm_time_cost:.1f}ms\n"
            f"  - Check Langfuse tracer: {check_langfuse_tracer_cost:.1f}ms\n"
            f"  - Create retriever: {create_retriever_time_cost:.1f}ms\n"
            f"  - Bind embedding: {bind_embedding_time_cost:.1f}ms\n"
            f"  - Bind LLM: {bind_llm_time_cost:.1f}ms\n"
            f"  - Multi-turn optimization: {refine_question_time_cost:.1f}ms\n"
            f"  - Bind reranker: {bind_reranker_time_cost:.1f}ms\n"
            f"  - Generate keyword: {generate_keyword_time_cost:.1f}ms\n"
            f"  - Bind models: {bind_embedding_time_cost:.1f}ms\n"
            f"  - Query refinement(LLM): {refine_question_time_cost:.1f}ms\n"
            f"  - Retrieval: {retrieval_time_cost:.1f}ms\n"
            f"  - Generate answer: {generate_result_time_cost:.1f}ms\n\n"
            "## Token usage:\n"
@@ -400,7 +417,7 @@ def chat(dialog, messages, stream=True, **kwargs):
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt + prompt4citation, msg[1:], gen_conf):
            if thought:
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
                ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans) :]
            if num_tokens_from_string(delta_ans) < 16:
@@ -436,7 +453,7 @@ Please write the SQL, only SQL, without any other explanations or text.
    def get_table():
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.06})
        sql = re.sub(r"<think>.*</think>", "", sql, flags=re.DOTALL)
        sql = re.sub(r"^.*</think>", "", sql, flags=re.DOTALL)
        logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
@@ -496,7 +513,7 @@ Please write the SQL, only SQL, without any other explanations or text.

    # compose Markdown table
    columns = (
        "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
        "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
    )

    line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and docid_idx else "")
@@ -539,7 +556,7 @@ def tts(tts_mdl, text):
    return binascii.hexlify(bin).decode("utf-8")


def ask(question, kb_ids, tenant_id):
def ask(question, kb_ids, tenant_id, chat_llm_name=None):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))

@@ -547,7 +564,7 @@ def ask(question, kb_ids, tenant_id):
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, chat_llm_name)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False, rank_feature=label_question(question, kbs))
@@ -592,4 +609,4 @@ def ask(question, kb_ids, tenant_id):
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)
    yield decorate_answer(answer)

@@ -27,6 +27,7 @@ import xxhash
from peewee import fn

from api import settings
from api.constants import IMG_BASE64_PREFIX
from api.db import FileType, LLMType, ParserType, StatusEnum, TaskStatus, UserTenantRole
from api.db.db_models import DB, Document, Knowledgebase, Task, Tenant, UserTenant
from api.db.db_utils import bulk_insert_into_db
@@ -34,9 +35,10 @@ from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import current_timestamp, get_format_time, get_uuid
from rag.nlp import rag_tokenizer, search
from rag.settings import get_svr_queue_name
from rag.settings import get_svr_queue_name, SVR_CONSUMER_GROUP_NAME
from rag.utils.redis_conn import REDIS_CONN
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.doc_store_conn import OrderByExpr


class DocumentService(CommonService):
@@ -70,7 +72,7 @@ class DocumentService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_by_kb_id(cls, kb_id, page_number, items_per_page,
                     orderby, desc, keywords):
                     orderby, desc, keywords, run_status, types):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == kb_id),
@@ -78,16 +80,60 @@ class DocumentService(CommonService):
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)

        if run_status:
            docs = docs.where(cls.model.run.in_(run_status))
        if types:
            docs = docs.where(cls.model.type.in_(types))

        count = docs.count()
        if desc:
            docs = docs.order_by(cls.model.getter_by(orderby).desc())
        else:
            docs = docs.order_by(cls.model.getter_by(orderby).asc())

        docs = docs.paginate(page_number, items_per_page)

        if page_number and items_per_page:
            docs = docs.paginate(page_number, items_per_page)

        return list(docs.dicts()), count

    @classmethod
    @DB.connection_context()
    def count_by_kb_id(cls, kb_id, keywords, run_status, types):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == kb_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)

        if run_status:
            docs = docs.where(cls.model.run.in_(run_status))
        if types:
            docs = docs.where(cls.model.type.in_(types))

        count = docs.count()

        return count

    @classmethod
    @DB.connection_context()
    def get_total_size_by_kb_id(cls, kb_id, keywords="", run_status=[], types=[]):
        query = cls.model.select(fn.COALESCE(fn.SUM(cls.model.size), 0)).where(
            cls.model.kb_id == kb_id
        )

        if keywords:
            query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
        if run_status:
            query = query.where(cls.model.run.in_(run_status))
        if types:
            query = query.where(cls.model.type.in_(types))

        return int(query.scalar()) or 0

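`get_total_size_by_kb_id` wraps the sum in `COALESCE(..., 0)` so an empty knowledge base reports 0 instead of NULL. The same aggregate as a standalone peewee query; the kb id is a placeholder:

from peewee import fn

total = (Document
         .select(fn.COALESCE(fn.SUM(Document.size), 0))
         .where(Document.kb_id == "kb-123")
         .scalar())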
    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
@@ -102,15 +148,38 @@ class DocumentService(CommonService):
    def remove_document(cls, doc, tenant_id):
        cls.clear_chunk_num(doc.id)
        try:
            page = 0
            page_size = 1000
            all_chunk_ids = []
            while True:
                chunks = settings.docStoreConn.search(["img_id"], [], {"doc_id": doc.id}, [], OrderByExpr(),
                                                      page * page_size, page_size, search.index_name(tenant_id),
                                                      [doc.kb_id])
                chunk_ids = settings.docStoreConn.getChunkIds(chunks)
                if not chunk_ids:
                    break
                all_chunk_ids.extend(chunk_ids)
                page += 1
            for cid in all_chunk_ids:
                if STORAGE_IMPL.obj_exist(doc.kb_id, cid):
                    STORAGE_IMPL.rm(doc.kb_id, cid)
            if doc.thumbnail and not doc.thumbnail.startswith(IMG_BASE64_PREFIX):
                if STORAGE_IMPL.obj_exist(doc.kb_id, doc.thumbnail):
                    STORAGE_IMPL.rm(doc.kb_id, doc.thumbnail)
            settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "source_id": doc.id},
                                         {"remove": {"source_id": doc.id}},
                                         search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
                                         {"removed_kwd": "Y"},
                                         search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "must_not": {"exists": "source_id"}},
                                         search.index_name(tenant_id), doc.kb_id)

            graph_source = settings.docStoreConn.getFields(
                settings.docStoreConn.search(["source_id"], [], {"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]}, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [doc.kb_id]), ["source_id"]
            )
            if len(graph_source) > 0 and doc.id in list(graph_source.values())[0]["source_id"]:
                settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "source_id": doc.id},
                                             {"remove": {"source_id": doc.id}},
                                             search.index_name(tenant_id), doc.kb_id)
                settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
                                             {"removed_kwd": "Y"},
                                             search.index_name(tenant_id), doc.kb_id)
                settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "must_not": {"exists": "source_id"}},
                                             search.index_name(tenant_id), doc.kb_id)
        except Exception:
            pass
        return cls.delete_by_id(doc.id)
@@ -327,6 +396,15 @@ class DocumentService(CommonService):
        if not doc_id:
            return
        return doc_id[0]["id"]

    @classmethod
    @DB.connection_context()
    def get_doc_ids_by_doc_names(cls, doc_names):
        if not doc_names:
            return []

        query = cls.model.select(cls.model.id).where(cls.model.name.in_(doc_names))
        return list(query.scalars().iterator())

    @classmethod
    @DB.connection_context()
@@ -406,7 +484,8 @@ class DocumentService(CommonService):
                if t.progress == -1:
                    bad += 1
                prg += t.progress if t.progress >= 0 else 0
                msg.append(t.progress_msg)
                if t.progress_msg.strip():
                    msg.append(t.progress_msg)
                if t.task_type == "raptor":
                    has_raptor = True
                elif t.task_type == "graphrag":
@@ -436,6 +515,8 @@ class DocumentService(CommonService):
                info["progress"] = prg
            if msg:
                info["progress_msg"] = msg
            else:
                info["progress_msg"] = "%d tasks are ahead in the queue..." % get_queue_length(priority)
            cls.update_by_id(d["id"], info)
        except Exception as e:
            if str(e).find("'0'") < 0:
@@ -484,6 +565,11 @@ def queue_raptor_o_graphrag_tasks(doc, ty, priority):
    assert REDIS_CONN.queue_product(get_svr_queue_name(priority), message=task), "Can't access Redis. Please check the Redis' status."


def get_queue_length(priority):
    group_info = REDIS_CONN.queue_info(get_svr_queue_name(priority), SVR_CONSUMER_GROUP_NAME)
    return int(group_info.get("lag", 0))


def doc_upload_and_parse(conversation_id, file_objs, user_id):
    from api.db.services.api_service import API4ConversationService
    from api.db.services.conversation_service import ConversationService

@@ -14,22 +14,22 @@
# limitations under the License.
#
import logging
import re
import os
import re
from concurrent.futures import ThreadPoolExecutor

from flask_login import current_user
from peewee import fn

from api.db import FileType, KNOWLEDGEBASE_FOLDER_NAME, FileSource, ParserType
from api.db.db_models import DB, File2Document, Knowledgebase
from api.db.db_models import File, Document
from api.constants import FILE_NAME_LEN_LIMIT
from api.db import KNOWLEDGEBASE_FOLDER_NAME, FileSource, FileType, ParserType
from api.db.db_models import DB, Document, File, File2Document, Knowledgebase
from api.db.services import duplicate_name
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.utils import get_uuid
from api.utils.file_utils import filename_type, thumbnail_img
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img
from rag.utils.storage_factory import STORAGE_IMPL


@@ -39,8 +39,7 @@ class FileService(CommonService):

    @classmethod
    @DB.connection_context()
    def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page,
                     orderby, desc, keywords):
    def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page, orderby, desc, keywords):
        # Get files by parent folder ID with pagination and filtering
        # Args:
        #     tenant_id: ID of the tenant
@@ -53,17 +52,9 @@ class FileService(CommonService):
        # Returns:
        #     Tuple of (file_list, total_count)
        if keywords:
            files = cls.model.select().where(
                (cls.model.tenant_id == tenant_id),
                (cls.model.parent_id == pf_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower())),
                ~(cls.model.id == pf_id)
            )
            files = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == pf_id), (fn.LOWER(cls.model.name).contains(keywords.lower())), ~(cls.model.id == pf_id))
        else:
            files = cls.model.select().where((cls.model.tenant_id == tenant_id),
                                             (cls.model.parent_id == pf_id),
                                             ~(cls.model.id == pf_id)
                                             )
            files = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == pf_id), ~(cls.model.id == pf_id))
        count = files.count()
        if desc:
            files = files.order_by(cls.model.getter_by(orderby).desc())
@@ -76,16 +67,20 @@ class FileService(CommonService):
        for file in res_files:
            if file["type"] == FileType.FOLDER.value:
                file["size"] = cls.get_folder_size(file["id"])
                file['kbs_info'] = []
                children = list(cls.model.select().where(
                    (cls.model.tenant_id == tenant_id),
                    (cls.model.parent_id == file["id"]),
                    ~(cls.model.id == file["id"]),
                ).dicts())
                file["has_child_folder"] = any(value["type"] == FileType.FOLDER.value for value in children)
                file["kbs_info"] = []
                children = list(
                    cls.model.select()
                    .where(
                        (cls.model.tenant_id == tenant_id),
                        (cls.model.parent_id == file["id"]),
                        ~(cls.model.id == file["id"]),
                    )
                    .dicts()
                )
                file["has_child_folder"] = any(value["type"] == FileType.FOLDER.value for value in children)
                continue
            kbs_info = cls.get_kb_id_by_file_id(file['id'])
            file['kbs_info'] = kbs_info
            kbs_info = cls.get_kb_id_by_file_id(file["id"])
            file["kbs_info"] = kbs_info

        return res_files, count

@@ -97,16 +92,18 @@ class FileService(CommonService):
        #     file_id: File ID
        # Returns:
        #     List of dictionaries containing knowledge base IDs and names
        kbs = (cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
               .join(File2Document, on=(File2Document.file_id == file_id))
               .join(Document, on=(File2Document.document_id == Document.id))
               .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
               .where(cls.model.id == file_id))
        kbs = (
            cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
            .join(File2Document, on=(File2Document.file_id == file_id))
            .join(Document, on=(File2Document.document_id == Document.id))
            .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
            .where(cls.model.id == file_id)
        )
        if not kbs:
            return []
        kbs_info_list = []
        for kb in list(kbs.dicts()):
            kbs_info_list.append({"kb_id": kb['id'], "kb_name": kb['name']})
            kbs_info_list.append({"kb_id": kb["id"], "kb_name": kb["name"]})
        return kbs_info_list

    @classmethod
@@ -178,16 +175,9 @@ class FileService(CommonService):
        if count > len(name) - 2:
            return file
        else:
            file = cls.insert({
                "id": get_uuid(),
                "parent_id": parent_id,
                "tenant_id": current_user.id,
                "created_by": current_user.id,
                "name": name[count],
                "location": "",
                "size": 0,
                "type": FileType.FOLDER.value
            })
            file = cls.insert(
                {"id": get_uuid(), "parent_id": parent_id, "tenant_id": current_user.id, "created_by": current_user.id, "name": name[count], "location": "", "size": 0, "type": FileType.FOLDER.value}
|
||||
)
|
||||
return cls.create_folder(file, file.id, name, count + 1)
|
||||
|
||||
@classmethod
|
||||
@ -212,9 +202,7 @@ class FileService(CommonService):
|
||||
# tenant_id: Tenant ID
|
||||
# Returns:
|
||||
# Root folder dictionary
|
||||
for file in cls.model.select().where((cls.model.tenant_id == tenant_id),
|
||||
(cls.model.parent_id == cls.model.id)
|
||||
):
|
||||
for file in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
|
||||
return file.to_dict()
|
||||
|
||||
file_id = get_uuid()
|
||||
@ -239,11 +227,8 @@ class FileService(CommonService):
|
||||
# tenant_id: Tenant ID
|
||||
# Returns:
|
||||
# Knowledge base folder dictionary
|
||||
for root in cls.model.select().where(
|
||||
(cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
|
||||
for folder in cls.model.select().where(
|
||||
(cls.model.tenant_id == tenant_id), (cls.model.parent_id == root.id),
|
||||
(cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)):
|
||||
for root in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
|
||||
for folder in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == root.id), (cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)):
|
||||
return folder.to_dict()
|
||||
assert False, "Can't find the KB folder. Database init error."
|
||||
|
||||
@ -271,7 +256,7 @@ class FileService(CommonService):
|
||||
"type": ty,
|
||||
"size": size,
|
||||
"location": location,
|
||||
"source_type": FileSource.KNOWLEDGEBASE
|
||||
"source_type": FileSource.KNOWLEDGEBASE,
|
||||
}
|
||||
cls.save(**file)
|
||||
return file
|
||||
@ -283,12 +268,11 @@ class FileService(CommonService):
|
||||
# Args:
|
||||
# root_id: Root folder ID
|
||||
# tenant_id: Tenant ID
|
||||
for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)\
|
||||
& (cls.model.parent_id == root_id)):
|
||||
for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME) & (cls.model.parent_id == root_id)):
|
||||
return
|
||||
folder = cls.new_a_file_from_kb(tenant_id, KNOWLEDGEBASE_FOLDER_NAME, root_id)
|
||||
|
||||
for kb in Knowledgebase.select(*[Knowledgebase.id, Knowledgebase.name]).where(Knowledgebase.tenant_id==tenant_id):
|
||||
for kb in Knowledgebase.select(*[Knowledgebase.id, Knowledgebase.name]).where(Knowledgebase.tenant_id == tenant_id):
|
||||
kb_folder = cls.new_a_file_from_kb(tenant_id, kb.name, folder["id"])
|
||||
for doc in DocumentService.query(kb_id=kb.id):
|
||||
FileService.add_file_from_kb(doc.to_dict(), kb_folder["id"], tenant_id)
|
||||
@ -357,12 +341,10 @@ class FileService(CommonService):
|
||||
@DB.connection_context()
|
||||
def delete_folder_by_pf_id(cls, user_id, folder_id):
|
||||
try:
|
||||
files = cls.model.select().where((cls.model.tenant_id == user_id)
|
||||
& (cls.model.parent_id == folder_id))
|
||||
files = cls.model.select().where((cls.model.tenant_id == user_id) & (cls.model.parent_id == folder_id))
|
||||
for file in files:
|
||||
cls.delete_folder_by_pf_id(user_id, file.id)
|
||||
return cls.model.delete().where((cls.model.tenant_id == user_id)
|
||||
& (cls.model.id == folder_id)).execute(),
|
||||
return (cls.model.delete().where((cls.model.tenant_id == user_id) & (cls.model.id == folder_id)).execute(),)
|
||||
except Exception:
|
||||
logging.exception("delete_folder_by_pf_id")
|
||||
raise RuntimeError("Database error (File retrieval)!")
|
||||
@ -380,8 +362,7 @@ class FileService(CommonService):
|
||||
|
||||
def dfs(parent_id):
|
||||
nonlocal size
|
||||
for f in cls.model.select(*[cls.model.id, cls.model.size, cls.model.type]).where(
|
||||
cls.model.parent_id == parent_id, cls.model.id != parent_id):
|
||||
for f in cls.model.select(*[cls.model.id, cls.model.size, cls.model.type]).where(cls.model.parent_id == parent_id, cls.model.id != parent_id):
|
||||
size += f.size
|
||||
if f.type == FileType.FOLDER.value:
|
||||
dfs(f.id)
|
||||
@ -403,16 +384,16 @@ class FileService(CommonService):
|
||||
"type": doc["type"],
|
||||
"size": doc["size"],
|
||||
"location": doc["location"],
|
||||
"source_type": FileSource.KNOWLEDGEBASE
|
||||
"source_type": FileSource.KNOWLEDGEBASE,
|
||||
}
|
||||
cls.save(**file)
|
||||
File2DocumentService.save(**{"id": get_uuid(), "file_id": file["id"], "document_id": doc["id"]})
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def move_file(cls, file_ids, folder_id):
|
||||
try:
|
||||
cls.filter_update((cls.model.id << file_ids, ), { 'parent_id': folder_id })
|
||||
cls.filter_update((cls.model.id << file_ids,), {"parent_id": folder_id})
|
||||
except Exception:
|
||||
logging.exception("move_file")
|
||||
raise RuntimeError("Database error (File move)!")
|
||||
@ -429,16 +410,13 @@ class FileService(CommonService):
|
||||
err, files = [], []
|
||||
for file in file_objs:
|
||||
try:
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get("MAX_FILE_NUM_PER_USER", 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
|
||||
raise RuntimeError("Exceed the maximum file number of a free user!")
|
||||
if len(file.filename) >= 128:
|
||||
raise RuntimeError("Exceed the maximum length of file name!")
|
||||
if len(file.filename.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
raise RuntimeError(f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.")
|
||||
|
||||
filename = duplicate_name(
|
||||
DocumentService.query,
|
||||
name=file.filename,
|
||||
kb_id=kb.id)
|
||||
filename = duplicate_name(DocumentService.query, name=file.filename, kb_id=kb.id)
|
||||
filetype = filename_type(filename)
|
||||
if filetype == FileType.OTHER.value:
|
||||
raise RuntimeError("This type of file has not been supported yet!")
|
||||
@ -446,15 +424,18 @@ class FileService(CommonService):
|
||||
location = filename
|
||||
while STORAGE_IMPL.obj_exist(kb.id, location):
|
||||
location += "_"
|
||||
|
||||
blob = file.read()
|
||||
if filetype == FileType.PDF.value:
|
||||
blob = read_potential_broken_pdf(blob)
|
||||
STORAGE_IMPL.put(kb.id, location, blob)
|
||||
|
||||
doc_id = get_uuid()
|
||||
|
||||
img = thumbnail_img(filename, blob)
|
||||
thumbnail_location = ''
|
||||
thumbnail_location = ""
|
||||
if img is not None:
|
||||
thumbnail_location = f'thumbnail_{doc_id}.png'
|
||||
thumbnail_location = f"thumbnail_{doc_id}.png"
|
||||
STORAGE_IMPL.put(kb.id, thumbnail_location, img)
|
||||
|
||||
doc = {
|
||||
@ -467,7 +448,7 @@ class FileService(CommonService):
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
"thumbnail": thumbnail_location
|
||||
"thumbnail": thumbnail_location,
|
||||
}
|
||||
DocumentService.insert(doc)
|
||||
|
||||
@ -480,29 +461,17 @@ class FileService(CommonService):
|
||||
|
||||
@staticmethod
|
||||
def parse_docs(file_objs, user_id):
|
||||
from rag.app import presentation, picture, naive, audio, email
|
||||
from rag.app import audio, email, naive, picture, presentation
|
||||
|
||||
def dummy(prog=None, msg=""):
|
||||
pass
|
||||
|
||||
FACTORY = {
|
||||
ParserType.PRESENTATION.value: presentation,
|
||||
ParserType.PICTURE.value: picture,
|
||||
ParserType.AUDIO.value: audio,
|
||||
ParserType.EMAIL.value: email
|
||||
}
|
||||
FACTORY = {ParserType.PRESENTATION.value: presentation, ParserType.PICTURE.value: picture, ParserType.AUDIO.value: audio, ParserType.EMAIL.value: email}
|
||||
parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
|
||||
exe = ThreadPoolExecutor(max_workers=12)
|
||||
threads = []
|
||||
for file in file_objs:
|
||||
kwargs = {
|
||||
"lang": "English",
|
||||
"callback": dummy,
|
||||
"parser_config": parser_config,
|
||||
"from_page": 0,
|
||||
"to_page": 100000,
|
||||
"tenant_id": user_id
|
||||
}
|
||||
kwargs = {"lang": "English", "callback": dummy, "parser_config": parser_config, "from_page": 0, "to_page": 100000, "tenant_id": user_id}
|
||||
filetype = filename_type(file.filename)
|
||||
blob = file.read()
|
||||
threads.append(exe.submit(FACTORY.get(FileService.get_parser(filetype, file.filename, ""), naive).chunk, file.filename, blob, **kwargs))
|
||||
@ -523,4 +492,4 @@ class FileService(CommonService):
|
||||
return ParserType.PRESENTATION.value
|
||||
if re.search(r"\.(eml)$", filename):
|
||||
return ParserType.EMAIL.value
|
||||
return default
|
||||
return default
|
||||
|
||||
@@ -97,7 +97,7 @@ class KnowledgebaseService(CommonService):
kb = kbs[0]

# Get all documents in the knowledge base
docs, _ = DocumentService.get_by_kb_id(kb_id, 1, 1000, "create_time", True, "")
docs, _ = DocumentService.get_by_kb_id(kb_id, 1, 1000, "create_time", True, "", [], [])

# Check parsing status of each document
for doc in docs:
@@ -226,7 +226,10 @@ class KnowledgebaseService(CommonService):
cls.model.chunk_num,
cls.model.parser_id,
cls.model.parser_config,
cls.model.pagerank]
cls.model.pagerank,
cls.model.create_time,
cls.model.update_time
]
kbs = cls.model.select(*fields).join(Tenant, on=(
(Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
(cls.model.id == kb_id),
@@ -266,6 +269,16 @@ class KnowledgebaseService(CommonService):
dfs_update(m.parser_config, config)
cls.update_by_id(id, {"parser_config": m.parser_config})

@classmethod
@DB.connection_context()
def delete_field_map(cls, id):
e, m = cls.get_by_id(id)
if not e:
raise LookupError(f"knowledgebase({id}) not found.")

m.parser_config.pop("field_map", None)
cls.update_by_id(id, {"parser_config": m.parser_config})

@classmethod
@DB.connection_context()
def get_field_map(cls, ids):
@@ -100,9 +100,13 @@ class TenantLLMService(CommonService):

model_config = cls.get_api_key(tenant_id, mdlnm)
mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
if not model_config: # for some cases seems fid mismatch
model_config = cls.get_api_key(tenant_id, mdlnm)
if model_config:
model_config = model_config.to_dict()
llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
if not llm and fid: # for some cases seems fid mismatch
llm = LLMService.query(llm_name=mdlnm)
if llm:
model_config["is_tools"] = llm[0].is_tools
if not model_config:
@@ -159,19 +163,13 @@ class TenantLLMService(CommonService):
@classmethod
@DB.connection_context()
def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
try:
if not DB.is_connection_usable():
DB.connect()
except Exception:
DB.close()
DB.connect()
e, tenant = TenantService.get_by_id(tenant_id)
if not e:
logging.error(f"Tenant not found: {tenant_id}")
return 0

llm_map = {
LLMType.EMBEDDING.value: tenant.embd_id,
LLMType.EMBEDDING.value: tenant.embd_id if not llm_name else llm_name,
LLMType.SPEECH2TEXT.value: tenant.asr_id,
LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
@@ -228,6 +226,7 @@ class LLMBundle:

def bind_tools(self, toolcall_session, tools):
if not self.is_tools:
logging.warning(f"Model {self.llm_name} does not support tool call, but you have assigned one or more tools to it!")
return
self.mdl.bind_tools(toolcall_session, tools)

@@ -236,7 +235,8 @@ class LLMBundle:
generation = self.trace.generation(name="encode", model=self.llm_name, input={"texts": texts})

embeddings, used_tokens = self.mdl.encode(texts)
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
llm_name = getattr(self, "llm_name", None)
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, llm_name):
logging.error("LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))

if self.langfuse:
@@ -249,7 +249,8 @@ class LLMBundle:
generation = self.trace.generation(name="encode_queries", model=self.llm_name, input={"query": query})

emd, used_tokens = self.mdl.encode_queries(query)
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
llm_name = getattr(self, "llm_name", None)
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, llm_name):
logging.error("LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))

if self.langfuse:
@@ -362,7 +363,7 @@ class LLMBundle:

ans = ""
chat_streamly = self.mdl.chat_streamly
total_tokens = 0
total_tokens = 0
if self.is_tools and self.mdl.is_tools:
chat_streamly = self.mdl.chat_streamly_with_tools
api/db/services/search_service.py (new file, 110 lines)
@@ -0,0 +1,110 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime

from peewee import fn

from api.db import StatusEnum
from api.db.db_models import DB, Search, User
from api.db.services.common_service import CommonService
from api.utils import current_timestamp, datetime_format


class SearchService(CommonService):
    model = Search

    @classmethod
    def save(cls, **kwargs):
        kwargs["create_time"] = current_timestamp()
        kwargs["create_date"] = datetime_format(datetime.now())
        kwargs["update_time"] = current_timestamp()
        kwargs["update_date"] = datetime_format(datetime.now())
        obj = cls.model.create(**kwargs)
        return obj

    @classmethod
    @DB.connection_context()
    def accessible4deletion(cls, search_id, user_id) -> bool:
        search = (
            cls.model.select(cls.model.id)
            .where(
                cls.model.id == search_id,
                cls.model.created_by == user_id,
                cls.model.status == StatusEnum.VALID.value,
            )
            .first()
        )
        return search is not None

    @classmethod
    @DB.connection_context()
    def get_detail(cls, search_id):
        fields = [
            cls.model.id,
            cls.model.avatar,
            cls.model.tenant_id,
            cls.model.name,
            cls.model.description,
            cls.model.created_by,
            cls.model.search_config,
            cls.model.update_time,
            User.nickname,
            User.avatar.alias("tenant_avatar"),
        ]
        search = (
            cls.model.select(*fields)
            .join(User, on=((User.id == cls.model.tenant_id) & (User.status == StatusEnum.VALID.value)))
            .where((cls.model.id == search_id) & (cls.model.status == StatusEnum.VALID.value))
            .first()
            .to_dict()
        )
        return search

    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids(cls, joined_tenant_ids, user_id, page_number, items_per_page, orderby, desc, keywords):
        fields = [
            cls.model.id,
            cls.model.avatar,
            cls.model.tenant_id,
            cls.model.name,
            cls.model.description,
            cls.model.created_by,
            cls.model.status,
            cls.model.update_time,
            cls.model.create_time,
            User.nickname,
            User.avatar.alias("tenant_avatar"),
        ]
        query = (
            cls.model.select(*fields)
            .join(User, on=(cls.model.tenant_id == User.id))
            .where(((cls.model.tenant_id.in_(joined_tenant_ids)) | (cls.model.tenant_id == user_id)) & (cls.model.status == StatusEnum.VALID.value))
        )

        if keywords:
            query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
        if desc:
            query = query.order_by(cls.model.getter_by(orderby).desc())
        else:
            query = query.order_by(cls.model.getter_by(orderby).asc())

        count = query.count()

        if page_number and items_per_page:
            query = query.paginate(page_number, items_per_page)

        return list(query.dicts()), count
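
A minimal usage sketch of the new SearchService (illustrative only, not part of the diff; the ids and field values below are placeholders, and only fields visible in the listing above are used):

from api.db.services.search_service import SearchService
from api.utils import get_uuid

tenant_id = user_id = "tenant-0001"  # placeholder ids for illustration
search_id = get_uuid()

# save() stamps create_time/create_date/update_time/update_date before inserting the row.
SearchService.save(id=search_id, tenant_id=tenant_id, created_by=user_id, name="demo search", search_config={})

# Deletion is only allowed for the creator of a still-VALID search.
if SearchService.accessible4deletion(search_id, user_id):
    print(SearchService.get_detail(search_id))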
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import random
import xxhash
@@ -256,36 +257,55 @@ class TaskService(CommonService):
@DB.connection_context()
def update_progress(cls, id, info):
"""Update the progress information for a task.

This method updates both the progress message and completion percentage of a task.
It handles platform-specific behavior (macOS vs others) and uses database locking
when necessary to ensure thread safety.

Update Rules:
- progress_msg: Always appends the new message to the existing one, and trims the result to max 3000 lines.
- progress: Only updates if the current progress is not -1 AND
(the new progress is -1 OR greater than the existing progress),
to avoid overwriting valid progress with invalid or regressive values.

Args:
id (str): The unique identifier of the task to update.
info (dict): Dictionary containing progress information with keys:
- progress_msg (str, optional): Progress message to append
- progress (float, optional): Progress percentage (0.0 to 1.0)
"""
task = cls.model.get_by_id(id)
if not task:
logging.warning("Update_progress error: task not found")
return

if os.environ.get("MACOS"):
if info["progress_msg"]:
task = cls.model.get_by_id(id)
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
if "progress" in info:
cls.model.update(progress=info["progress"]).where(
cls.model.id == id
prog = info["progress"]
cls.model.update(progress=prog).where(
(cls.model.id == id) &
(
(cls.model.progress != -1) &
((prog == -1) | (prog > cls.model.progress))
)
).execute()
return

with DB.lock("update_progress", -1):
if info["progress_msg"]:
task = cls.model.get_by_id(id)
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
if "progress" in info:
cls.model.update(progress=info["progress"]).where(
cls.model.id == id
prog = info["progress"]
cls.model.update(progress=prog).where(
(cls.model.id == id) &
(
(cls.model.progress != -1) &
((prog == -1) | (prog > cls.model.progress))
)
).execute()
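
A small sketch (not part of the diff) of the acceptance rule described in the update_progress docstring above, with the SQL-side condition restated as a plain Python predicate; it assumes progress values are floats in [0, 1] with -1 marking a failed task:

def should_accept_progress(current: float, new: float) -> bool:
    # A task already marked as failed (-1) is never overwritten, and progress may only
    # move forward unless the new value itself marks a failure.
    return current != -1 and (new == -1 or new > current)

assert should_accept_progress(0.3, 0.5)          # normal forward progress
assert should_accept_progress(0.3, -1)           # a failure can always be recorded
assert not should_accept_progress(0.5, 0.3)      # regressions are ignored
assert not should_accept_progress(-1, 0.9)       # failed tasks stay failed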
@@ -31,8 +31,11 @@ class UserCanvasVersionService(CommonService):
try:
user_canvas_version = cls.model.select().where(cls.model.user_canvas_id == user_canvas_id).order_by(cls.model.create_time.desc())
if user_canvas_version.count() > 20:
delete_ids = []
for i in range(20, user_canvas_version.count()):
cls.delete(user_canvas_version[i].id)
delete_ids.append(user_canvas_version[i].id)

cls.delete_by_ids(delete_ids)
return True
except DoesNotExist:
return None
@@ -15,6 +15,7 @@
#
import hashlib
from datetime import datetime
import logging

import peewee
from werkzeug.security import generate_password_hash, check_password_hash
@@ -39,6 +40,30 @@ class UserService(CommonService):
"""
model = User

@classmethod
@DB.connection_context()
def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
if 'access_token' in kwargs:
access_token = kwargs['access_token']

# Reject empty, None, or whitespace-only access tokens
if not access_token or not str(access_token).strip():
logging.warning("UserService.query: Rejecting empty access_token query")
return cls.model.select().where(cls.model.id == "INVALID_EMPTY_TOKEN") # Returns empty result

# Reject tokens that are too short (should be UUID, 32+ chars)
if len(str(access_token).strip()) < 32:
logging.warning(f"UserService.query: Rejecting short access_token query: {len(str(access_token))} chars")
return cls.model.select().where(cls.model.id == "INVALID_SHORT_TOKEN") # Returns empty result

# Reject tokens that start with "INVALID_" (from logout)
if str(access_token).startswith("INVALID_"):
logging.warning("UserService.query: Rejecting invalidated access_token")
return cls.model.select().where(cls.model.id == "INVALID_LOGOUT_TOKEN") # Returns empty result

# Call parent query method for valid requests
return super().query(cols=cols, reverse=reverse, order_by=order_by, **kwargs)

@classmethod
@DB.connection_context()
def filter_by_id(cls, user_id):
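
A short illustration (not part of the diff) of how the access_token guard above behaves from a caller's point of view; the tokens are made-up examples and a live database is assumed:

# Each rejected form resolves to a query that can never match a real user id,
# so callers simply receive an empty result set instead of an exception.
assert not list(UserService.query(access_token=""))                     # empty token
assert not list(UserService.query(access_token="abc123"))               # shorter than 32 chars
assert not list(UserService.query(access_token="INVALID_" + "x" * 40))  # invalidated at logout
users = UserService.query(access_token="a" * 32)                        # well-formed token: falls through to the normal lookup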
@@ -18,8 +18,9 @@
# from beartype.claw import beartype_all # <-- you didn't sign up for this
# beartype_all(conf=BeartypeConf(violation_type=UserWarning)) # <-- emit warnings from all code

from api.utils.log_utils import initRootLogger
initRootLogger("ragflow_server")
from api.utils.log_utils import init_root_logger
from plugin import GlobalPluginManager
init_root_logger("ragflow_server")

import logging
import os
@@ -27,7 +28,6 @@ import signal
import sys
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
import threading
import uuid

@@ -119,11 +119,21 @@ if __name__ == '__main__':
RuntimeConfig.init_env()
RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)

GlobalPluginManager.load_plugins()

signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

thread = ThreadPoolExecutor(max_workers=1)
thread.submit(update_progress)
def delayed_start_update_progress():
logging.info("Starting update_progress thread (delayed)")
t = threading.Thread(target=update_progress, daemon=True)
t.start()

if RuntimeConfig.DEBUG:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
threading.Timer(1.0, delayed_start_update_progress).start()
else:
threading.Timer(1.0, delayed_start_update_progress).start()

# start http server
try:
@ -13,21 +13,23 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
from datetime import date
|
||||
from enum import IntEnum, Enum
|
||||
import json
|
||||
import rag.utils.es_conn
|
||||
import rag.utils.infinity_conn
|
||||
import os
|
||||
import secrets
|
||||
from datetime import date
|
||||
from enum import Enum, IntEnum
|
||||
|
||||
import rag.utils
|
||||
from rag.nlp import search
|
||||
from graphrag import search as kg_search
|
||||
from api.utils import get_base_config, decrypt_database_config
|
||||
import rag.utils.es_conn
|
||||
import rag.utils.infinity_conn
|
||||
import rag.utils.opensearch_coon
|
||||
from api.constants import RAG_FLOW_SERVICE_NAME
|
||||
from api.utils import decrypt_database_config, get_base_config
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from graphrag import search as kg_search
|
||||
from rag.nlp import search
|
||||
|
||||
LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
|
||||
LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
|
||||
|
||||
LLM = None
|
||||
LLM_FACTORY = None
|
||||
@ -44,7 +46,7 @@ HOST_PORT = None
|
||||
SECRET_KEY = None
|
||||
FACTORY_LLM_INFOS = None
|
||||
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
|
||||
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
|
||||
|
||||
# authentication
|
||||
@ -55,7 +57,7 @@ CLIENT_AUTHENTICATION = None
|
||||
HTTP_APP_KEY = None
|
||||
GITHUB_OAUTH = None
|
||||
FEISHU_OAUTH = None
|
||||
|
||||
OAUTH_CONFIG = None
|
||||
DOC_ENGINE = None
|
||||
docStoreConn = None
|
||||
|
||||
@ -66,20 +68,46 @@ kg_retrievaler = None
|
||||
REGISTER_ENABLED = 1
|
||||
|
||||
|
||||
# sandbox-executor-manager
|
||||
SANDBOX_ENABLED = 0
|
||||
SANDBOX_HOST = None
|
||||
|
||||
BUILTIN_EMBEDDING_MODELS = ["BAAI/bge-large-zh-v1.5@BAAI", "maidalun1020/bce-embedding-base_v1@Youdao"]
|
||||
|
||||
def get_or_create_secret_key():
|
||||
secret_key = os.environ.get("RAGFLOW_SECRET_KEY")
|
||||
if secret_key and len(secret_key) >= 32:
|
||||
return secret_key
|
||||
|
||||
# Check if there's a configured secret key
|
||||
configured_key = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("secret_key")
|
||||
if configured_key and configured_key != str(date.today()) and len(configured_key) >= 32:
|
||||
return configured_key
|
||||
|
||||
# Generate a new secure key and warn about it
|
||||
import logging
|
||||
new_key = secrets.token_hex(32)
|
||||
logging.warning(
|
||||
"SECURITY WARNING: Using auto-generated SECRET_KEY. "
|
||||
f"Generated key: {new_key}"
|
||||
)
|
||||
return new_key
|
||||
|
||||
|
||||
def init_settings():
|
||||
global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE, FACTORY_LLM_INFOS, REGISTER_ENABLED
|
||||
LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
|
||||
LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
|
||||
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
|
||||
LLM = get_base_config("user_default_llm", {})
|
||||
LLM_DEFAULT_MODELS = LLM.get("default_models", {})
|
||||
LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
|
||||
LLM_FACTORY = LLM.get("factory")
|
||||
LLM_BASE_URL = LLM.get("base_url")
|
||||
try:
|
||||
REGISTER_ENABLED = int(os.environ.get("REGISTER_ENABLED", "1"))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
pass
|
||||
|
||||
try:
|
||||
with open(os.path.join(get_project_base_directory(), "conf", "llm_factories.json"), "r") as f:
|
||||
FACTORY_LLM_INFOS = json.load(f)["factory_llm_infos"]
|
||||
@ -88,7 +116,7 @@ def init_settings():
|
||||
|
||||
global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
|
||||
if not LIGHTEN:
|
||||
EMBEDDING_MDL = "BAAI/bge-large-zh-v1.5@BAAI"
|
||||
EMBEDDING_MDL = BUILTIN_EMBEDDING_MODELS[0]
|
||||
|
||||
if LLM_DEFAULT_MODELS:
|
||||
CHAT_MDL = LLM_DEFAULT_MODELS.get("chat_model", CHAT_MDL)
|
||||
@ -102,47 +130,51 @@ def init_settings():
|
||||
EMBEDDING_MDL = EMBEDDING_MDL + (f"@{LLM_FACTORY}" if "@" not in EMBEDDING_MDL and EMBEDDING_MDL != "" else "")
|
||||
RERANK_MDL = RERANK_MDL + (f"@{LLM_FACTORY}" if "@" not in RERANK_MDL and RERANK_MDL != "" else "")
|
||||
ASR_MDL = ASR_MDL + (f"@{LLM_FACTORY}" if "@" not in ASR_MDL and ASR_MDL != "" else "")
|
||||
IMAGE2TEXT_MDL = IMAGE2TEXT_MDL + (
|
||||
f"@{LLM_FACTORY}" if "@" not in IMAGE2TEXT_MDL and IMAGE2TEXT_MDL != "" else "")
|
||||
IMAGE2TEXT_MDL = IMAGE2TEXT_MDL + (f"@{LLM_FACTORY}" if "@" not in IMAGE2TEXT_MDL and IMAGE2TEXT_MDL != "" else "")
|
||||
|
||||
global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
|
||||
API_KEY = LLM.get("api_key", "")
|
||||
API_KEY = LLM.get("api_key")
|
||||
PARSERS = LLM.get(
|
||||
"parsers",
|
||||
"naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag")
|
||||
"parsers", "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"
|
||||
)
|
||||
|
||||
HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
|
||||
HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")
|
||||
|
||||
SECRET_KEY = get_base_config(
|
||||
RAG_FLOW_SERVICE_NAME,
|
||||
{}).get("secret_key", str(date.today()))
|
||||
SECRET_KEY = get_or_create_secret_key()
|
||||
|
||||
global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH
|
||||
global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH, OAUTH_CONFIG
|
||||
# authentication
|
||||
AUTHENTICATION_CONF = get_base_config("authentication", {})
|
||||
|
||||
# client
|
||||
CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get(
|
||||
"client", {}).get(
|
||||
"switch", False)
|
||||
CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get("client", {}).get("switch", False)
|
||||
HTTP_APP_KEY = AUTHENTICATION_CONF.get("client", {}).get("http_app_key")
|
||||
GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
|
||||
FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")
|
||||
|
||||
OAUTH_CONFIG = get_base_config("oauth", {})
|
||||
|
||||
global DOC_ENGINE, docStoreConn, retrievaler, kg_retrievaler
|
||||
DOC_ENGINE = os.environ.get('DOC_ENGINE', "elasticsearch")
|
||||
DOC_ENGINE = os.environ.get("DOC_ENGINE", "elasticsearch")
|
||||
# DOC_ENGINE = os.environ.get('DOC_ENGINE', "opensearch")
|
||||
lower_case_doc_engine = DOC_ENGINE.lower()
|
||||
if lower_case_doc_engine == "elasticsearch":
|
||||
docStoreConn = rag.utils.es_conn.ESConnection()
|
||||
elif lower_case_doc_engine == "infinity":
|
||||
docStoreConn = rag.utils.infinity_conn.InfinityConnection()
|
||||
elif lower_case_doc_engine == "opensearch":
|
||||
docStoreConn = rag.utils.opensearch_coon.OSConnection()
|
||||
else:
|
||||
raise Exception(f"Not supported doc engine: {DOC_ENGINE}")
|
||||
|
||||
retrievaler = search.Dealer(docStoreConn)
|
||||
kg_retrievaler = kg_search.KGSearch(docStoreConn)
|
||||
|
||||
if int(os.environ.get("SANDBOX_ENABLED", "0")):
|
||||
global SANDBOX_HOST
|
||||
SANDBOX_HOST = os.environ.get("SANDBOX_HOST", "sandbox-executor-manager")
|
||||
|
||||
|
||||
class CustomEnum(Enum):
|
||||
@classmethod
|
||||
|
||||
@ -19,6 +19,7 @@ import logging
|
||||
import random
|
||||
import time
|
||||
from base64 import b64encode
|
||||
from copy import deepcopy
|
||||
from functools import wraps
|
||||
from hmac import HMAC
|
||||
from io import BytesIO
|
||||
@ -36,11 +37,13 @@ from flask import (
|
||||
request as flask_request,
|
||||
)
|
||||
from itsdangerous import URLSafeTimedSerializer
|
||||
from peewee import OperationalError
|
||||
from werkzeug.http import HTTP_STATUS_CODES
|
||||
|
||||
from api import settings
|
||||
from api.constants import REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC
|
||||
from api.db.db_models import APIToken
|
||||
from api.db.services.llm_service import LLMService, TenantLLMService
|
||||
from api.utils import CustomJSONEncoder, get_uuid, json_dumps
|
||||
|
||||
requests.models.complexjson.dumps = functools.partial(json.dumps, cls=CustomJSONEncoder)
|
||||
@ -322,25 +325,21 @@ def get_error_data_result(
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
def generate_confirmation_token(tenent_id):
|
||||
serializer = URLSafeTimedSerializer(tenent_id)
|
||||
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenent_id)[2:34]
|
||||
def get_error_argument_result(message="Invalid arguments"):
|
||||
return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=message)
|
||||
|
||||
|
||||
def valid(permission, valid_permission, chunk_method, valid_chunk_method):
|
||||
if valid_parameter(permission, valid_permission):
|
||||
return valid_parameter(permission, valid_permission)
|
||||
if valid_parameter(chunk_method, valid_chunk_method):
|
||||
return valid_parameter(chunk_method, valid_chunk_method)
|
||||
def get_error_permission_result(message="Permission error"):
|
||||
return get_result(code=settings.RetCode.PERMISSION_ERROR, message=message)
|
||||
|
||||
|
||||
def valid_parameter(parameter, valid_values):
|
||||
if parameter and parameter not in valid_values:
|
||||
return get_error_data_result(f"'{parameter}' is not in {valid_values}")
|
||||
def get_error_operating_result(message="Operating error"):
|
||||
return get_result(code=settings.RetCode.OPERATING_ERROR, message=message)
|
||||
|
||||
|
||||
def dataset_readonly_fields(field_name):
|
||||
return field_name in ["chunk_count", "create_date", "create_time", "update_date", "update_time", "created_by", "document_count", "token_num", "status", "tenant_id", "id"]
|
||||
def generate_confirmation_token(tenant_id):
|
||||
serializer = URLSafeTimedSerializer(tenant_id)
|
||||
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenant_id)[2:34]
|
||||
|
||||
|
||||
def get_parser_config(chunk_method, parser_config):
|
||||
@ -349,7 +348,7 @@ def get_parser_config(chunk_method, parser_config):
|
||||
if not chunk_method:
|
||||
chunk_method = "naive"
|
||||
key_mapping = {
|
||||
"naive": {"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False, "layout_recognize": "DeepDOC", "raptor": {"use_raptor": False}},
|
||||
"naive": {"chunk_token_num": 128, "delimiter": r"\n", "html4excel": False, "layout_recognize": "DeepDOC", "raptor": {"use_raptor": False}},
|
||||
"qa": {"raptor": {"use_raptor": False}},
|
||||
"tag": None,
|
||||
"resume": None,
|
||||
@ -360,7 +359,7 @@ def get_parser_config(chunk_method, parser_config):
|
||||
"laws": {"raptor": {"use_raptor": False}},
|
||||
"presentation": {"raptor": {"use_raptor": False}},
|
||||
"one": None,
|
||||
"knowledge_graph": {"chunk_token_num": 8192, "delimiter": "\\n!?;。;!?", "entity_types": ["organization", "person", "location", "event", "time"]},
|
||||
"knowledge_graph": {"chunk_token_num": 8192, "delimiter": r"\n", "entity_types": ["organization", "person", "location", "event", "time"]},
|
||||
"email": None,
|
||||
"picture": None,
|
||||
}
|
||||
@ -368,81 +367,32 @@ def get_parser_config(chunk_method, parser_config):
|
||||
return parser_config
|
||||
|
||||
|
||||
def get_data_openai(id=None,
|
||||
created=None,
|
||||
model=None,
|
||||
prompt_tokens= 0,
|
||||
completion_tokens=0,
|
||||
content = None,
|
||||
finish_reason= None,
|
||||
object="chat.completion",
|
||||
param=None,
|
||||
def get_data_openai(
|
||||
id=None,
|
||||
created=None,
|
||||
model=None,
|
||||
prompt_tokens=0,
|
||||
completion_tokens=0,
|
||||
content=None,
|
||||
finish_reason=None,
|
||||
object="chat.completion",
|
||||
param=None,
|
||||
):
|
||||
|
||||
total_tokens= prompt_tokens + completion_tokens
|
||||
total_tokens = prompt_tokens + completion_tokens
|
||||
return {
|
||||
"id":f"{id}",
|
||||
"id": f"{id}",
|
||||
"object": object,
|
||||
"created": int(time.time()) if created else None,
|
||||
"model": model,
|
||||
"param":param,
|
||||
"param": param,
|
||||
"usage": {
|
||||
"prompt_tokens": prompt_tokens,
|
||||
"completion_tokens": completion_tokens,
|
||||
"total_tokens": total_tokens,
|
||||
"completion_tokens_details": {
|
||||
"reasoning_tokens": 0,
|
||||
"accepted_prediction_tokens": 0,
|
||||
"rejected_prediction_tokens": 0
|
||||
}
|
||||
"completion_tokens_details": {"reasoning_tokens": 0, "accepted_prediction_tokens": 0, "rejected_prediction_tokens": 0},
|
||||
},
|
||||
"choices": [
|
||||
{
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": content
|
||||
},
|
||||
"logprobs": None,
|
||||
"finish_reason": finish_reason,
|
||||
"index": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
def valid_parser_config(parser_config):
|
||||
if not parser_config:
|
||||
return
|
||||
scopes = set(
|
||||
[
|
||||
"chunk_token_num",
|
||||
"delimiter",
|
||||
"raptor",
|
||||
"graphrag",
|
||||
"layout_recognize",
|
||||
"task_page_size",
|
||||
"pages",
|
||||
"html4excel",
|
||||
"auto_keywords",
|
||||
"auto_questions",
|
||||
"tag_kb_ids",
|
||||
"topn_tags",
|
||||
"filename_embd_weight",
|
||||
]
|
||||
)
|
||||
for k in parser_config.keys():
|
||||
assert k in scopes, f"Abnormal 'parser_config'. Invalid key: {k}"
|
||||
|
||||
assert isinstance(parser_config.get("chunk_token_num", 1), int), "chunk_token_num should be int"
|
||||
assert 1 <= parser_config.get("chunk_token_num", 1) < 100000000, "chunk_token_num should be in range from 1 to 100000000"
|
||||
assert isinstance(parser_config.get("task_page_size", 1), int), "task_page_size should be int"
|
||||
assert 1 <= parser_config.get("task_page_size", 1) < 100000000, "task_page_size should be in range from 1 to 100000000"
|
||||
assert isinstance(parser_config.get("auto_keywords", 1), int), "auto_keywords should be int"
|
||||
assert 0 <= parser_config.get("auto_keywords", 0) < 32, "auto_keywords should be in range from 0 to 32"
|
||||
assert isinstance(parser_config.get("auto_questions", 1), int), "auto_questions should be int"
|
||||
assert 0 <= parser_config.get("auto_questions", 0) < 10, "auto_questions should be in range from 0 to 10"
|
||||
assert isinstance(parser_config.get("topn_tags", 1), int), "topn_tags should be int"
|
||||
assert 0 <= parser_config.get("topn_tags", 0) < 10, "topn_tags should be in range from 0 to 10"
|
||||
assert isinstance(parser_config.get("html4excel", False), bool), "html4excel should be True or False"
|
||||
assert isinstance(parser_config.get("delimiter", ""), str), "delimiter should be str"
|
||||
"choices": [{"message": {"role": "assistant", "content": content}, "logprobs": None, "finish_reason": finish_reason, "index": 0}],
|
||||
}
|
||||
|
||||
|
||||
def check_duplicate_ids(ids, id_type="item"):
|
||||
@ -472,3 +422,139 @@ def check_duplicate_ids(ids, id_type="item"):
|
||||
|
||||
# Return unique IDs and error messages
|
||||
return list(set(ids)), duplicate_messages
|
||||
|
||||
|
||||
def verify_embedding_availability(embd_id: str, tenant_id: str) -> tuple[bool, Response | None]:
|
||||
"""
|
||||
Verifies availability of an embedding model for a specific tenant.
|
||||
|
||||
Performs comprehensive verification through:
|
||||
1. Identifier Parsing: Decomposes embd_id into name and factory components
|
||||
2. System Verification: Checks model registration in LLMService
|
||||
3. Tenant Authorization: Validates tenant-specific model assignments
|
||||
4. Built-in Model Check: Confirms inclusion in predefined system models
|
||||
|
||||
Args:
|
||||
embd_id (str): Unique identifier for the embedding model in format "model_name@factory"
|
||||
tenant_id (str): Tenant identifier for access control
|
||||
|
||||
Returns:
|
||||
tuple[bool, Response | None]:
|
||||
- First element (bool):
|
||||
- True: Model is available and authorized
|
||||
- False: Validation failed
|
||||
- Second element contains:
|
||||
- None on success
|
||||
- Error detail dict on failure
|
||||
|
||||
Raises:
|
||||
ValueError: When model identifier format is invalid
|
||||
OperationalError: When database connection fails (auto-handled)
|
||||
|
||||
Examples:
|
||||
>>> verify_embedding_availability("text-embedding@openai", "tenant_123")
|
||||
(True, None)
|
||||
|
||||
>>> verify_embedding_availability("invalid_model", "tenant_123")
|
||||
(False, {'code': 101, 'message': "Unsupported model: <invalid_model>"})
|
||||
"""
|
||||
try:
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(embd_id)
|
||||
in_llm_service = bool(LLMService.query(llm_name=llm_name, fid=llm_factory, model_type="embedding"))
|
||||
|
||||
tenant_llms = TenantLLMService.get_my_llms(tenant_id=tenant_id)
|
||||
is_tenant_model = any(llm["llm_name"] == llm_name and llm["llm_factory"] == llm_factory and llm["model_type"] == "embedding" for llm in tenant_llms)
|
||||
|
||||
is_builtin_model = embd_id in settings.BUILTIN_EMBEDDING_MODELS
|
||||
if not (is_builtin_model or is_tenant_model or in_llm_service):
|
||||
return False, get_error_argument_result(f"Unsupported model: <{embd_id}>")
|
||||
|
||||
if not (is_builtin_model or is_tenant_model):
|
||||
return False, get_error_argument_result(f"Unauthorized model: <{embd_id}>")
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return False, get_error_data_result(message="Database operation failed")
|
||||
|
||||
return True, None
|
||||
|
||||
|
||||
def deep_merge(default: dict, custom: dict) -> dict:
|
||||
"""
|
||||
Recursively merges two dictionaries with priority given to `custom` values.
|
||||
|
||||
Creates a deep copy of the `default` dictionary and iteratively merges nested
|
||||
dictionaries using a stack-based approach. Non-dict values in `custom` will
|
||||
completely override corresponding entries in `default`.
|
||||
|
||||
Args:
|
||||
default (dict): Base dictionary containing default values.
|
||||
custom (dict): Dictionary containing overriding values.
|
||||
|
||||
Returns:
|
||||
dict: New merged dictionary combining values from both inputs.
|
||||
|
||||
Example:
|
||||
>>> from copy import deepcopy
|
||||
>>> default = {"a": 1, "nested": {"x": 10, "y": 20}}
|
||||
>>> custom = {"b": 2, "nested": {"y": 99, "z": 30}}
|
||||
>>> deep_merge(default, custom)
|
||||
{'a': 1, 'b': 2, 'nested': {'x': 10, 'y': 99, 'z': 30}}
|
||||
|
||||
>>> deep_merge({"config": {"mode": "auto"}}, {"config": "manual"})
|
||||
{'config': 'manual'}
|
||||
|
||||
Notes:
|
||||
1. Merge priority is always given to `custom` values at all nesting levels
|
||||
2. Non-dict values (e.g. list, str) in `custom` will replace entire values
|
||||
in `default`, even if the original value was a dictionary
|
||||
3. Time complexity: O(N) where N is total key-value pairs in `custom`
|
||||
4. Recommended for configuration merging and nested data updates
|
||||
"""
|
||||
merged = deepcopy(default)
|
||||
stack = [(merged, custom)]
|
||||
|
||||
while stack:
|
||||
base_dict, override_dict = stack.pop()
|
||||
|
||||
for key, val in override_dict.items():
|
||||
if key in base_dict and isinstance(val, dict) and isinstance(base_dict[key], dict):
|
||||
stack.append((base_dict[key], val))
|
||||
else:
|
||||
base_dict[key] = val
|
||||
|
||||
return merged
|
||||
|
||||
|
||||
def remap_dictionary_keys(source_data: dict, key_aliases: dict = None) -> dict:
|
||||
"""
|
||||
Transform dictionary keys using a configurable mapping schema.
|
||||
|
||||
Args:
|
||||
source_data: Original dictionary to process
|
||||
key_aliases: Custom key transformation rules (Optional)
|
||||
When provided, overrides default key mapping
|
||||
Format: {<original_key>: <new_key>, ...}
|
||||
|
||||
Returns:
|
||||
dict: New dictionary with transformed keys preserving original values
|
||||
|
||||
Example:
|
||||
>>> input_data = {"old_key": "value", "another_field": 42}
|
||||
>>> remap_dictionary_keys(input_data, {"old_key": "new_key"})
|
||||
{'new_key': 'value', 'another_field': 42}
|
||||
"""
|
||||
DEFAULT_KEY_MAP = {
|
||||
"chunk_num": "chunk_count",
|
||||
"doc_num": "document_count",
|
||||
"parser_id": "chunk_method",
|
||||
"embd_id": "embedding_model",
|
||||
}
|
||||
|
||||
transformed_data = {}
|
||||
mapping = key_aliases or DEFAULT_KEY_MAP
|
||||
|
||||
for original_key, value in source_data.items():
|
||||
mapped_key = mapping.get(original_key, original_key)
|
||||
transformed_data[mapped_key] = value
|
||||
|
||||
return transformed_data
|
||||
|
||||
@ -17,17 +17,20 @@ import base64
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
from io import BytesIO
|
||||
|
||||
import pdfplumber
|
||||
from PIL import Image
|
||||
from cachetools import LRUCache, cached
|
||||
from PIL import Image
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
from api.db import FileType
|
||||
from api.constants import IMG_BASE64_PREFIX
|
||||
from api.db import FileType
|
||||
|
||||
PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
|
||||
RAG_BASE = os.getenv("RAG_BASE")
|
||||
@ -74,7 +77,7 @@ def get_rag_python_directory(*args):
|
||||
|
||||
|
||||
def get_home_cache_dir():
|
||||
dir = os.path.join(os.path.expanduser('~'), ".ragflow")
|
||||
dir = os.path.join(os.path.expanduser("~"), ".ragflow")
|
||||
try:
|
||||
os.mkdir(dir)
|
||||
except OSError:
|
||||
@ -92,9 +95,7 @@ def load_json_conf(conf_path):
|
||||
with open(json_conf_path) as f:
|
||||
return json.load(f)
|
||||
except BaseException:
|
||||
raise EnvironmentError(
|
||||
"loading json file config from '{}' failed!".format(json_conf_path)
|
||||
)
|
||||
raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))
|
||||
|
||||
|
||||
def dump_json_conf(config_data, conf_path):
|
||||
@ -106,9 +107,7 @@ def dump_json_conf(config_data, conf_path):
|
||||
with open(json_conf_path, "w") as f:
|
||||
json.dump(config_data, f, indent=4)
|
||||
except BaseException:
|
||||
raise EnvironmentError(
|
||||
"loading json file config from '{}' failed!".format(json_conf_path)
|
||||
)
|
||||
raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))
|
||||
|
||||
|
||||
def load_json_conf_real_time(conf_path):
|
||||
@ -120,9 +119,7 @@ def load_json_conf_real_time(conf_path):
|
||||
with open(json_conf_path) as f:
|
||||
return json.load(f)
|
||||
except BaseException:
|
||||
raise EnvironmentError(
|
||||
"loading json file config from '{}' failed!".format(json_conf_path)
|
||||
)
|
||||
raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))
|
||||
|
||||
|
||||
def load_yaml_conf(conf_path):
|
||||
@ -130,12 +127,10 @@ def load_yaml_conf(conf_path):
|
||||
conf_path = os.path.join(get_project_base_directory(), conf_path)
|
||||
try:
|
||||
with open(conf_path) as f:
|
||||
yaml = YAML(typ='safe', pure=True)
|
||||
yaml = YAML(typ="safe", pure=True)
|
||||
return yaml.load(f)
|
||||
except Exception as e:
|
||||
raise EnvironmentError(
|
||||
"loading yaml file config from {} failed:".format(conf_path), e
|
||||
)
|
||||
raise EnvironmentError("loading yaml file config from {} failed:".format(conf_path), e)
|
||||
|
||||
|
||||
def rewrite_yaml_conf(conf_path, config):
|
||||
@ -146,13 +141,11 @@ def rewrite_yaml_conf(conf_path, config):
|
||||
yaml = YAML(typ="safe")
|
||||
yaml.dump(config, f)
|
||||
except Exception as e:
|
||||
raise EnvironmentError(
|
||||
"rewrite yaml file config {} failed:".format(conf_path), e
|
||||
)
|
||||
raise EnvironmentError("rewrite yaml file config {} failed:".format(conf_path), e)
|
||||
|
||||
|
||||
def rewrite_json_file(filepath, json_data):
|
||||
with open(filepath, "w", encoding='utf-8') as f:
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
json.dump(json_data, f, indent=4, separators=(",", ": "))
|
||||
f.close()
|
||||
|
||||
@ -162,12 +155,10 @@ def filename_type(filename):
|
||||
if re.match(r".*\.pdf$", filename):
|
||||
return FileType.PDF.value
|
||||
|
||||
if re.match(
|
||||
r".*\.(eml|doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html|sql)$", filename):
|
||||
if re.match(r".*\.(eml|doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html|sql)$", filename):
|
||||
return FileType.DOC.value
|
||||
|
||||
if re.match(
|
||||
r".*\.(wav|flac|ape|alac|wavpack|wv|mp3|aac|ogg|vorbis|opus|mp3)$", filename):
|
||||
if re.match(r".*\.(wav|flac|ape|alac|wavpack|wv|mp3|aac|ogg|vorbis|opus)$", filename):
|
||||
return FileType.AURAL.value
|
||||
|
||||
if re.match(r".*\.(jpg|jpeg|png|tif|gif|pcx|tga|exif|fpx|svg|psd|cdr|pcd|dxf|ufo|eps|ai|raw|WMF|webp|avif|apng|icon|ico|mpg|mpeg|avi|rm|rmvb|mov|wmv|asf|dat|asx|wvx|mpe|mpa|mp4)$", filename):
|
||||
@ -175,6 +166,7 @@ def filename_type(filename):
|
||||
|
||||
return FileType.OTHER.value
|
||||
|
||||
|
||||
def thumbnail_img(filename, blob):
|
||||
"""
|
||||
MySQL LongText max length is 65535
|
||||
@ -183,6 +175,7 @@ def thumbnail_img(filename, blob):
|
||||
if re.match(r".*\.pdf$", filename):
|
||||
with sys.modules[LOCK_KEY_pdfplumber]:
|
||||
pdf = pdfplumber.open(BytesIO(blob))
|
||||
|
||||
buffered = BytesIO()
|
||||
resolution = 32
|
||||
img = None
|
||||
@ -206,8 +199,9 @@ def thumbnail_img(filename, blob):
|
||||
return buffered.getvalue()
|
||||
|
||||
elif re.match(r".*\.(ppt|pptx)$", filename):
|
||||
import aspose.slides as slides
|
||||
import aspose.pydrawing as drawing
|
||||
import aspose.slides as slides
|
||||
|
||||
try:
|
||||
with slides.Presentation(BytesIO(blob)) as presentation:
|
||||
buffered = BytesIO()
|
||||
@ -215,8 +209,7 @@ def thumbnail_img(filename, blob):
|
||||
img = None
|
||||
for _ in range(10):
|
||||
# https://reference.aspose.com/slides/python-net/aspose.slides/slide/get_thumbnail/#float-float
|
||||
presentation.slides[0].get_thumbnail(scale, scale).save(
|
||||
buffered, drawing.imaging.ImageFormat.png)
|
||||
presentation.slides[0].get_thumbnail(scale, scale).save(buffered, drawing.imaging.ImageFormat.png)
|
||||
img = buffered.getvalue()
|
||||
if len(img) >= 64000:
|
||||
scale = scale / 2.0
|
||||
@ -232,10 +225,9 @@ def thumbnail_img(filename, blob):
|
||||
def thumbnail(filename, blob):
|
||||
img = thumbnail_img(filename, blob)
|
||||
if img is not None:
|
||||
return IMG_BASE64_PREFIX + \
|
||||
base64.b64encode(img).decode("utf-8")
|
||||
return IMG_BASE64_PREFIX + base64.b64encode(img).decode("utf-8")
|
||||
else:
|
||||
return ''
|
||||
return ""
|
||||
|
||||
|
||||
def traversal_files(base):
|
||||
@ -243,3 +235,52 @@ def traversal_files(base):
|
||||
for f in fs:
|
||||
fullname = os.path.join(root, f)
|
||||
yield fullname
|
||||
|
||||
|
||||
def repair_pdf_with_ghostscript(input_bytes):
|
||||
if shutil.which("gs") is None:
|
||||
return input_bytes
|
||||
|
||||
with tempfile.NamedTemporaryFile(suffix=".pdf") as temp_in, tempfile.NamedTemporaryFile(suffix=".pdf") as temp_out:
|
||||
temp_in.write(input_bytes)
|
||||
temp_in.flush()
|
||||
|
||||
cmd = [
|
||||
"gs",
|
||||
"-o",
|
||||
temp_out.name,
|
||||
"-sDEVICE=pdfwrite",
|
||||
"-dPDFSETTINGS=/prepress",
|
||||
temp_in.name,
|
||||
]
|
||||
try:
|
||||
proc = subprocess.run(cmd, capture_output=True, text=True)
|
||||
if proc.returncode != 0:
|
||||
return input_bytes
|
||||
except Exception:
|
||||
return input_bytes
|
||||
|
||||
temp_out.seek(0)
|
||||
repaired_bytes = temp_out.read()
|
||||
|
||||
return repaired_bytes
|
||||
|
||||
|
||||
def read_potential_broken_pdf(blob):
|
||||
def try_open(blob):
|
||||
try:
|
||||
with pdfplumber.open(BytesIO(blob)) as pdf:
|
||||
if pdf.pages:
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
return False
|
||||
|
||||
if try_open(blob):
|
||||
return blob
|
||||
|
||||
repaired = repair_pdf_with_ghostscript(blob)
|
||||
if try_open(repaired):
|
||||
return repaired
|
||||
|
||||
return blob
|
||||
|
||||
@@ -30,7 +30,7 @@ def get_project_base_directory():
)
return PROJECT_BASE

def initRootLogger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
def init_root_logger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
global initialized_root_logger
if initialized_root_logger:
return
@@ -77,4 +77,11 @@ def initRootLogger(logfile_basename: str, log_format: str = "%(asctime)-15s %(le
pkg_logger.setLevel(pkg_level)

msg = f"{logfile_basename} log path: {log_path}, log levels: {pkg_levels}"
logger.info(msg)
logger.info(msg)


def log_exception(e, *args):
logging.exception(e)
for a in args:
logging.error(str(a))
raise e
@@ -35,6 +35,6 @@ def crypt(line):


if __name__ == "__main__":
pswd = crypt(sys.argv[1])
print(pswd)
print(decrypt(pswd))
passwd = crypt(sys.argv[1])
print(passwd)
print(decrypt(passwd))
653
api/utils/validation_utils.py
Normal file
@ -0,0 +1,653 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from collections import Counter
|
||||
from enum import auto
|
||||
from typing import Annotated, Any
|
||||
from uuid import UUID
|
||||
|
||||
from flask import Request
|
||||
from pydantic import BaseModel, Field, StringConstraints, ValidationError, field_validator
|
||||
from pydantic_core import PydanticCustomError
|
||||
from strenum import StrEnum
|
||||
from werkzeug.exceptions import BadRequest, UnsupportedMediaType
|
||||
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
|
||||
|
||||
def validate_and_parse_json_request(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None, exclude_unset: bool = False) -> tuple[dict[str, Any] | None, str | None]:
|
||||
"""
|
||||
Validates and parses JSON requests through a multi-stage validation pipeline.
|
||||
|
||||
Implements a four-stage validation process:
|
||||
1. Content-Type verification (must be application/json)
|
||||
2. JSON syntax validation
|
||||
3. Payload structure type checking
|
||||
4. Pydantic model validation with error formatting
|
||||
|
||||
Args:
|
||||
request (Request): Flask request object containing HTTP payload
|
||||
validator (type[BaseModel]): Pydantic model class for data validation
|
||||
extras (dict[str, Any] | None): Additional fields to merge into payload
|
||||
before validation. These fields will be removed from the final output
|
||||
exclude_unset (bool): Whether to exclude fields that have not been explicitly set
|
||||
|
||||
Returns:
|
||||
tuple[dict[str, Any] | None, str | None]:
|
||||
- First element:
|
||||
- Validated dictionary on success
|
||||
- None on validation failure
|
||||
- Second element:
|
||||
- None on success
|
||||
- Diagnostic error message on failure
|
||||
|
||||
Raises:
|
||||
UnsupportedMediaType: When Content-Type header is not application/json
|
||||
BadRequest: For structural JSON syntax errors
|
||||
ValidationError: When payload violates Pydantic schema rules
|
||||
|
||||
Examples:
|
||||
>>> validate_and_parse_json_request(valid_request, DatasetSchema)
|
||||
({"name": "Dataset1", "format": "csv"}, None)
|
||||
|
||||
>>> validate_and_parse_json_request(xml_request, DatasetSchema)
|
||||
(None, "Unsupported content type: Expected application/json, got text/xml")
|
||||
|
||||
>>> validate_and_parse_json_request(bad_json_request, DatasetSchema)
|
||||
(None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding")
|
||||
|
||||
Notes:
|
||||
1. Validation Priority:
|
||||
- Content-Type verification precedes JSON parsing
|
||||
- Structural validation occurs before schema validation
|
||||
2. Extra fields added via `extras` parameter are automatically removed
|
||||
from the final output after validation
|
||||
"""
|
||||
try:
|
||||
payload = request.get_json() or {}
|
||||
except UnsupportedMediaType:
|
||||
return None, f"Unsupported content type: Expected application/json, got {request.content_type}"
|
||||
except BadRequest:
|
||||
return None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding"
|
||||
|
||||
if not isinstance(payload, dict):
|
||||
return None, f"Invalid request payload: expected object, got {type(payload).__name__}"
|
||||
|
||||
try:
|
||||
if extras is not None:
|
||||
payload.update(extras)
|
||||
validated_request = validator(**payload)
|
||||
except ValidationError as e:
|
||||
return None, format_validation_error_message(e)
|
||||
|
||||
parsed_payload = validated_request.model_dump(by_alias=True, exclude_unset=exclude_unset)
|
||||
|
||||
if extras is not None:
|
||||
for key in list(parsed_payload.keys()):
|
||||
if key in extras:
|
||||
del parsed_payload[key]
|
||||
|
||||
return parsed_payload, None
|
||||
|
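A sketch of wiring the JSON helper into a Flask view; the blueprint, route, and `DatasetSchema` model are illustrative placeholders rather than part of this change.

```python
# Sketch: typical call site for validate_and_parse_json_request in a Flask route.
from flask import Blueprint, jsonify, request
from pydantic import BaseModel

from api.utils.validation_utils import validate_and_parse_json_request

bp = Blueprint("datasets", __name__)

class DatasetSchema(BaseModel):   # placeholder validator for the example
    name: str

@bp.route("/datasets", methods=["POST"])
def create_dataset():
    payload, err = validate_and_parse_json_request(request, DatasetSchema)
    if err is not None:
        return jsonify({"message": err}), 400   # diagnostic message from the helper
    return jsonify(payload), 200
```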
||||
|
||||
def validate_and_parse_request_args(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None) -> tuple[dict[str, Any] | None, str | None]:
|
||||
"""
|
||||
Validates and parses request arguments against a Pydantic model.
|
||||
|
||||
This function performs a complete request validation workflow:
|
||||
1. Extracts query parameters from the request
|
||||
2. Merges with optional extra values (if provided)
|
||||
3. Validates against the specified Pydantic model
|
||||
4. Cleans the output by removing extra values
|
||||
5. Returns either parsed data or an error message
|
||||
|
||||
Args:
|
||||
request (Request): Web framework request object containing query parameters
|
||||
validator (type[BaseModel]): Pydantic model class for validation
|
||||
extras (dict[str, Any] | None): Optional additional values to include in validation
|
||||
but exclude from final output. Defaults to None.
|
||||
|
||||
Returns:
|
||||
tuple[dict[str, Any] | None, str | None]:
|
||||
- First element: Validated/parsed arguments as dict if successful, None otherwise
|
||||
- Second element: Formatted error message if validation failed, None otherwise
|
||||
|
||||
Behavior:
|
||||
- Query parameters are merged with extras before validation
|
||||
- Extras are automatically removed from the final output
|
||||
- All validation errors are formatted into a human-readable string
|
||||
|
||||
Raises:
|
||||
TypeError: If validator is not a Pydantic BaseModel subclass
|
||||
|
||||
Examples:
|
||||
Successful validation:
|
||||
>>> validate_and_parse_request_args(request, MyValidator)
|
||||
({'param1': 'value'}, None)
|
||||
|
||||
Failed validation:
|
||||
>>> validate_and_parse_request_args(request, MyValidator)
|
||||
(None, "param1: Field required")
|
||||
|
||||
With extras:
|
||||
>>> validate_and_parse_request_args(request, MyValidator, extras={'internal_id': 123})
|
||||
({'param1': 'value'}, None) # internal_id removed from output
|
||||
|
||||
Notes:
|
||||
- Uses request.args.to_dict() for Flask-compatible parameter extraction
|
||||
- Maintains immutability of original request arguments
|
||||
- Preserves type conversion from Pydantic validation
|
||||
"""
|
||||
args = request.args.to_dict(flat=True)
|
||||
try:
|
||||
if extras is not None:
|
||||
args.update(extras)
|
||||
validated_args = validator(**args)
|
||||
except ValidationError as e:
|
||||
return None, format_validation_error_message(e)
|
||||
|
||||
parsed_args = validated_args.model_dump()
|
||||
if extras is not None:
|
||||
for key in list(parsed_args.keys()):
|
||||
if key in extras:
|
||||
del parsed_args[key]
|
||||
|
||||
return parsed_args, None
|
||||
|
||||
|
||||
def format_validation_error_message(e: ValidationError) -> str:
|
||||
"""
|
||||
Formats validation errors into a standardized string format.
|
||||
|
||||
Processes pydantic ValidationError objects to create human-readable error messages
|
||||
containing field locations, error descriptions, and input values.
|
||||
|
||||
Args:
|
||||
e (ValidationError): The validation error instance containing error details
|
||||
|
||||
Returns:
|
||||
str: Formatted error messages joined by newlines. Each line contains:
|
||||
- Field path (dot-separated)
|
||||
- Error message
|
||||
- Truncated input value (max 128 chars)
|
||||
|
||||
Example:
|
||||
>>> try:
|
||||
... UserModel(name=123, email="invalid")
|
||||
... except ValidationError as e:
|
||||
... print(format_validation_error_message(e))
|
||||
Field: <name> - Message: <Input should be a valid string> - Value: <123>
|
||||
Field: <email> - Message: <value is not a valid email address> - Value: <invalid>
|
||||
"""
|
||||
error_messages = []
|
||||
|
||||
for error in e.errors():
|
||||
field = ".".join(map(str, error["loc"]))
|
||||
msg = error["msg"]
|
||||
input_val = error["input"]
|
||||
input_str = str(input_val)
|
||||
|
||||
if len(input_str) > 128:
|
||||
input_str = input_str[:125] + "..."
|
||||
|
||||
error_msg = f"Field: <{field}> - Message: <{msg}> - Value: <{input_str}>"
|
||||
error_messages.append(error_msg)
|
||||
|
||||
return "\n".join(error_messages)
|
||||
|
||||
|
||||
def normalize_str(v: Any) -> Any:
|
||||
"""
|
||||
Normalizes string values to a standard format while preserving non-string inputs.
|
||||
|
||||
Performs the following transformations when input is a string:
|
||||
1. Trims leading/trailing whitespace (str.strip())
|
||||
2. Converts to lowercase (str.lower())
|
||||
|
||||
Non-string inputs are returned unchanged, making this function safe for mixed-type
|
||||
processing pipelines.
|
||||
|
||||
Args:
|
||||
v (Any): Input value to normalize. Accepts any Python object.
|
||||
|
||||
Returns:
|
||||
Any: Normalized string if input was string-type, original value otherwise.
|
||||
|
||||
Behavior Examples:
|
||||
String Input: " Admin " → "admin"
|
||||
Empty String: " " → "" (empty string)
|
||||
Non-String:
|
||||
- 123 → 123
|
||||
- None → None
|
||||
- ["User"] → ["User"]
|
||||
|
||||
Typical Use Cases:
|
||||
- Standardizing user input
|
||||
- Preparing data for case-insensitive comparison
|
||||
- Cleaning API parameters
|
||||
- Normalizing configuration values
|
||||
|
||||
Edge Cases:
|
||||
- Unicode whitespace is handled by str.strip()
|
||||
- Locale-independent lowercasing (str.lower())
|
||||
- Preserves falsy values (0, False, etc.)
|
||||
|
||||
Example:
|
||||
>>> normalize_str(" ReadOnly ")
|
||||
'readonly'
|
||||
>>> normalize_str(42)
|
||||
42
|
||||
"""
|
||||
if isinstance(v, str):
|
||||
stripped = v.strip()
|
||||
normalized = stripped.lower()
|
||||
return normalized
|
||||
return v
|
||||
|
||||
|
||||
def validate_uuid1_hex(v: Any) -> str:
|
||||
"""
|
||||
Validates and converts input to a UUID version 1 hexadecimal string.
|
||||
|
||||
This function performs strict validation and normalization:
|
||||
1. Accepts either UUID objects or UUID-formatted strings
|
||||
2. Verifies the UUID is version 1 (time-based)
|
||||
3. Returns the 32-character hexadecimal representation
|
||||
|
||||
Args:
|
||||
v (Any): Input value to validate. Can be:
|
||||
- UUID object (must be version 1)
|
||||
- String in UUID format (e.g. "550e8400-e29b-41d4-a716-446655440000")
|
||||
|
||||
Returns:
|
||||
str: 32-character lowercase hexadecimal string without hyphens
|
||||
Example: "550e8400e29b41d4a716446655440000"
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: With code "invalid_UUID1_format" when:
|
||||
- Input is not a UUID object or valid UUID string
|
||||
- UUID version is not 1
|
||||
- String doesn't match UUID format
|
||||
|
||||
Examples:
|
||||
Valid cases:
|
||||
>>> validate_uuid1_hex("550e8400-e29b-41d4-a716-446655440000")
|
||||
'550e8400e29b41d4a716446655440000'
|
||||
>>> validate_uuid1_hex(UUID('550e8400-e29b-41d4-a716-446655440000'))
|
||||
'550e8400e29b41d4a716446655440000'
|
||||
|
||||
Invalid cases:
|
||||
>>> validate_uuid1_hex("not-a-uuid") # raises PydanticCustomError
|
||||
>>> validate_uuid1_hex(12345) # raises PydanticCustomError
|
||||
>>> validate_uuid1_hex(UUID(int=0)) # v4, raises PydanticCustomError
|
||||
|
||||
Notes:
|
||||
- Uses Python's built-in UUID parser for format validation
|
||||
- Version check prevents accidental use of other UUID versions
|
||||
- Hyphens in input strings are automatically removed in output
|
||||
"""
|
||||
try:
|
||||
uuid_obj = UUID(v) if isinstance(v, str) else v
|
||||
if uuid_obj.version != 1:
|
||||
raise PydanticCustomError("invalid_UUID1_format", "Must be a UUID1 format")
|
||||
return uuid_obj.hex
|
||||
except (AttributeError, ValueError, TypeError):
|
||||
raise PydanticCustomError("invalid_UUID1_format", "Invalid UUID1 format")
|
||||
|
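A short sketch of the acceptance rule: only time-based (version 1) UUIDs pass, everything else surfaces as `PydanticCustomError`.

```python
# Sketch: validate_uuid1_hex accepts UUID1 values and returns bare 32-char hex.
from uuid import uuid1, uuid4

print(validate_uuid1_hex(str(uuid1())))   # e.g. 'f3b1c0e0...' (no hyphens)

try:
    validate_uuid1_hex(uuid4())           # version 4 is rejected
except Exception as exc:
    print(type(exc).__name__)             # PydanticCustomError
```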
||||
|
||||
class PermissionEnum(StrEnum):
|
||||
me = auto()
|
||||
team = auto()
|
||||
|
||||
|
||||
class ChunkMethodEnum(StrEnum):
|
||||
naive = auto()
|
||||
book = auto()
|
||||
email = auto()
|
||||
laws = auto()
|
||||
manual = auto()
|
||||
one = auto()
|
||||
paper = auto()
|
||||
picture = auto()
|
||||
presentation = auto()
|
||||
qa = auto()
|
||||
table = auto()
|
||||
tag = auto()
|
||||
|
||||
|
||||
class GraphragMethodEnum(StrEnum):
|
||||
light = auto()
|
||||
general = auto()
|
||||
|
||||
|
||||
class Base(BaseModel):
|
||||
class Config:
|
||||
extra = "forbid"
|
||||
|
||||
|
||||
class RaptorConfig(Base):
|
||||
use_raptor: bool = Field(default=False)
|
||||
prompt: Annotated[
|
||||
str,
|
||||
StringConstraints(strip_whitespace=True, min_length=1),
|
||||
Field(
|
||||
default="Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize."
|
||||
),
|
||||
]
|
||||
max_token: int = Field(default=256, ge=1, le=2048)
|
||||
threshold: float = Field(default=0.1, ge=0.0, le=1.0)
|
||||
max_cluster: int = Field(default=64, ge=1, le=1024)
|
||||
random_seed: int = Field(default=0, ge=0)
|
||||
|
||||
|
||||
class GraphragConfig(Base):
|
||||
use_graphrag: bool = Field(default=False)
|
||||
entity_types: list[str] = Field(default_factory=lambda: ["organization", "person", "geo", "event", "category"])
|
||||
method: GraphragMethodEnum = Field(default=GraphragMethodEnum.light)
|
||||
community: bool = Field(default=False)
|
||||
resolution: bool = Field(default=False)
|
||||
|
||||
|
||||
class ParserConfig(Base):
|
||||
auto_keywords: int = Field(default=0, ge=0, le=32)
|
||||
auto_questions: int = Field(default=0, ge=0, le=10)
|
||||
chunk_token_num: int = Field(default=128, ge=1, le=2048)
|
||||
delimiter: str = Field(default=r"\n", min_length=1)
|
||||
graphrag: GraphragConfig | None = None
|
||||
html4excel: bool = False
|
||||
layout_recognize: str = "DeepDOC"
|
||||
raptor: RaptorConfig | None = None
|
||||
tag_kb_ids: list[str] = Field(default_factory=list)
|
||||
topn_tags: int = Field(default=1, ge=1, le=10)
|
||||
filename_embd_weight: float | None = Field(default=None, ge=0.0, le=1.0)
|
||||
task_page_size: int | None = Field(default=None, ge=1)
|
||||
pages: list[list[int]] | None = None
|
||||
|
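A small sketch of how the constrained defaults behave; values outside the declared ranges are rejected at construction time.

```python
# Sketch: ParserConfig fills its defaults and enforces the Field constraints.
cfg = ParserConfig()
print(cfg.chunk_token_num, cfg.topn_tags)   # 128 1

try:
    ParserConfig(chunk_token_num=0)         # violates ge=1
except Exception as exc:
    print(type(exc).__name__)               # ValidationError
```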
||||
|
||||
class CreateDatasetReq(Base):
|
||||
name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(...)]
|
||||
avatar: str | None = Field(default=None, max_length=65535)
|
||||
description: str | None = Field(default=None, max_length=65535)
|
||||
embedding_model: Annotated[str, StringConstraints(strip_whitespace=True, max_length=255), Field(default="", serialization_alias="embd_id")]
|
||||
permission: PermissionEnum = Field(default=PermissionEnum.me, min_length=1, max_length=16)
|
||||
chunk_method: ChunkMethodEnum = Field(default=ChunkMethodEnum.naive, min_length=1, max_length=32, serialization_alias="parser_id")
|
||||
parser_config: ParserConfig | None = Field(default=None)
|
||||
|
||||
@field_validator("avatar")
|
||||
@classmethod
|
||||
def validate_avatar_base64(cls, v: str | None) -> str | None:
|
||||
"""
|
||||
Validates Base64-encoded avatar string format and MIME type compliance.
|
||||
|
||||
Implements a three-stage validation workflow:
|
||||
1. MIME prefix existence check
|
||||
2. MIME type format validation
|
||||
3. Supported type verification
|
||||
|
||||
Args:
|
||||
v (str): Raw avatar field value
|
||||
|
||||
Returns:
|
||||
str: Validated Base64 string
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: For structural errors in these cases:
|
||||
- Missing MIME prefix header
|
||||
- Invalid MIME prefix format
|
||||
- Unsupported image MIME type
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Valid case
|
||||
CreateDatasetReq(avatar="data:image/png;base64,iVBORw0KGg...")
|
||||
|
||||
# Invalid cases
|
||||
CreateDatasetReq(avatar="image/jpeg;base64,...") # Missing 'data:' prefix
|
||||
CreateDatasetReq(avatar="data:video/mp4;base64,...") # Unsupported MIME type
|
||||
```
|
||||
"""
|
||||
if v is None:
|
||||
return v
|
||||
|
||||
if "," in v:
|
||||
prefix, _ = v.split(",", 1)
|
||||
if not prefix.startswith("data:"):
|
||||
raise PydanticCustomError("format_invalid", "Invalid MIME prefix format. Must start with 'data:'")
|
||||
|
||||
mime_type = prefix[5:].split(";")[0]
|
||||
supported_mime_types = ["image/jpeg", "image/png"]
|
||||
if mime_type not in supported_mime_types:
|
||||
raise PydanticCustomError("format_invalid", "Unsupported MIME type. Allowed: {supported_mime_types}", {"supported_mime_types": supported_mime_types})
|
||||
|
||||
return v
|
||||
else:
|
||||
raise PydanticCustomError("format_invalid", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>")
|
||||
|
||||
@field_validator("embedding_model", mode="after")
|
||||
@classmethod
|
||||
def validate_embedding_model(cls, v: str) -> str:
|
||||
"""
|
||||
Validates embedding model identifier format compliance.
|
||||
|
||||
Validation pipeline:
|
||||
1. Structural format verification
|
||||
2. Component non-empty check
|
||||
3. Value normalization
|
||||
|
||||
Args:
|
||||
v (str): Raw model identifier
|
||||
|
||||
Returns:
|
||||
str: Validated <model_name>@<provider> format
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: For these violations:
|
||||
- Missing @ separator
|
||||
- Empty model_name/provider
|
||||
- Invalid component structure
|
||||
|
||||
Examples:
|
||||
Valid: "text-embedding-3-large@openai"
|
||||
Invalid: "invalid_model" (no @)
|
||||
Invalid: "@openai" (empty model_name)
|
||||
Invalid: "text-embedding-3-large@" (empty provider)
|
||||
"""
|
||||
if "@" not in v:
|
||||
raise PydanticCustomError("format_invalid", "Embedding model identifier must follow <model_name>@<provider> format")
|
||||
|
||||
components = v.split("@", 1)
|
||||
if len(components) != 2 or not all(components):
|
||||
raise PydanticCustomError("format_invalid", "Both model_name and provider must be non-empty strings")
|
||||
|
||||
model_name, provider = components
|
||||
if not model_name.strip() or not provider.strip():
|
||||
raise PydanticCustomError("format_invalid", "Model name and provider cannot be whitespace-only strings")
|
||||
return v
|
||||
|
||||
@field_validator("permission", mode="before")
|
||||
@classmethod
|
||||
def normalize_permission(cls, v: Any) -> Any:
|
||||
return normalize_str(v)
|
||||
|
||||
@field_validator("parser_config", mode="before")
|
||||
@classmethod
|
||||
def normalize_empty_parser_config(cls, v: Any) -> Any:
|
||||
"""
|
||||
Normalizes empty parser configuration by converting empty dictionaries to None.
|
||||
|
||||
This validator ensures consistent handling of empty parser configurations across
|
||||
the application by converting empty dicts to None values.
|
||||
|
||||
Args:
|
||||
v (Any): Raw input value for the parser config field
|
||||
|
||||
Returns:
|
||||
Any: Returns None if input is an empty dict, otherwise returns the original value
|
||||
|
||||
Example:
|
||||
>>> normalize_empty_parser_config({})
|
||||
None
|
||||
|
||||
>>> normalize_empty_parser_config({"key": "value"})
|
||||
{"key": "value"}
|
||||
"""
|
||||
if v == {}:
|
||||
return None
|
||||
return v
|
||||
|
||||
@field_validator("parser_config", mode="after")
|
||||
@classmethod
|
||||
def validate_parser_config_json_length(cls, v: ParserConfig | None) -> ParserConfig | None:
|
||||
"""
|
||||
Validates serialized JSON length constraints for parser configuration.
|
||||
|
||||
Implements a two-stage validation workflow:
|
||||
1. Null check - bypass validation for empty configurations
|
||||
2. Model serialization - convert Pydantic model to JSON string
|
||||
3. Size verification - enforce maximum allowed payload size
|
||||
|
||||
Args:
|
||||
v (ParserConfig | None): Raw parser configuration object
|
||||
|
||||
Returns:
|
||||
ParserConfig | None: Validated configuration object
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: When serialized JSON exceeds 65,535 characters
|
||||
"""
|
||||
if v is None:
|
||||
return None
|
||||
|
||||
if (json_str := v.model_dump_json()) and len(json_str) > 65535:
|
||||
raise PydanticCustomError("string_too_long", "Parser config exceeds size limit (max 65,535 characters). Current size: {actual}", {"actual": len(json_str)})
|
||||
return v
|
||||
|
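A hedged sketch of the request model in action: the validators above strip, normalize, and check input when the model is built, and the serialization aliases only appear when dumping with `by_alias=True`.

```python
# Sketch: field validators run at construction; aliases apply on dump.
req = CreateDatasetReq(
    name="  Demo KB  ",
    embedding_model="text-embedding-3-large@openai",
    permission=" Team ",                      # normalized to "team" before enum coercion
)
print(req.name)                               # "Demo KB" (whitespace stripped)
print(req.permission == PermissionEnum.team)  # True

payload = req.model_dump(by_alias=True)
print("embd_id" in payload, "parser_id" in payload)  # True True
```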
||||
|
||||
class UpdateDatasetReq(CreateDatasetReq):
|
||||
dataset_id: str = Field(...)
|
||||
name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(default="")]
|
||||
pagerank: int = Field(default=0, ge=0, le=100)
|
||||
|
||||
@field_validator("dataset_id", mode="before")
|
||||
@classmethod
|
||||
def validate_dataset_id(cls, v: Any) -> str:
|
||||
return validate_uuid1_hex(v)
|
||||
|
||||
|
||||
class DeleteReq(Base):
|
||||
ids: list[str] | None = Field(...)
|
||||
|
||||
@field_validator("ids", mode="after")
|
||||
@classmethod
|
||||
def validate_ids(cls, v_list: list[str] | None) -> list[str] | None:
|
||||
"""
|
||||
Validates and normalizes a list of UUID strings with None handling.
|
||||
|
||||
This post-processing validator performs:
|
||||
1. None input handling (pass-through)
|
||||
2. UUID version 1 validation for each list item
|
||||
3. Duplicate value detection
|
||||
4. Returns normalized UUID hex strings or None
|
||||
|
||||
Args:
|
||||
v_list (list[str] | None): Input list that has passed initial validation.
|
||||
Either a list of UUID strings or None.
|
||||
|
||||
Returns:
|
||||
list[str] | None:
|
||||
- None if input was None
|
||||
- List of normalized UUID hex strings otherwise:
|
||||
* 32-character lowercase
|
||||
* Valid UUID version 1
|
||||
* Unique within list
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: With structured error details when:
|
||||
- "invalid_UUID1_format": Any string fails UUIDv1 validation
|
||||
- "duplicate_uuids": If duplicate IDs are detected
|
||||
|
||||
Validation Rules:
|
||||
- None input returns None
|
||||
- Empty list returns empty list
|
||||
- All non-None items must be valid UUIDv1
|
||||
- No duplicates permitted
|
||||
- Original order preserved
|
||||
|
||||
Examples:
|
||||
Valid cases:
|
||||
>>> validate_ids(None)
|
||||
None
|
||||
>>> validate_ids([])
|
||||
[]
|
||||
>>> validate_ids(["550e8400-e29b-41d4-a716-446655440000"])
|
||||
["550e8400e29b41d4a716446655440000"]
|
||||
|
||||
Invalid cases:
|
||||
>>> validate_ids(["invalid"])
|
||||
# raises PydanticCustomError(invalid_UUID1_format)
|
||||
>>> validate_ids(["550e...", "550e..."])
|
||||
# raises PydanticCustomError(duplicate_uuids)
|
||||
|
||||
Security Notes:
|
||||
- Validates UUID version to prevent version spoofing
|
||||
- Duplicate check prevents data injection
|
||||
- None handling maintains pipeline integrity
|
||||
"""
|
||||
if v_list is None:
|
||||
return None
|
||||
|
||||
ids_list = []
|
||||
for v in v_list:
|
||||
try:
|
||||
ids_list.append(validate_uuid1_hex(v))
|
||||
except PydanticCustomError as e:
|
||||
raise e
|
||||
|
||||
duplicates = [item for item, count in Counter(ids_list).items() if count > 1]
|
||||
if duplicates:
|
||||
duplicates_str = ", ".join(duplicates)
|
||||
raise PydanticCustomError("duplicate_uuids", "Duplicate ids: '{duplicate_ids}'", {"duplicate_ids": duplicates_str})
|
||||
|
||||
return ids_list
|
||||
|
||||
|
||||
class DeleteDatasetReq(DeleteReq): ...
|
||||
|
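A brief sketch of the delete-request validation: valid UUID1 strings come back as bare hex, and duplicates fail model construction.

```python
# Sketch: DeleteDatasetReq normalizes ids and rejects duplicates.
from uuid import uuid1

some_id = str(uuid1())
req = DeleteDatasetReq(ids=[some_id])
print(req.ids)                                 # ['<32-char hex>']

try:
    DeleteDatasetReq(ids=[some_id, some_id])   # duplicate ids
except Exception as exc:
    print(type(exc).__name__)                  # ValidationError (duplicate_uuids)
```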
||||
|
||||
class OrderByEnum(StrEnum):
|
||||
create_time = auto()
|
||||
update_time = auto()
|
||||
|
||||
|
||||
class BaseListReq(Base):
|
||||
id: str | None = None
|
||||
name: str | None = None
|
||||
page: int = Field(default=1, ge=1)
|
||||
page_size: int = Field(default=30, ge=1)
|
||||
orderby: OrderByEnum = Field(default=OrderByEnum.create_time)
|
||||
desc: bool = Field(default=True)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def validate_id(cls, v: Any) -> str:
|
||||
return validate_uuid1_hex(v)
|
||||
|
||||
@field_validator("orderby", mode="before")
|
||||
@classmethod
|
||||
def normalize_orderby(cls, v: Any) -> Any:
|
||||
return normalize_str(v)
|
||||
|
||||
|
||||
class ListDatasetReq(BaseListReq): ...
|
||||
@ -38,5 +38,7 @@
|
||||
"entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
|
||||
"source_id": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
|
||||
"n_hop_with_weight": {"type": "varchar", "default": ""},
|
||||
"removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"}
|
||||
"removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
|
||||
|
||||
"doc_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
213
conf/os_mapping.json
Normal file
@ -0,0 +1,213 @@
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
"number_of_shards": 2,
|
||||
"number_of_replicas": 0,
|
||||
"refresh_interval": "1000ms",
|
||||
"knn": true,
|
||||
"similarity": {
|
||||
"scripted_sim": {
|
||||
"type": "scripted",
|
||||
"script": {
|
||||
"source": "double idf = Math.log(1+(field.docCount-term.docFreq+0.5)/(term.docFreq + 0.5))/Math.log(1+((field.docCount-0.5)/1.5)); return query.boost * idf * Math.min(doc.freq, 1);"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"lat_lon": {
|
||||
"type": "geo_point",
|
||||
"store": "true"
|
||||
}
|
||||
},
|
||||
"date_detection": "true",
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"int": {
|
||||
"match": "*_int",
|
||||
"mapping": {
|
||||
"type": "integer",
|
||||
"store": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"ulong": {
|
||||
"match": "*_ulong",
|
||||
"mapping": {
|
||||
"type": "unsigned_long",
|
||||
"store": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"long": {
|
||||
"match": "*_long",
|
||||
"mapping": {
|
||||
"type": "long",
|
||||
"store": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"short": {
|
||||
"match": "*_short",
|
||||
"mapping": {
|
||||
"type": "short",
|
||||
"store": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"numeric": {
|
||||
"match": "*_flt",
|
||||
"mapping": {
|
||||
"type": "float",
|
||||
"store": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"tks": {
|
||||
"match": "*_tks",
|
||||
"mapping": {
|
||||
"type": "text",
|
||||
"similarity": "scripted_sim",
|
||||
"analyzer": "whitespace",
|
||||
"store": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"ltks": {
|
||||
"match": "*_ltks",
|
||||
"mapping": {
|
||||
"type": "text",
|
||||
"analyzer": "whitespace",
|
||||
"store": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kwd": {
|
||||
"match_pattern": "regex",
|
||||
"match": "^(.*_(kwd|id|ids|uid|uids)|uid)$",
|
||||
"mapping": {
|
||||
"type": "keyword",
|
||||
"similarity": "boolean",
|
||||
"store": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"dt": {
|
||||
"match_pattern": "regex",
|
||||
"match": "^.*(_dt|_time|_at)$",
|
||||
"mapping": {
|
||||
"type": "date",
|
||||
"format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||yyyy-MM-dd_HH:mm:ss",
|
||||
"store": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"nested": {
|
||||
"match": "*_nst",
|
||||
"mapping": {
|
||||
"type": "nested"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"object": {
|
||||
"match": "*_obj",
|
||||
"mapping": {
|
||||
"type": "object",
|
||||
"dynamic": "true"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"string": {
|
||||
"match_pattern": "regex",
|
||||
"match": "^.*_(with_weight|list)$",
|
||||
"mapping": {
|
||||
"type": "text",
|
||||
"index": "false",
|
||||
"store": true
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"rank_feature": {
|
||||
"match": "*_fea",
|
||||
"mapping": {
|
||||
"type": "rank_feature"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"rank_features": {
|
||||
"match": "*_feas",
|
||||
"mapping": {
|
||||
"type": "rank_features"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"knn_vector": {
|
||||
"match": "*_512_vec",
|
||||
"mapping": {
|
||||
"type": "knn_vector",
|
||||
"index": true,
|
||||
"space_type": "cosinesimil",
|
||||
"dimension": 512
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"knn_vector": {
|
||||
"match": "*_768_vec",
|
||||
"mapping": {
|
||||
"type": "knn_vector",
|
||||
"index": true,
|
||||
"space_type": "cosinesimil",
|
||||
"dimension": 768
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"knn_vector": {
|
||||
"match": "*_1024_vec",
|
||||
"mapping": {
|
||||
"type": "knn_vector",
|
||||
"index": true,
|
||||
"space_type": "cosinesimil",
|
||||
"dimension": 1024
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"knn_vector": {
|
||||
"match": "*_1536_vec",
|
||||
"mapping": {
|
||||
"type": "knn_vector",
|
||||
"index": true,
|
||||
"space_type": "cosinesimil",
|
||||
"dimension": 1536
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"binary": {
|
||||
"match": "*_bin",
|
||||
"mapping": {
|
||||
"type": "binary"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
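For orientation only, a condensed restatement of the suffix convention the dynamic templates above encode; the real mapping also uses regex matches (for example `*_kwd`/`*_id` keywords and `*_dt`/`*_time`/`*_at` dates), so this sketch is a simplification rather than the configuration itself.

```python
# Illustration: field-name suffixes resolve to OpenSearch types via dynamic templates.
SUFFIX_TO_TYPE = {
    "_int": "integer", "_ulong": "unsigned_long", "_long": "long", "_short": "short",
    "_flt": "float", "_tks": "text", "_ltks": "text", "_kwd": "keyword",
    "_dt": "date", "_nst": "nested", "_obj": "object",
    "_feas": "rank_features", "_fea": "rank_feature",
    "_512_vec": "knn_vector", "_768_vec": "knn_vector",
    "_1024_vec": "knn_vector", "_1536_vec": "knn_vector",
    "_bin": "binary",
}

def mapped_type(field_name: str) -> str | None:
    for suffix, os_type in SUFFIX_TO_TYPE.items():
        if field_name.endswith(suffix):
            return os_type
    return None

print(mapped_type("q_1024_vec"))   # knn_vector
```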
||||
@ -7,8 +7,9 @@ mysql:
|
||||
password: 'infini_rag_flow'
|
||||
host: 'localhost'
|
||||
port: 5455
|
||||
max_connections: 100
|
||||
stale_timeout: 30
|
||||
max_connections: 900
|
||||
stale_timeout: 300
|
||||
max_allowed_packet: 1073741824
|
||||
minio:
|
||||
user: 'rag_flow'
|
||||
password: 'infini_rag_flow'
|
||||
@ -17,6 +18,10 @@ es:
|
||||
hosts: 'http://localhost:1200'
|
||||
username: 'elastic'
|
||||
password: 'infini_rag_flow'
|
||||
os:
|
||||
hosts: 'http://localhost:1201'
|
||||
username: 'admin'
|
||||
password: 'infini_rag_flow_OS_01'
|
||||
infinity:
|
||||
uri: 'localhost:23817'
|
||||
db_name: 'default_db'
|
||||
@ -24,7 +29,6 @@ redis:
|
||||
db: 1
|
||||
password: 'infini_rag_flow'
|
||||
host: 'localhost:6379'
|
||||
|
||||
# postgres:
|
||||
# name: 'rag_flow'
|
||||
# user: 'rag_flow'
|
||||
@ -54,21 +58,38 @@ redis:
|
||||
# secret: 'secret'
|
||||
# tenant_id: 'tenant_id'
|
||||
# container_name: 'container_name'
|
||||
# The OSS object storage uses the MySQL configuration above by default. If you need to switch to another object storage service, please uncomment and configure the following parameters.
|
||||
# opendal:
|
||||
# scheme: 'mysql' # Storage type, such as s3, oss, azure, etc.
|
||||
# config:
|
||||
# oss_table: 'your_table_name'
|
||||
# user_default_llm:
|
||||
# factory: 'Tongyi-Qianwen'
|
||||
# api_key: 'sk-xxxxxxxxxxxxx'
|
||||
# base_url: ''
|
||||
# oauth:
|
||||
# oauth2:
|
||||
# display_name: "OAuth2"
|
||||
# client_id: "your_client_id"
|
||||
# client_secret: "your_client_secret"
|
||||
# authorization_url: "https://your-oauth-provider.com/oauth/authorize"
|
||||
# token_url: "https://your-oauth-provider.com/oauth/token"
|
||||
# userinfo_url: "https://your-oauth-provider.com/oauth/userinfo"
|
||||
# redirect_uri: "https://your-app.com/v1/user/oauth/callback/oauth2"
|
||||
# oidc:
|
||||
# display_name: "OIDC"
|
||||
# client_id: "your_client_id"
|
||||
# client_secret: "your_client_secret"
|
||||
# issuer: "https://your-oauth-provider.com/oidc"
|
||||
# scope: "openid email profile"
|
||||
# redirect_uri: "https://your-app.com/v1/user/oauth/callback/oidc"
|
||||
# github:
|
||||
# client_id: xxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
# secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
# url: https://github.com/login/oauth/access_token
|
||||
# feishu:
|
||||
# app_id: cli_xxxxxxxxxxxxxxxxxxx
|
||||
# app_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
# app_access_token_url: https://open.feishu.cn/open-apis/auth/v3/app_access_token/internal
|
||||
# user_access_token_url: https://open.feishu.cn/open-apis/authen/v1/oidc/access_token
|
||||
# grant_type: 'authorization_code'
|
||||
# type: "github"
|
||||
# icon: "github"
|
||||
# display_name: "Github"
|
||||
# client_id: "your_client_id"
|
||||
# client_secret: "your_client_secret"
|
||||
# redirect_uri: "https://your-app.com/v1/user/oauth/callback/github"
|
||||
# authentication:
|
||||
# client:
|
||||
# switch: false
|
||||
|
||||
@ -69,7 +69,7 @@ class RAGFlowDocxParser:
|
||||
max_type = max(max_type.items(), key=lambda x: x[1])[0]
|
||||
|
||||
colnm = len(df.iloc[0, :])
|
||||
hdrows = [0] # header is not nessesarily appear in the first line
|
||||
hdrows = [0]  # the header does not necessarily appear in the first line
|
||||
if max_type == "Nu":
|
||||
for r in range(1, len(df)):
|
||||
tys = Counter([blockType(str(df.iloc[r, j]))
|
||||
|
||||
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from PIL import Image
|
||||
|
||||
@ -21,11 +21,18 @@ from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk
|
||||
from rag.prompts import vision_llm_figure_describe_prompt
|
||||
|
||||
|
||||
def vision_figure_parser_figure_data_wraper(figures_data_without_positions):
|
||||
return [(
|
||||
(figure_data[1], [figure_data[0]]),
|
||||
[(0, 0, 0, 0, 0)]
|
||||
) for figure_data in figures_data_without_positions if isinstance(figure_data[1], Image.Image)]
|
||||
def vision_figure_parser_figure_data_wrapper(figures_data_without_positions):
|
||||
return [
|
||||
(
|
||||
(figure_data[1], [figure_data[0]]),
|
||||
[(0, 0, 0, 0, 0)],
|
||||
)
|
||||
for figure_data in figures_data_without_positions
|
||||
if isinstance(figure_data[1], Image.Image)
|
||||
]
|
||||
|
||||
|
||||
shared_executor = ThreadPoolExecutor(max_workers=10)
|
||||
|
||||
|
||||
class VisionFigureParser:
|
||||
@ -42,14 +49,14 @@ class VisionFigureParser:
|
||||
|
||||
for item in figures_data:
|
||||
# position
|
||||
if len(item) == 2 and isinstance(item[1], list) and len(item[1]) == 1 and isinstance(item[1][0], tuple) and len(item[1][0]) == 5:
|
||||
if len(item) == 2 and isinstance(item[0], tuple) and len(item[0]) == 2 and isinstance(item[1], list) and isinstance(item[1][0], tuple) and len(item[1][0]) == 5:
|
||||
img_desc = item[0]
|
||||
assert len(img_desc) == 2 and isinstance(img_desc[0], Image.Image) and isinstance(img_desc[1], list), "Should be (figure, [description])"
|
||||
self.figures.append(img_desc[0])
|
||||
self.descriptions.append(img_desc[1])
|
||||
self.positions.append(item[1])
|
||||
else:
|
||||
assert len(item) == 2 and isinstance(item, tuple) and isinstance(item[1], list), f"get {len(item)=}, {item=}"
|
||||
assert len(item) == 2 and isinstance(item[0], Image.Image) and isinstance(item[1], list), f"Unexpected form of figure data: get {len(item)=}, {item=}"
|
||||
self.figures.append(item[0])
|
||||
self.descriptions.append(item[1])
|
||||
|
||||
@ -73,16 +80,21 @@ class VisionFigureParser:
|
||||
def __call__(self, **kwargs):
|
||||
callback = kwargs.get("callback", lambda prog, msg: None)
|
||||
|
||||
for idx, img_binary in enumerate(self.figures or []):
|
||||
figure_num = idx # 0-based
|
||||
|
||||
txt = picture_vision_llm_chunk(
|
||||
binary=img_binary,
|
||||
def process(figure_idx, figure_binary):
|
||||
description_text = picture_vision_llm_chunk(
|
||||
binary=figure_binary,
|
||||
vision_model=self.vision_model,
|
||||
prompt=vision_llm_figure_describe_prompt(),
|
||||
callback=callback,
|
||||
)
|
||||
return figure_idx, description_text
|
||||
|
||||
futures = []
|
||||
for idx, img_binary in enumerate(self.figures or []):
|
||||
futures.append(shared_executor.submit(process, idx, img_binary))
|
||||
|
||||
for future in as_completed(futures):
|
||||
figure_num, txt = future.result()
|
||||
if txt:
|
||||
self.descriptions[figure_num] = txt + "\n".join(self.descriptions[figure_num])
|
||||
|
||||
|
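For reference, the fan-out pattern the rewritten loop relies on, reduced to its core; `describe` here is a stand-in for `picture_vision_llm_chunk` and is only an assumption of the sketch.

```python
# Sketch: submit/as_completed returns results out of order, so each task
# carries its index back for correct placement, as in the parser above.
from concurrent.futures import ThreadPoolExecutor, as_completed

executor = ThreadPoolExecutor(max_workers=10)   # shared, like shared_executor

def describe(idx, figure):
    return idx, f"description of figure {idx}"  # placeholder for the vision LLM call

figures = ["img-a", "img-b", "img-c"]
descriptions = [""] * len(figures)
futures = [executor.submit(describe, i, fig) for i, fig in enumerate(figures)]
for fut in as_completed(futures):
    i, text = fut.result()
    descriptions[i] = text
print(descriptions)
```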
||||
@ -61,7 +61,7 @@ class RAGFlowPdfParser:
|
||||
|
||||
self.ocr = OCR()
|
||||
self.parallel_limiter = None
|
||||
if PARALLEL_DEVICES is not None and PARALLEL_DEVICES > 1:
|
||||
if PARALLEL_DEVICES > 1:
|
||||
self.parallel_limiter = [trio.CapacityLimiter(1) for _ in range(PARALLEL_DEVICES)]
|
||||
|
||||
if hasattr(self, "model_speciess"):
|
||||
@ -180,13 +180,13 @@ class RAGFlowPdfParser:
|
||||
return fea
|
||||
|
||||
@staticmethod
|
||||
def sort_X_by_page(arr, threashold):
|
||||
def sort_X_by_page(arr, threshold):
|
||||
# sort using y1 first and then x1
|
||||
arr = sorted(arr, key=lambda r: (r["page_number"], r["x0"], r["top"]))
|
||||
for i in range(len(arr) - 1):
|
||||
for j in range(i, -1, -1):
|
||||
# restore the order using th
|
||||
if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threashold \
|
||||
if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threshold \
|
||||
and arr[j + 1]["top"] < arr[j]["top"] \
|
||||
and arr[j + 1]["page_number"] == arr[j]["page_number"]:
|
||||
tmp = arr[j]
|
||||
@ -264,13 +264,13 @@ class RAGFlowPdfParser:
|
||||
for b in self.boxes:
|
||||
if b.get("layout_type", "") != "table":
|
||||
continue
|
||||
ii = Recognizer.find_overlapped_with_threashold(b, rows, thr=0.3)
|
||||
ii = Recognizer.find_overlapped_with_threshold(b, rows, thr=0.3)
|
||||
if ii is not None:
|
||||
b["R"] = ii
|
||||
b["R_top"] = rows[ii]["top"]
|
||||
b["R_bott"] = rows[ii]["bottom"]
|
||||
|
||||
ii = Recognizer.find_overlapped_with_threashold(
|
||||
ii = Recognizer.find_overlapped_with_threshold(
|
||||
b, headers, thr=0.3)
|
||||
if ii is not None:
|
||||
b["H_top"] = headers[ii]["top"]
|
||||
@ -285,7 +285,7 @@ class RAGFlowPdfParser:
|
||||
b["C_left"] = clmns[ii]["x0"]
|
||||
b["C_right"] = clmns[ii]["x1"]
|
||||
|
||||
ii = Recognizer.find_overlapped_with_threashold(b, spans, thr=0.3)
|
||||
ii = Recognizer.find_overlapped_with_threshold(b, spans, thr=0.3)
|
||||
if ii is not None:
|
||||
b["H_top"] = spans[ii]["top"]
|
||||
b["H_bott"] = spans[ii]["bottom"]
|
||||
@ -307,13 +307,13 @@ class RAGFlowPdfParser:
|
||||
[{"x0": b[0][0] / ZM, "x1": b[1][0] / ZM,
|
||||
"top": b[0][1] / ZM, "text": "", "txt": t,
|
||||
"bottom": b[-1][1] / ZM,
|
||||
"chars": [],
|
||||
"page_number": pagenum} for b, t in bxs if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]],
|
||||
self.mean_height[-1] / 3
|
||||
self.mean_height[pagenum-1] / 3
|
||||
)
|
||||
|
||||
# merge chars in the same rect
|
||||
for c in Recognizer.sort_Y_firstly(
|
||||
chars, self.mean_height[pagenum - 1] // 4):
|
||||
for c in chars:
|
||||
ii = Recognizer.find_overlapped(c, bxs)
|
||||
if ii is None:
|
||||
self.lefted_chars.append(c)
|
||||
@ -323,11 +323,20 @@ class RAGFlowPdfParser:
|
||||
if abs(ch - bh) / max(ch, bh) >= 0.7 and c["text"] != ' ':
|
||||
self.lefted_chars.append(c)
|
||||
continue
|
||||
if c["text"] == " " and bxs[ii]["text"]:
|
||||
if re.match(r"[0-9a-zA-Zа-яА-Я,.?;:!%%]", bxs[ii]["text"][-1]):
|
||||
bxs[ii]["text"] += " "
|
||||
else:
|
||||
bxs[ii]["text"] += c["text"]
|
||||
bxs[ii]["chars"].append(c)
|
||||
|
||||
for b in bxs:
|
||||
if not b["chars"]:
|
||||
del b["chars"]
|
||||
continue
|
||||
m_ht = np.mean([c["height"] for c in b["chars"]])
|
||||
for c in Recognizer.sort_Y_firstly(b["chars"], m_ht):
|
||||
if c["text"] == " " and b["text"]:
|
||||
if re.match(r"[0-9a-zA-Zа-яА-Я,.?;:!%%]", b["text"][-1]):
|
||||
b["text"] += " "
|
||||
else:
|
||||
b["text"] += c["text"]
|
||||
del b["chars"]
|
||||
|
||||
logging.info(f"__ocr sorting {len(chars)} chars cost {timer() - start}s")
|
||||
start = timer()
|
||||
@ -346,8 +355,8 @@ class RAGFlowPdfParser:
|
||||
del boxes_to_reg[i]["box_image"]
|
||||
logging.info(f"__ocr recognize {len(bxs)} boxes cost {timer() - start}s")
|
||||
bxs = [b for b in bxs if b["text"]]
|
||||
if self.mean_height[-1] == 0:
|
||||
self.mean_height[-1] = np.median([b["bottom"] - b["top"]
|
||||
if self.mean_height[pagenum-1] == 0:
|
||||
self.mean_height[pagenum-1] = np.median([b["bottom"] - b["top"]
|
||||
for b in bxs])
|
||||
self.boxes.append(bxs)
|
||||
|
||||
@ -1006,7 +1015,7 @@ class RAGFlowPdfParser:
|
||||
with sys.modules[LOCK_KEY_pdfplumber]:
|
||||
with (pdfplumber.open(fnm) if isinstance(fnm, str) else pdfplumber.open(BytesIO(fnm))) as pdf:
|
||||
self.pdf = pdf
|
||||
self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
|
||||
self.page_images = [p.to_image(resolution=72 * zoomin, antialias=True).annotated for i, p in
|
||||
enumerate(self.pdf.pages[page_from:page_to])]
|
||||
|
||||
try:
|
||||
|
||||
@ -63,7 +63,7 @@ class RAGFlowPptParser:
|
||||
if shape_type == 6:
|
||||
texts = []
|
||||
for p in sorted(shape.shapes, key=lambda x: (x.top // 10, x.left)):
|
||||
t = self.__extract_texts(p)
|
||||
t = self.__extract(p)
|
||||
if t:
|
||||
texts.append(t)
|
||||
return "\n".join(texts)
|
||||
|
||||
@ -53,14 +53,14 @@ def corpNorm(nm, add_region=True):
|
||||
nm = re.sub(r"&", "&", nm)
|
||||
nm = re.sub(r"[\(\)()\+'\"\t \*\\【】-]+", " ", nm)
|
||||
nm = re.sub(
|
||||
r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, 10000, re.IGNORECASE
|
||||
r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, count=10000, flags=re.IGNORECASE
|
||||
)
|
||||
nm = re.sub(
|
||||
r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$",
|
||||
"",
|
||||
nm,
|
||||
10000,
|
||||
re.IGNORECASE,
|
||||
count=10000,
|
||||
flags=re.IGNORECASE,
|
||||
)
|
||||
if not nm or (len(nm) < 5 and not regions.isName(nm[0:2])):
|
||||
return nm
|
||||
|
||||
@ -51,7 +51,7 @@ PY = Pinyin()
|
||||
|
||||
|
||||
def rmHtmlTag(line):
|
||||
return re.sub(r"<[a-z0-9.\"=';,:\+_/ -]+>", " ", line, 100000, re.IGNORECASE)
|
||||
return re.sub(r"<[a-z0-9.\"=';,:\+_/ -]+>", " ", line, count=100000, flags=re.IGNORECASE)
|
||||
|
||||
|
||||
def highest_degree(dg):
|
||||
@ -507,7 +507,7 @@ def parse(cv):
|
||||
(r".*国有.*", "国企"),
|
||||
(r"[ ()\(\)人/·0-9-]+", ""),
|
||||
(r".*(元|规模|于|=|北京|上海|至今|中国|工资|州|shanghai|强|餐饮|融资|职).*", "")]:
|
||||
cv["corporation_type"] = re.sub(p, r, cv["corporation_type"], 1000, re.IGNORECASE)
|
||||
cv["corporation_type"] = re.sub(p, r, cv["corporation_type"], count=1000, flags=re.IGNORECASE)
|
||||
if len(cv["corporation_type"]) < 2:
|
||||
del cv["corporation_type"]
|
||||
|
||||
|
||||
@ -106,7 +106,7 @@ class LayoutRecognizer(Recognizer):
|
||||
bxs.pop(i)
|
||||
continue
|
||||
|
||||
ii = self.find_overlapped_with_threashold(bxs[i], lts_,
|
||||
ii = self.find_overlapped_with_threshold(bxs[i], lts_,
|
||||
thr=0.4)
|
||||
if ii is None: # belong to nothing
|
||||
bxs[i]["layout_type"] = ""
|
||||
|
||||
@ -529,31 +529,30 @@ class OCR:
|
||||
"rag/res/deepdoc")
|
||||
|
||||
# Append multi-GPU tasks to the list
|
||||
if PARALLEL_DEVICES is not None and PARALLEL_DEVICES > 0:
|
||||
if PARALLEL_DEVICES > 0:
|
||||
self.text_detector = []
|
||||
self.text_recognizer = []
|
||||
for device_id in range(PARALLEL_DEVICES):
|
||||
self.text_detector.append(TextDetector(model_dir, device_id))
|
||||
self.text_recognizer.append(TextRecognizer(model_dir, device_id))
|
||||
else:
|
||||
self.text_detector = [TextDetector(model_dir, 0)]
|
||||
self.text_recognizer = [TextRecognizer(model_dir, 0)]
|
||||
self.text_detector = [TextDetector(model_dir)]
|
||||
self.text_recognizer = [TextRecognizer(model_dir)]
|
||||
|
||||
except Exception:
|
||||
model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",
|
||||
local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
|
||||
local_dir_use_symlinks=False)
|
||||
|
||||
if PARALLEL_DEVICES is not None:
|
||||
assert PARALLEL_DEVICES > 0, "Number of devices must be >= 1"
|
||||
if PARALLEL_DEVICES > 0:
|
||||
self.text_detector = []
|
||||
self.text_recognizer = []
|
||||
for device_id in range(PARALLEL_DEVICES):
|
||||
self.text_detector.append(TextDetector(model_dir, device_id))
|
||||
self.text_recognizer.append(TextRecognizer(model_dir, device_id))
|
||||
else:
|
||||
self.text_detector = [TextDetector(model_dir, 0)]
|
||||
self.text_recognizer = [TextRecognizer(model_dir, 0)]
|
||||
self.text_detector = [TextDetector(model_dir)]
|
||||
self.text_recognizer = [TextRecognizer(model_dir)]
|
||||
|
||||
self.drop_score = 0.5
|
||||
self.crop_image_res_index = 0
|
||||
@ -589,7 +588,29 @@ class OCR:
|
||||
flags=cv2.INTER_CUBIC)
|
||||
dst_img_height, dst_img_width = dst_img.shape[0:2]
|
||||
if dst_img_height * 1.0 / dst_img_width >= 1.5:
|
||||
dst_img = np.rot90(dst_img)
|
||||
# Try original orientation
|
||||
rec_result = self.text_recognizer[0]([dst_img])
|
||||
text, score = rec_result[0][0]
|
||||
best_score = score
|
||||
best_img = dst_img
|
||||
|
||||
# Try clockwise 90° rotation
|
||||
rotated_cw = np.rot90(dst_img, k=3)
|
||||
rec_result = self.text_recognizer[0]([rotated_cw])
|
||||
rotated_cw_text, rotated_cw_score = rec_result[0][0]
|
||||
if rotated_cw_score > best_score:
|
||||
best_score = rotated_cw_score
|
||||
best_img = rotated_cw
|
||||
|
||||
# Try counter-clockwise 90° rotation
|
||||
rotated_ccw = np.rot90(dst_img, k=1)
|
||||
rec_result = self.text_recognizer[0]([rotated_ccw])
|
||||
rotated_ccw_text, rotated_ccw_score = rec_result[0][0]
|
||||
if rotated_ccw_score > best_score:
|
||||
best_img = rotated_ccw
|
||||
|
||||
# Use the best image
|
||||
dst_img = best_img
|
||||
return dst_img
|
||||
|
||||
def sorted_boxes(self, dt_boxes):
|
||||
|
||||
@ -52,20 +52,20 @@ class Recognizer:
|
||||
self.label_list = label_list
|
||||
|
||||
@staticmethod
|
||||
def sort_Y_firstly(arr, threashold):
|
||||
def sort_Y_firstly(arr, threshold):
|
||||
def cmp(c1, c2):
|
||||
diff = c1["top"] - c2["top"]
|
||||
if abs(diff) < threashold:
|
||||
if abs(diff) < threshold:
|
||||
diff = c1["x0"] - c2["x0"]
|
||||
return diff
|
||||
arr = sorted(arr, key=cmp_to_key(cmp))
|
||||
return arr
|
||||
|
||||
@staticmethod
|
||||
def sort_X_firstly(arr, threashold):
|
||||
def sort_X_firstly(arr, threshold):
|
||||
def cmp(c1, c2):
|
||||
diff = c1["x0"] - c2["x0"]
|
||||
if abs(diff) < threashold:
|
||||
if abs(diff) < threshold:
|
||||
diff = c1["top"] - c2["top"]
|
||||
return diff
|
||||
arr = sorted(arr, key=cmp_to_key(cmp))
|
||||
@ -133,7 +133,7 @@ class Recognizer:
|
||||
|
||||
@staticmethod
|
||||
def layouts_cleanup(boxes, layouts, far=2, thr=0.7):
|
||||
def notOverlapped(a, b):
|
||||
def not_overlapped(a, b):
|
||||
return any([a["x1"] < b["x0"],
|
||||
a["x0"] > b["x1"],
|
||||
a["bottom"] < b["top"],
|
||||
@ -144,7 +144,7 @@ class Recognizer:
|
||||
j = i + 1
|
||||
while j < min(i + far, len(layouts)) \
|
||||
and (layouts[i].get("type", "") != layouts[j].get("type", "")
|
||||
or notOverlapped(layouts[i], layouts[j])):
|
||||
or not_overlapped(layouts[i], layouts[j])):
|
||||
j += 1
|
||||
if j >= min(i + far, len(layouts)):
|
||||
i += 1
|
||||
@ -163,9 +163,9 @@ class Recognizer:
|
||||
|
||||
area_i, area_i_1 = 0, 0
|
||||
for b in boxes:
|
||||
if not notOverlapped(b, layouts[i]):
|
||||
if not not_overlapped(b, layouts[i]):
|
||||
area_i += Recognizer.overlapped_area(b, layouts[i], False)
|
||||
if not notOverlapped(b, layouts[j]):
|
||||
if not not_overlapped(b, layouts[j]):
|
||||
area_i_1 += Recognizer.overlapped_area(b, layouts[j], False)
|
||||
|
||||
if area_i > area_i_1:
|
||||
@ -239,15 +239,15 @@ class Recognizer:
|
||||
e -= 1
|
||||
break
|
||||
|
||||
max_overlaped_i, max_overlaped = None, 0
|
||||
max_overlapped_i, max_overlapped = None, 0
|
||||
for i in range(s, e):
|
||||
ov = Recognizer.overlapped_area(bxs[i], box)
|
||||
if ov <= max_overlaped:
|
||||
if ov <= max_overlapped:
|
||||
continue
|
||||
max_overlaped_i = i
|
||||
max_overlaped = ov
|
||||
max_overlapped_i = i
|
||||
max_overlapped = ov
|
||||
|
||||
return max_overlaped_i
|
||||
return max_overlapped_i
|
||||
|
||||
@staticmethod
|
||||
def find_horizontally_tightest_fit(box, boxes):
|
||||
@ -264,7 +264,7 @@ class Recognizer:
|
||||
return min_i
|
||||
|
||||
@staticmethod
|
||||
def find_overlapped_with_threashold(box, boxes, thr=0.3):
|
||||
def find_overlapped_with_threshold(box, boxes, thr=0.3):
|
||||
if not boxes:
|
||||
return
|
||||
max_overlapped_i, max_overlapped, _max_overlapped = None, thr, 0
|
||||
@ -408,18 +408,18 @@ class Recognizer:
|
||||
|
||||
def __call__(self, image_list, thr=0.7, batch_size=16):
|
||||
res = []
|
||||
imgs = []
|
||||
images = []
|
||||
for i in range(len(image_list)):
|
||||
if not isinstance(image_list[i], np.ndarray):
|
||||
imgs.append(np.array(image_list[i]))
|
||||
images.append(np.array(image_list[i]))
|
||||
else:
|
||||
imgs.append(image_list[i])
|
||||
images.append(image_list[i])
|
||||
|
||||
batch_loop_cnt = math.ceil(float(len(imgs)) / batch_size)
|
||||
batch_loop_cnt = math.ceil(float(len(images)) / batch_size)
|
||||
for i in range(batch_loop_cnt):
|
||||
start_index = i * batch_size
|
||||
end_index = min((i + 1) * batch_size, len(imgs))
|
||||
batch_image_list = imgs[start_index:end_index]
|
||||
end_index = min((i + 1) * batch_size, len(images))
|
||||
batch_image_list = images[start_index:end_index]
|
||||
inputs = self.preprocess(batch_image_list)
|
||||
logging.debug("preprocess")
|
||||
for ins in inputs:
|
||||
|
||||
@ -84,13 +84,13 @@ def get_table_html(img, tb_cpns, ocr):
|
||||
clmns = LayoutRecognizer.layouts_cleanup(boxes, clmns, 5, 0.5)
|
||||
|
||||
for b in boxes:
|
||||
ii = LayoutRecognizer.find_overlapped_with_threashold(b, rows, thr=0.3)
|
||||
ii = LayoutRecognizer.find_overlapped_with_threshold(b, rows, thr=0.3)
|
||||
if ii is not None:
|
||||
b["R"] = ii
|
||||
b["R_top"] = rows[ii]["top"]
|
||||
b["R_bott"] = rows[ii]["bottom"]
|
||||
|
||||
ii = LayoutRecognizer.find_overlapped_with_threashold(b, headers, thr=0.3)
|
||||
ii = LayoutRecognizer.find_overlapped_with_threshold(b, headers, thr=0.3)
|
||||
if ii is not None:
|
||||
b["H_top"] = headers[ii]["top"]
|
||||
b["H_bott"] = headers[ii]["bottom"]
|
||||
@ -104,7 +104,7 @@ def get_table_html(img, tb_cpns, ocr):
|
||||
b["C_left"] = clmns[ii]["x0"]
|
||||
b["C_right"] = clmns[ii]["x1"]
|
||||
|
||||
ii = LayoutRecognizer.find_overlapped_with_threashold(b, spans, thr=0.3)
|
||||
ii = LayoutRecognizer.find_overlapped_with_threshold(b, spans, thr=0.3)
|
||||
if ii is not None:
|
||||
b["H_top"] = spans[ii]["top"]
|
||||
b["H_bott"] = spans[ii]["bottom"]
|
||||
|
||||
104
docker/.env
@ -1,7 +1,8 @@
|
||||
# The type of doc engine to use.
|
||||
# Available options:
|
||||
# - `elasticsearch` (default)
|
||||
# - `elasticsearch` (default)
|
||||
# - `infinity` (https://github.com/infiniflow/infinity)
|
||||
# - `opensearch` (https://github.com/opensearch-project/OpenSearch)
|
||||
DOC_ENGINE=${DOC_ENGINE:-elasticsearch}
|
||||
|
||||
# ------------------------------
|
||||
@ -17,14 +18,24 @@ STACK_VERSION=8.11.3
|
||||
# The hostname where the Elasticsearch service is exposed
|
||||
ES_HOST=es01
|
||||
|
||||
# The port used to expose the Elasticsearch service to the host machine,
|
||||
# The port used to expose the Elasticsearch service to the host machine,
|
||||
# allowing EXTERNAL access to the service running inside the Docker container.
|
||||
ES_PORT=1200
|
||||
|
||||
# The password for Elasticsearch.
|
||||
# The password for Elasticsearch.
|
||||
ELASTIC_PASSWORD=infini_rag_flow
|
||||
|
||||
# The port used to expose the Kibana service to the host machine,
|
||||
# The port used to expose the OpenSearch service to the host machine; must not be the same as the Elasticsearch port.
|
||||
OS_PORT=1201
|
||||
|
||||
# The hostname where the OpenSearch service is exposed
|
||||
OS_HOST=opensearch01
|
||||
|
||||
# The password for OpenSearch.
|
||||
# At least one uppercase letter, one lowercase letter, one digit, and one special character
|
||||
OPENSEARCH_PASSWORD=infini_rag_flow_OS_01
|
||||
|
||||
# The port used to expose the Kibana service to the host machine,
|
||||
# allowing EXTERNAL access to the service running inside the Docker container.
|
||||
KIBANA_PORT=6601
|
||||
KIBANA_USER=rag_flow
|
||||
@ -42,56 +53,54 @@ INFINITY_THRIFT_PORT=23817
|
||||
INFINITY_HTTP_PORT=23820
|
||||
INFINITY_PSQL_PORT=5432
|
||||
|
||||
# The password for MySQL.
|
||||
# The password for MySQL.
|
||||
MYSQL_PASSWORD=infini_rag_flow
|
||||
# The hostname where the MySQL service is exposed
|
||||
MYSQL_HOST=mysql
|
||||
# The database of the MySQL service to use
|
||||
MYSQL_DBNAME=rag_flow
|
||||
# The port used to expose the MySQL service to the host machine,
|
||||
# allowing EXTERNAL access to the MySQL database running inside the Docker container.
|
||||
# The port used to expose the MySQL service to the host machine,
|
||||
# allowing EXTERNAL access to the MySQL database running inside the Docker container.
|
||||
MYSQL_PORT=5455
|
||||
|
||||
# The hostname where the MinIO service is exposed
|
||||
MINIO_HOST=minio
|
||||
# The port used to expose the MinIO console interface to the host machine,
|
||||
# allowing EXTERNAL access to the web-based console running inside the Docker container.
|
||||
# The port used to expose the MinIO console interface to the host machine,
|
||||
# allowing EXTERNAL access to the web-based console running inside the Docker container.
|
||||
MINIO_CONSOLE_PORT=9001
|
||||
# The port used to expose the MinIO API service to the host machine,
# allowing EXTERNAL access to the MinIO object storage service running inside the Docker container.
MINIO_PORT=9000
# The username for MinIO.
# When updated, you must revise the `minio.user` entry in service_conf.yaml accordingly.
MINIO_USER=rag_flow
# The password for MinIO.
# When updated, you must revise the `minio.password` entry in service_conf.yaml accordingly.
MINIO_PASSWORD=infini_rag_flow

# The hostname where the Redis service is exposed
REDIS_HOST=redis
# The port used to expose the Redis service to the host machine,
# allowing EXTERNAL access to the Redis service running inside the Docker container.
REDIS_PORT=6379
# The password for Redis.
REDIS_PASSWORD=infini_rag_flow

# The port used to expose RAGFlow's HTTP API service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
SVR_HTTP_PORT=9380

# The RAGFlow Docker image to download.
-# Defaults to the v0.18.0-slim edition, which is the RAGFlow Docker image without embedding models.
-RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0-slim
+# Defaults to the v0.19.0-slim edition, which is the RAGFlow Docker image without embedding models.
+RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0-slim
#
# To download the RAGFlow Docker image with embedding models, uncomment the following line instead:
-# RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0
-#
-# The Docker image of the v0.18.0 edition includes built-in embedding models:
+# RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0
+#
+# The Docker image of the v0.19.0 edition includes built-in embedding models:
# - BAAI/bge-large-zh-v1.5
# - maidalun1020/bce-embedding-base_v1
#

# If you cannot download the RAGFlow Docker image:
#

@@ -120,13 +129,21 @@ TIMEZONE='Asia/Shanghai'
# Note that neither `MAX_CONTENT_LENGTH` nor `client_max_body_size` sets the maximum size for files uploaded to an agent.
# See https://ragflow.io/docs/dev/begin_component for details.

-# The log level for the RAGFlow's owned packages and imported packages.
-# Available level:
# Controls how many documents are processed in a single batch.
# Defaults to 4 if DOC_BULK_SIZE is not explicitly set.
DOC_BULK_SIZE=${DOC_BULK_SIZE:-4}

# Defines the number of items to process per batch when generating embeddings.
# Defaults to 16 if EMBEDDING_BATCH_SIZE is not set in the environment.
EMBEDDING_BATCH_SIZE=${EMBEDDING_BATCH_SIZE:-16}

+# Log level for the RAGFlow's own and imported packages.
+# Available levels:
# - `DEBUG`
# - `INFO` (default)
# - `WARNING`
# - `ERROR`
-# For example, following line changes the log level of `ragflow.es_conn` to `DEBUG`:
+# For example, the following line changes the log level of `ragflow.es_conn` to `DEBUG`:
# LOG_LEVELS=ragflow.es_conn=DEBUG

# aliyun OSS configuration
@@ -137,5 +154,38 @@ TIMEZONE='Asia/Shanghai'
# REGION=cn-hangzhou
# BUCKET=ragflow65536

-# user registration switch
+# A user registration switch:
# - Enable registration: 1
# - Disable registration: 0
REGISTER_ENABLED=1

# Sandbox settings
# Important: To enable the sandbox, you must re-declare the compose profiles. See the hints at the end of this file.
# Double-check that `sandbox-executor-manager` has been added to your `/etc/hosts`.
# Pull the required base images before running:
#   docker pull infiniflow/sandbox-base-nodejs:latest
#   docker pull infiniflow/sandbox-base-python:latest
# Our default sandbox environments include:
# - Node.js base image: includes axios
# - Python base image: includes requests, numpy, and pandas
# Specify custom executor images below if you're using non-default environments.
# SANDBOX_ENABLED=1
# SANDBOX_HOST=sandbox-executor-manager
# SANDBOX_EXECUTOR_MANAGER_IMAGE=infiniflow/sandbox-executor-manager:latest
# SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=3
# SANDBOX_BASE_PYTHON_IMAGE=infiniflow/sandbox-base-python:latest
# SANDBOX_BASE_NODEJS_IMAGE=infiniflow/sandbox-base-nodejs:latest
# SANDBOX_EXECUTOR_MANAGER_PORT=9385
# SANDBOX_ENABLE_SECCOMP=false
# SANDBOX_MAX_MEMORY=256m  # b, k, m, g
# SANDBOX_TIMEOUT=10s      # s, m, 1m30s

# Important: To enable the sandbox, you must re-declare the compose profiles.
# 1. Comment out the COMPOSE_PROFILES line above.
# 2. Uncomment one of the following based on your chosen document engine:
#    - For Elasticsearch:
#      COMPOSE_PROFILES=elasticsearch,sandbox
#    - For Infinity:
#      COMPOSE_PROFILES=infinity,sandbox
#    - For OpenSearch:
#      COMPOSE_PROFILES=opensearch,sandbox
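As a worked example of the hints above (a sketch only, assuming you chose the Elasticsearch document engine and run the commands from the `docker` directory):

```bash
# 1) In .env: set COMPOSE_PROFILES=elasticsearch,sandbox and uncomment SANDBOX_ENABLED=1.
# 2) Pre-pull the sandbox base images listed above.
docker pull infiniflow/sandbox-base-nodejs:latest
docker pull infiniflow/sandbox-base-python:latest
# 3) Bring the stack up and check the executor manager (SANDBOX_EXECUTOR_MANAGER_PORT defaults to 9385).
docker compose -f docker-compose.yml up -d
curl http://localhost:9385/healthz
```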
@@ -78,8 +78,8 @@ The [.env](./.env) file contains important environment variables for Docker.
- `RAGFLOW-IMAGE`
  The Docker image edition. Available editions:

-  - `infiniflow/ragflow:v0.18.0-slim` (default): The RAGFlow Docker image without embedding models.
-  - `infiniflow/ragflow:v0.18.0`: The RAGFlow Docker image with embedding models including:
+  - `infiniflow/ragflow:v0.19.1-slim` (default): The RAGFlow Docker image without embedding models.
+  - `infiniflow/ragflow:v0.19.1`: The RAGFlow Docker image with embedding models including:
    - Built-in embedding models:
      - `BAAI/bge-large-zh-v1.5`
      - `maidalun1020/bce-embedding-base_v1`
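If you want the full edition with the built-in embedding models, a minimal sketch of the switch looks like this (the `sed` pattern and compose file path are assumptions; editing `.env` by hand works just as well):

```bash
# Point RAGFLOW_IMAGE at the full edition, then pull it before restarting the stack.
sed -i 's#^RAGFLOW_IMAGE=.*#RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.1#' .env
docker compose -f docker-compose.yml pull
docker compose -f docker-compose.yml up -d
```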
@@ -115,6 +115,16 @@ The [.env](./.env) file contains important environment variables for Docker.
- `MAX_CONTENT_LENGTH`
  The maximum file size for each uploaded file, in bytes. You can uncomment this line if you wish to change the 128M file size limit. After making the change, ensure you update `client_max_body_size` in nginx/nginx.conf correspondingly.
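As a concrete illustration of keeping the two limits in sync, here is a minimal sketch that raises the ceiling to 256 MB (the exact values are examples, not recommendations):

```bash
# 1) In docker/.env, uncomment and set the byte limit:
#      MAX_CONTENT_LENGTH=268435456        # 256 MB
# 2) In nginx/nginx.conf, set the matching nginx limit:
#      client_max_body_size 256m;
# 3) Recreate the containers so both settings take effect:
docker compose -f docker-compose.yml up -d
```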
### Doc bulk size

- `DOC_BULK_SIZE`
  The number of document chunks processed in a single batch during document parsing. Defaults to `4`.

### Embedding batch size

- `EMBEDDING_BATCH_SIZE`
  The number of text chunks processed in a single batch during embedding vectorization. Defaults to `16`.
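Both values can also be overridden per run without editing `.env`, since the file reads them from the environment with shell-style defaults (`${DOC_BULK_SIZE:-4}`, `${EMBEDDING_BATCH_SIZE:-16}`). A minimal sketch, assuming you start the stack from the `docker` directory:

```bash
# Larger batches trade memory for throughput; these numbers are illustrative only.
DOC_BULK_SIZE=8 EMBEDDING_BATCH_SIZE=32 docker compose -f docker-compose.yml up -d
```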
## 🐋 Service configuration

[service_conf.yaml](./service_conf.yaml) specifies the system-level configuration for RAGFlow and is used by its API server and task executor. In a dockerized setup, this file is automatically created based on the [service_conf.yaml.template](./service_conf.yaml.template) file (replacing all environment variables by their values).

@@ -154,9 +164,20 @@ The [.env](./.env) file contains important environment variables for Docker.
- `addressing_style`: Optional. The style of addressing to use for the S3 endpoint. This can be `path` or `virtual`.
- `prefix_path`: Optional. A prefix path to prepend to file names in the S3 bucket, which can help organize files within the bucket.

-- `oauth`
-  The OAuth configuration for signing up or signing in to RAGFlow using a third-party account. It is disabled by default. To enable this feature, uncomment the corresponding lines in **service_conf.yaml.template**.
-  - `github`: The GitHub authentication settings for your application. Visit the [Github Developer Settings page](https://github.com/settings/developers) to obtain your client_id and secret_key.
+- `oauth`
+  The OAuth configuration for signing up or signing in to RAGFlow using a third-party account.
+  - `<channel>`: Custom channel ID.
+    - `type`: Authentication type; options include `oauth2`, `oidc`, and `github`. Defaults to `oauth2`; if the `issuer` parameter is provided, it defaults to `oidc`.
+    - `icon`: Icon ID; options include `github` and `sso`. Defaults to `sso`.
+    - `display_name`: Channel name; defaults to the Title Case form of the channel ID.
+    - `client_id`: Required. The unique identifier assigned to the client application.
+    - `client_secret`: Required. The secret key for the client application, used for communication with the authentication server.
+    - `authorization_url`: Base URL for obtaining user authorization.
+    - `token_url`: URL for exchanging the authorization code and obtaining an access token.
+    - `userinfo_url`: URL for obtaining user information (username, email, etc.).
+    - `issuer`: Base URL of the identity provider. OIDC clients can dynamically obtain the identity provider's metadata (`authorization_url`, `token_url`, `userinfo_url`) through `issuer`.
+    - `scope`: Requested permission scope, a space-separated string. For example, `openid profile email`.
+    - `redirect_uri`: Required. The URI to which the authorization server redirects during the authentication flow to return results. It must match the callback URI registered with the authentication server. Format: `https://your-app.com/v1/user/oauth/callback/<channel>`. For a local setup, you can use `http://127.0.0.1:80/v1/user/oauth/callback/<channel>` directly.

- `user_default_llm`
  The default LLM to use for a new RAGFlow user. It is disabled by default. To enable this feature, uncomment the corresponding lines in **service_conf.yaml.template**.
@@ -35,6 +35,44 @@ services:
      - ragflow
    restart: on-failure

  opensearch01:
    container_name: ragflow-opensearch-01
    profiles:
      - opensearch
    image: hub.icert.top/opensearchproject/opensearch:2.19.1
    volumes:
      - osdata01:/usr/share/opensearch/data
    ports:
      - ${OS_PORT}:9201
    env_file: .env
    environment:
      - node.name=opensearch01
      - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD}
      - bootstrap.memory_lock=false
      - discovery.type=single-node
      - plugins.security.disabled=false
      - plugins.security.ssl.http.enabled=false
      - plugins.security.ssl.transport.enabled=true
      - cluster.routing.allocation.disk.watermark.low=5gb
      - cluster.routing.allocation.disk.watermark.high=3gb
      - cluster.routing.allocation.disk.watermark.flood_stage=2gb
      - TZ=${TIMEZONE}
      - http.port=9201
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD-SHELL", "curl http://localhost:9201"]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - ragflow
    restart: on-failure

  infinity:
    container_name: ragflow-infinity
    profiles:
@@ -65,6 +103,35 @@ services:
      retries: 120
    restart: on-failure

  sandbox-executor-manager:
    container_name: ragflow-sandbox-executor-manager
    profiles:
      - sandbox
    image: ${SANDBOX_EXECUTOR_MANAGER_IMAGE}
    privileged: true
    ports:
      - ${SANDBOX_EXECUTOR_MANAGER_PORT}:9385
    env_file: .env
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - ragflow
    security_opt:
      - no-new-privileges:true
    environment:
      - TZ=${TIMEZONE}
      - SANDBOX_EXECUTOR_MANAGER_POOL_SIZE=${SANDBOX_EXECUTOR_MANAGER_POOL_SIZE:-3}
      - SANDBOX_BASE_PYTHON_IMAGE=${SANDBOX_BASE_PYTHON_IMAGE:-infiniflow/sandbox-base-python:latest}
      - SANDBOX_BASE_NODEJS_IMAGE=${SANDBOX_BASE_NODEJS_IMAGE:-infiniflow/sandbox-base-nodejs:latest}
      - SANDBOX_ENABLE_SECCOMP=${SANDBOX_ENABLE_SECCOMP:-false}
      - SANDBOX_MAX_MEMORY=${SANDBOX_MAX_MEMORY:-256m}
      - SANDBOX_TIMEOUT=${SANDBOX_TIMEOUT:-10s}
    healthcheck:
      test: ["CMD", "curl", "http://localhost:9385/healthz"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: on-failure

  mysql:
    # mysql:5.7 linux/arm64 image is unavailable.
@@ -133,6 +200,8 @@ services:
volumes:
  esdata01:
    driver: local
  osdata01:
    driver: local
  infinity_data:
    driver: local
  mysql_data:
@@ -1,3 +1,4 @@

include:
  - ./docker-compose-base.yml

@@ -8,7 +9,7 @@ services:
      mysql:
        condition: service_healthy
    image: ${RAGFLOW_IMAGE}
-    # example to setup MCP server
+    # Example configuration to set up an MCP server:
    # command:
    #   - --enable-mcpserver
    #   - --mcp-host=0.0.0.0
@@ -16,7 +17,7 @@ services:
    #   - --mcp-base-url=http://127.0.0.1:9380
    #   - --mcp-script-path=/ragflow/mcp/server/server.py
    #   - --mcp-mode=self-host
-    #   - --mcp--host-api-key="ragflow-xxxxxxx"
+    #   - --mcp-host-api-key=ragflow-xxxxxxx
    container_name: ragflow-server
    ports:
      - ${SVR_HTTP_PORT}:9380
@@ -24,7 +25,7 @@ services:
      - 443:443
      - 5678:5678
      - 5679:5679
-      - 9382:9382 # entry for MCP (host_port:docker_port). The docker_port should match with the value you set for `mcp-port` above
+      - 9382:9382 # entry for MCP (host_port:docker_port). The docker_port must match the value you set for `mcp-port` above.
    volumes:
      - ./ragflow-logs:/ragflow/logs
      - ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf
@@ -32,6 +33,7 @@ services:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
      - ../history_data_agent:/ragflow/history_data_agent
      - ./service_conf.yaml.template:/ragflow/conf/service_conf.yaml.template
      - ./entrypoint.sh:/ragflow/entrypoint.sh
    env_file: .env
    environment:
@@ -42,7 +44,7 @@ services:
      - ragflow
    restart: on-failure
    # https://docs.docker.com/engine/daemon/prometheus/#create-a-prometheus-configuration
-    # If you're using Docker Desktop, the --add-host flag is optional. This flag makes sure that the host's internal IP gets exposed to the Prometheus container.
+    # If you use Docker Desktop, the --add-host flag is optional. This flag ensures that the host's internal IP is exposed to the Prometheus container.
    extra_hosts:
      - "host.docker.internal:host-gateway"
  # executor:
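A minimal sketch of turning the MCP server on (assuming you have uncommented the `command` block above and substituted a real API key): recreate the server container so the new flags take effect, then confirm that the MCP entry port is published.

```bash
# Recreate the stack with the updated docker-compose.yml, then check that the
# MCP entry port (9382 by default) is mapped on the host for ragflow-server.
docker compose -f docker-compose.yml up -d
docker port ragflow-server 9382
```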
docker/entrypoint.sh (2 changed lines, Normal file → Executable file)
@@ -150,7 +150,7 @@ function start_mcp_server() {
    --port="${MCP_PORT}" \
    --base_url="${MCP_BASE_URL}" \
    --mode="${MCP_MODE}" \
-    --api_key="${MCP_HOST_API_KEY}" \ &
+    --api_key="${MCP_HOST_API_KEY}" &
}

# -----------------------------------------------------------------------------

@@ -47,6 +47,9 @@ STOP=false
# Array to keep track of child PIDs
PIDS=()

# Set the path to the NLTK data directory
export NLTK_DATA="./nltk_data"

# Function to handle termination signals
cleanup() {
  echo "Termination signal received. Shutting down..."
@@ -7,8 +7,8 @@ mysql:
  password: '${MYSQL_PASSWORD:-infini_rag_flow}'
  host: '${MYSQL_HOST:-mysql}'
  port: 3306
-  max_connections: 100
-  stale_timeout: 30
+  max_connections: 900
+  stale_timeout: 300
minio:
  user: '${MINIO_USER:-rag_flow}'
  password: '${MINIO_PASSWORD:-infini_rag_flow}'
@@ -17,6 +17,10 @@ es:
  hosts: 'http://${ES_HOST:-es01}:9200'
  username: '${ES_USER:-elastic}'
  password: '${ELASTIC_PASSWORD:-infini_rag_flow}'
os:
  hosts: 'http://${OS_HOST:-opensearch01}:9201'
  username: '${OS_USER:-admin}'
  password: '${OPENSEARCH_PASSWORD:-infini_rag_flow_OS_01}'
infinity:
  uri: '${INFINITY_HOST:-infinity}:23817'
  db_name: 'default_db'
@@ -71,16 +75,28 @@ redis:
# asr_model: ''
# image2text_model: ''
# oauth:
#   oauth2:
#     display_name: "OAuth2"
#     client_id: "your_client_id"
#     client_secret: "your_client_secret"
#     authorization_url: "https://your-oauth-provider.com/oauth/authorize"
#     token_url: "https://your-oauth-provider.com/oauth/token"
#     userinfo_url: "https://your-oauth-provider.com/oauth/userinfo"
#     redirect_uri: "https://your-app.com/v1/user/oauth/callback/oauth2"
#   oidc:
#     display_name: "OIDC"
#     client_id: "your_client_id"
#     client_secret: "your_client_secret"
#     issuer: "https://your-oauth-provider.com/oidc"
#     scope: "openid email profile"
#     redirect_uri: "https://your-app.com/v1/user/oauth/callback/oidc"
#   github:
-#     client_id: xxxxxxxxxxxxxxxxxxxxxxxxx
-#     secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
-#     url: https://github.com/login/oauth/access_token
-# feishu:
-#   app_id: cli_xxxxxxxxxxxxxxxxxxx
-#   app_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxx
-#   app_access_token_url: https://open.feishu.cn/open-apis/auth/v3/app_access_token/internal
-#   user_access_token_url: https://open.feishu.cn/open-apis/authen/v1/oidc/access_token
-#   grant_type: 'authorization_code'
+#     type: "github"
+#     icon: "github"
+#     display_name: "Github"
+#     client_id: "your_client_id"
+#     client_secret: "your_client_secret"
+#     redirect_uri: "https://your-app.com/v1/user/oauth/callback/github"
# authentication:
#   client:
#     switch: false
@@ -99,8 +99,8 @@ RAGFlow utilizes MinIO as its object storage solution, leveraging its scalability
- `RAGFLOW-IMAGE`
  The Docker image edition. Available editions:

-  - `infiniflow/ragflow:v0.18.0-slim` (default): The RAGFlow Docker image without embedding models.
-  - `infiniflow/ragflow:v0.18.0`: The RAGFlow Docker image with embedding models including:
+  - `infiniflow/ragflow:v0.19.1-slim` (default): The RAGFlow Docker image without embedding models.
+  - `infiniflow/ragflow:v0.19.1`: The RAGFlow Docker image with embedding models including:
    - Built-in embedding models:
      - `BAAI/bge-large-zh-v1.5`
      - `maidalun1020/bce-embedding-base_v1`
@@ -132,6 +132,12 @@ If you cannot download the RAGFlow Docker image, try the following mirrors.
- `MACOS`
  Optimizations for macOS. It is disabled by default. You can uncomment this line if your OS is macOS.

### User registration

- `REGISTER_ENABLED`
  - `1`: (Default) Enable user registration.
  - `0`: Disable user registration.
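For instance, to turn registration off on an existing deployment, a minimal sketch looks like this (it assumes the default `docker/.env` layout; editing the file by hand works just as well):

```bash
# Flip the switch in docker/.env, then recreate the containers so it is picked up.
sed -i 's/^REGISTER_ENABLED=.*/REGISTER_ENABLED=0/' .env
docker compose -f docker-compose.yml up -d
```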
## Service configuration

[service_conf.yaml.template](https://github.com/infiniflow/ragflow/blob/main/docker/service_conf.yaml.template) specifies the system-level configuration for RAGFlow and is used by its API server and task executor.

@@ -158,9 +164,52 @@ If you cannot download the RAGFlow Docker image, try the following mirrors.

### `oauth`

-The OAuth configuration for signing up or signing in to RAGFlow using a third-party account. It is disabled by default. To enable this feature, uncomment the corresponding lines in **service_conf.yaml.template**.
+The OAuth configuration for signing up or signing in to RAGFlow using a third-party account.

-- `github`: The GitHub authentication settings for your application. Visit the [GitHub Developer Settings](https://github.com/settings/developers) page to obtain your client_id and secret_key.
+- `<channel>`: Custom channel ID.
  - `type`: Authentication type; options include `oauth2`, `oidc`, and `github`. Defaults to `oauth2`; if the `issuer` parameter is provided, it defaults to `oidc`.
  - `icon`: Icon ID; options include `github` and `sso`. Defaults to `sso`.
  - `display_name`: Channel name; defaults to the Title Case form of the channel ID.
  - `client_id`: Required. The unique identifier assigned to the client application.
  - `client_secret`: Required. The secret key for the client application, used for communication with the authentication server.
  - `authorization_url`: Base URL for obtaining user authorization.
  - `token_url`: URL for exchanging the authorization code and obtaining an access token.
  - `userinfo_url`: URL for obtaining user information (username, email, etc.).
  - `issuer`: Base URL of the identity provider. OIDC clients can dynamically obtain the identity provider's metadata (`authorization_url`, `token_url`, `userinfo_url`) through `issuer`.
  - `scope`: Requested permission scope, a space-separated string. For example, `openid profile email`.
  - `redirect_uri`: Required. The URI to which the authorization server redirects during the authentication flow to return results. It must match the callback URI registered with the authentication server. Format: `https://your-app.com/v1/user/oauth/callback/<channel>`. For a local setup, you can use `http://127.0.0.1:80/v1/user/oauth/callback/<channel>` directly.

:::tip NOTE
The following are best practices for configuring various third-party authentication methods. You can configure one or more third-party authentication methods for RAGFlow:
```yaml
oauth:
  oauth2:
    display_name: "OAuth2"
    client_id: "your_client_id"
    client_secret: "your_client_secret"
    authorization_url: "https://your-oauth-provider.com/oauth/authorize"
    token_url: "https://your-oauth-provider.com/oauth/token"
    userinfo_url: "https://your-oauth-provider.com/oauth/userinfo"
    redirect_uri: "https://your-app.com/v1/user/oauth/callback/oauth2"

  oidc:
    display_name: "OIDC"
    client_id: "your_client_id"
    client_secret: "your_client_secret"
    issuer: "https://your-oauth-provider.com/oidc"
    scope: "openid email profile"
    redirect_uri: "https://your-app.com/v1/user/oauth/callback/oidc"

  github:
    # https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app
    type: "github"
    icon: "github"
    display_name: "Github"
    client_id: "your_client_id"
    client_secret: "your_client_secret"
    redirect_uri: "https://your-app.com/v1/user/oauth/callback/github"
```
:::
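After editing the template, the change still has to reach the running container. Since, in a dockerized setup, `service_conf.yaml` is generated from the template at startup, a minimal sketch (run from the `docker` directory; paths assumed from the compose files above) is:

```bash
# Recreate the containers so service_conf.yaml is regenerated from the updated
# service_conf.yaml.template with the current environment variables.
docker compose -f docker-compose.yml up -d --force-recreate
```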
### `user_default_llm`

docs/contribution/_category_.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "label": "Contribution",
  "position": 8,
  "link": {
    "type": "generated-index",
    "description": "Miscellaneous contribution guides."
  }
}
@@ -1,5 +1,14 @@
---
sidebar_position: 1
slug: /contributing
---

# Contribution guidelines

General guidelines for RAGFlow's community contributors.

---

This document offers guidelines and major considerations for submitting your contributions to RAGFlow.

- To report a bug, file a [GitHub issue](https://github.com/infiniflow/ragflow/issues/new/choose) with us.
@@ -1,11 +1,11 @@
---
-sidebar_position: 3
+sidebar_position: 4
slug: /acquire_ragflow_api_key
---

# Acquire RAGFlow API key

-A key is required for the RAGFlow server to authenticate your requests via HTTP or a Python API. This documents provides instructions on obtaining a RAGFlow API key.
+An API key is required for the RAGFlow server to authenticate your HTTP/Python or MCP requests. This document provides instructions on obtaining a RAGFlow API key.

1. Click your avatar in the top right corner of the RAGFlow UI to access the configuration page.
2. Click **API** to switch to the **API** page.
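Once you have the key, it is passed as a bearer token on every request. A minimal sketch follows; the endpoint path is illustrative only (see the HTTP API reference for the actual routes), and `9380` is the `SVR_HTTP_PORT` default from the `.env` file above:

```bash
# Replace the placeholder with the key copied from the API page.
export RAGFLOW_API_KEY="ragflow-xxxxxxx"
curl -H "Authorization: Bearer ${RAGFLOW_API_KEY}" \
     http://127.0.0.1:9380/api/v1/datasets
```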
Some files were not shown because too many files have changed in this diff.