Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)

Compare commits: 617 commits
[The mirrored commit table listed 617 bare SHA-1 hashes, from a5cf6fc546 through 64429578da; its Author and Date columns were empty in the mirror, so the table is omitted here.]
.github/ISSUE_TEMPLATE/bug_report.yml (vendored; 12 changed lines)

```diff
@@ -15,16 +15,16 @@ body:
       value: "Please provide the following information to help us understand the issue."
   - type: input
     attributes:
-      label: Branch name
-      description: Enter the name of the branch where you encountered the issue.
-      placeholder: e.g., main
+      label: RAGFlow workspace code commit ID
+      description: Enter the commit ID associated with the issue.
+      placeholder: e.g., 26d3480e
     validations:
       required: true
   - type: input
     attributes:
-      label: Commit ID
-      description: Enter the commit ID associated with the issue.
-      placeholder: e.g., c3b2a1
+      label: RAGFlow image version
+      description: Enter the image version(shown in RAGFlow UI, `System` page) associated with the issue.
+      placeholder: e.g., 26d3480e(v0.13.0~174)
     validations:
       required: true
   - type: textarea
```
.github/workflows/release.yml (vendored; new file, 124 lines)

```yaml
name: release

on:
  schedule:
    - cron: '0 13 * * *'  # This schedule runs every 13:00:00Z(21:00:00+08:00)
  # The "create tags" trigger is specifically focused on the creation of new tags, while the "push tags" trigger is activated when tags are pushed, including both new tag creations and updates to existing tags.
  create:
    tags:
      - "v*.*.*"  # normal release
      - "nightly" # the only one mutable tag

# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  release:
    runs-on: [ "self-hosted", "overseas" ]
    steps:
      - name: Ensure workspace ownership
        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

      # https://github.com/actions/checkout/blob/v3/README.md
      - name: Check out code
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
          fetch-depth: 0
          fetch-tags: true

      - name: Prepare release body
        run: |
          if [[ $GITHUB_EVENT_NAME == 'create' ]]; then
            RELEASE_TAG=${GITHUB_REF#refs/tags/}
            if [[ $RELEASE_TAG == 'nightly' ]]; then
              PRERELEASE=true
            else
              PRERELEASE=false
            fi
            echo "Workflow triggered by create tag: $RELEASE_TAG"
          else
            RELEASE_TAG=nightly
            PRERELEASE=true
            echo "Workflow triggered by schedule"
          fi
          echo "RELEASE_TAG=$RELEASE_TAG" >> $GITHUB_ENV
          echo "PRERELEASE=$PRERELEASE" >> $GITHUB_ENV
          RELEASE_DATETIME=$(date --rfc-3339=seconds)
          echo Release $RELEASE_TAG created from $GITHUB_SHA at $RELEASE_DATETIME > release_body.md

      - name: Move the existing mutable tag
        # https://github.com/softprops/action-gh-release/issues/171
        run: |
          git fetch --tags
          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
            # Determine if a given tag exists and matches a specific Git commit.
            # actions/checkout@v4 fetch-tags doesn't work when triggered by schedule
            if [ "$(git rev-parse -q --verify "refs/tags/$RELEASE_TAG")" = "$GITHUB_SHA" ]; then
              echo "mutable tag $RELEASE_TAG exists and matches $GITHUB_SHA"
            else
              git tag -f $RELEASE_TAG $GITHUB_SHA
              git push -f origin $RELEASE_TAG:refs/tags/$RELEASE_TAG
              echo "created/moved mutable tag $RELEASE_TAG to $GITHUB_SHA"
            fi
          fi

      - name: Create or overwrite a release
        # https://github.com/actions/upload-release-asset has been replaced by https://github.com/softprops/action-gh-release
        uses: softprops/action-gh-release@v2
        with:
          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
          prerelease: ${{ env.PRERELEASE }}
          tag_name: ${{ env.RELEASE_TAG }}
          # The body field does not support environment variable substitution directly.
          body_path: release_body.md

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # https://github.com/marketplace/actions/docker-login
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: infiniflow
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Build and push full image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}
          file: Dockerfile
          platforms: linux/amd64

      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Build and push slim image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
          file: Dockerfile
          build-args: LIGHTEN=1
          platforms: linux/amd64

      - name: Build ragflow-sdk
        if: startsWith(github.ref, 'refs/tags/v')
        run: |
          cd sdk/python && \
          poetry build

      - name: Publish package distributions to PyPI
        if: startsWith(github.ref, 'refs/tags/v')
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: dist/
          password: ${{ secrets.PYPI_API_TOKEN }}
          verbose: true
```
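The tag-moving step is the subtle part of this workflow: the mutable `nightly` tag is only force-moved when it no longer points at the current commit. A minimal local sketch of the same check, assuming a clone that already has a `nightly` tag:

```bash
# Sketch of the workflow's tag check, runnable in any clone with a 'nightly' tag.
TAG=nightly
SHA=$(git rev-parse HEAD)
if [ "$(git rev-parse -q --verify "refs/tags/$TAG")" = "$SHA" ]; then
  echo "tag $TAG already points at $SHA; nothing to do"
else
  git tag -f "$TAG" "$SHA"                  # move the tag locally
  git push -f origin "$TAG:refs/tags/$TAG"  # force-push the moved tag
fi
```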
.github/workflows/tests.yml (vendored; new file, 137 lines)

```yaml
name: tests

on:
  push:
    branches:
      - 'main'
      - '*.*.*'
    paths-ignore:
      - 'docs/**'
      - '*.md'
      - '*.mdx'
  pull_request:
    types: [ opened, synchronize, reopened, labeled ]
    paths-ignore:
      - 'docs/**'
      - '*.md'
      - '*.mdx'

# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  ragflow_tests:
    name: ragflow_tests
    # https://docs.github.com/en/actions/using-jobs/using-conditions-to-control-job-execution
    # https://github.com/orgs/community/discussions/26261
    if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'ci') }}
    runs-on: [ "self-hosted", "debug" ]
    steps:
      # https://github.com/hmarr/debug-action
      #- uses: hmarr/debug-action@v2

      - name: Show PR labels
        run: |
          echo "Workflow triggered by ${{ github.event_name }}"
          if [[ ${{ github.event_name }} == 'pull_request' ]]; then
            echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
          fi

      - name: Ensure workspace ownership
        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

      # https://github.com/actions/checkout/issues/1781
      - name: Check out code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      # https://github.com/astral-sh/ruff-action
      - name: Static check with Ruff
        uses: astral-sh/ruff-action@v2
        with:
          version: ">=0.8.2"
          args: "check --ignore E402"

      - name: Build ragflow:nightly-slim
        run: |
          RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
          sudo docker pull ubuntu:22.04
          sudo docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .

      - name: Build ragflow:nightly
        run: |
          sudo docker build --progress=plain --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .

      - name: Start ragflow:nightly-slim
        run: |
          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
          sudo docker compose -f docker/docker-compose.yml up -d

      - name: Stop ragflow:nightly-slim
        if: always()  # always run this step even if previous steps failed
        run: |
          sudo docker compose -f docker/docker-compose.yml down -v

      - name: Start ragflow:nightly
        run: |
          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
          sudo docker compose -f docker/docker-compose.yml up -d

      - name: Run sdk tests against Elasticsearch
        run: |
          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
          export HOST_ADDRESS=http://host.docker.internal:9380
          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
            echo "Waiting for service to be available..."
            sleep 5
          done
          cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

      - name: Run frontend api tests against Elasticsearch
        run: |
          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
          export HOST_ADDRESS=http://host.docker.internal:9380
          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
            echo "Waiting for service to be available..."
            sleep 5
          done
          cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

      - name: Stop ragflow:nightly
        if: always()  # always run this step even if previous steps failed
        run: |
          sudo docker compose -f docker/docker-compose.yml down -v

      - name: Start ragflow:nightly
        run: |
          sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml up -d

      - name: Run sdk tests against Infinity
        run: |
          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
          export HOST_ADDRESS=http://host.docker.internal:9380
          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
            echo "Waiting for service to be available..."
            sleep 5
          done
          cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

      - name: Run frontend api tests against Infinity
        run: |
          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
          export HOST_ADDRESS=http://host.docker.internal:9380
          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
            echo "Waiting for service to be available..."
            sleep 5
          done
          cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

      - name: Stop ragflow:nightly
        if: always()  # always run this step even if previous steps failed
        run: |
          sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml down -v
```
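Each test step gates on a readiness poll before invoking pytest. A standalone sketch of that poll, assuming a local deployment reachable at 127.0.0.1:9380 rather than the workflow's `host.docker.internal`:

```bash
# Sketch: block until the RAGFlow HTTP endpoint answers, then proceed.
HOST_ADDRESS=http://127.0.0.1:9380  # assumed local address for illustration
until curl -s --connect-timeout 5 "$HOST_ADDRESS" > /dev/null; do
  echo "Waiting for service to be available..."
  sleep 5
done
echo "RAGFlow is reachable at $HOST_ADDRESS"
```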
.gitignore (vendored; 2 added lines)

```diff
@@ -35,4 +35,6 @@ rag/res/deepdoc
 sdk/python/ragflow.egg-info/
 sdk/python/build/
 sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
+huggingface.co/
+nltk_data/
```
Dockerfile (199 changed lines)

```diff
@@ -1,69 +1,177 @@
 # base stage
-FROM ubuntu:24.04 AS base
+FROM ubuntu:22.04 AS base
 USER root
+SHELL ["/bin/bash", "-c"]
 
-ENV LIGHTEN=0
+ARG NEED_MIRROR=0
+ARG LIGHTEN=0
+ENV LIGHTEN=${LIGHTEN}
 
 WORKDIR /ragflow
 
-RUN rm -f /etc/apt/apt.conf.d/docker-clean \
-    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+# Copy models downloaded via download_deps.py
+RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
+    cp /huggingface.co/InfiniFlow/huqie/huqie.txt.trie /ragflow/rag/res/ && \
+    tar --exclude='.*' -cf - \
+        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
+        /huggingface.co/InfiniFlow/deepdoc \
+        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
+    if [ "$LIGHTEN" != "1" ]; then \
+        (tar -cf - \
+            /huggingface.co/BAAI/bge-large-zh-v1.5 \
+            /huggingface.co/BAAI/bge-reranker-v2-m3 \
+            /huggingface.co/maidalun1020/bce-embedding-base_v1 \
+            /huggingface.co/maidalun1020/bce-reranker-base_v1 \
+            | tar -xf - --strip-components=2 -C /root/.ragflow) \
+    fi
 
-RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt update && apt-get --no-install-recommends install -y ca-certificates
+# https://github.com/chrismattmann/tika-python
+# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
+    cp -r /deps/nltk_data /root/ && \
+    cp /deps/tika-server-standard-3.0.0.jar /deps/tika-server-standard-3.0.0.jar.md5 /ragflow/ && \
+    cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
 
-# if you located in China, you can use tsinghua mirror to speed up apt
-RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list.d/ubuntu.sources
+ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
+ENV DEBIAN_FRONTEND=noninteractive
 
-RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus \
-    && rm -rf /var/lib/apt/lists/* \
-    && curl -sSL https://install.python-poetry.org | python3 -
+# Setup apt
+# Python package and implicit dependencies:
+# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
+# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
+# python-pptx: default-jdk tika-server-standard-3.0.0.jar
+# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
+# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+    fi; \
+    rm -f /etc/apt/apt.conf.d/docker-clean && \
+    echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
+    chmod 1777 /tmp && \
+    apt update && \
+    apt --no-install-recommends install -y ca-certificates && \
+    apt update && \
+    apt install -y libglib2.0-0 libglx-mesa0 libgl1 && \
+    apt install -y pkg-config libicu-dev libgdiplus && \
+    apt install -y default-jdk && \
+    apt install -y libatk-bridge2.0-0 && \
+    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
+    apt install -y python3-pip pipx nginx unzip curl wget git vim less
 
-RUN curl -o libssl1.deb http://archive.ubuntu.com/ubuntu/pool/main/o/openssl1.0/libssl1.0.0_1.0.2n-1ubuntu5_amd64.deb && dpkg -i libssl1.deb && rm -f libssl1.deb
+RUN if [ "$NEED_MIRROR" == "1" ]; then \
+        pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
+        pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
+    fi; \
+    pipx install poetry; \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        pipx inject poetry poetry-plugin-pypi-mirror; \
+    fi
 
 ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
+ENV PATH=/root/.local/bin:$PATH
 # Configure Poetry
 ENV POETRY_NO_INTERACTION=1
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 ENV POETRY_VIRTUALENVS_CREATE=true
 ENV POETRY_REQUESTS_TIMEOUT=15
+
+# nodejs 12.22 on Ubuntu 22.04 is too old
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
+    apt purge -y nodejs npm && \
+    apt autoremove && \
+    apt update && \
+    apt install -y nodejs cargo
+
+# Add msssql ODBC driver
+# macOS ARM64 environment, install msodbcsql18.
+# general x86_64 environment, install msodbcsql17.
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \
+    curl https://packages.microsoft.com/config/ubuntu/22.04/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
+    apt update && \
+    if [ -n "$ARCH" ] && [ "$ARCH" = "arm64" ]; then \
+        # MacOS ARM64
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql18; \
+    else \
+        # (x86_64)
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql17; \
+    fi || \
+    { echo "Failed to install ODBC driver"; exit 1; }
+
+# Add dependencies of selenium
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
+    unzip /chrome-linux64.zip && \
+    mv chrome-linux64 /opt/chrome && \
+    ln -s /opt/chrome/chrome /usr/local/bin/
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
+    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
+    mv chromedriver /usr/local/bin/ && \
+    rm -f /usr/bin/google-chrome
+
+# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
+# aspose-slides on linux/arm64 is unavailable
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
+    if [ "$(uname -m)" = "x86_64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+    elif [ "$(uname -m)" = "aarch64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
+    fi
 
 # builder stage
 FROM base AS builder
 USER root
 
 WORKDIR /ragflow
 
-RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y nodejs npm cargo && \
-    rm -rf /var/lib/apt/lists/*
-
-COPY web web
-RUN cd web && npm i --force && npm run build
-
 # install dependencies from poetry.lock file
 COPY pyproject.toml poetry.toml poetry.lock ./
 
-RUN --mount=type=cache,target=/root/.cache/pypoetry,sharing=locked \
-    if [ "$LIGHTEN" -eq 0 ]; then \
-        /root/.local/bin/poetry install --sync --no-cache --no-root --with=full; \
+RUN --mount=type=cache,id=ragflow_poetry,target=/root/.cache/pypoetry,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        export POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/; \
+    fi; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        poetry install --no-root; \
     else \
-        /root/.local/bin/poetry install --sync --no-cache --no-root; \
+        poetry install --no-root --with=full; \
     fi
 
+COPY web web
+COPY docs docs
+RUN --mount=type=cache,id=ragflow_npm,target=/root/.npm,sharing=locked \
+    cd web && npm install --force && npm run build
+
+COPY .git /ragflow/.git
+
+RUN version_info=$(git describe --tags --match=v* --first-parent --always); \
+    if [ "$LIGHTEN" == "1" ]; then \
+        version_info="$version_info slim"; \
+    else \
+        version_info="$version_info full"; \
+    fi; \
+    echo "RAGFlow version: $version_info"; \
+    echo $version_info > /ragflow/VERSION
+
 # production stage
 FROM base AS production
 USER root
 
 WORKDIR /ragflow
 
-# Install python packages' dependencies
-# cv2 requires libGL.so.1
-RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
-    rm -rf /var/lib/apt/lists/*
+# Copy Python environment and packages
+ENV VIRTUAL_ENV=/ragflow/.venv
+COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
+
+ENV PYTHONPATH=/ragflow/
 
 COPY web web
 COPY api api
@@ -74,35 +182,12 @@ COPY agent agent
 COPY graphrag graphrag
 COPY pyproject.toml poetry.toml poetry.lock ./
 
-# Copy models downloaded via download_deps.py
-RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar --exclude='.*' -cf - \
-        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
-        /huggingface.co/InfiniFlow/deepdoc \
-        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar -cf - \
-        /huggingface.co/BAAI/bge-large-zh-v1.5 \
-        /huggingface.co/BAAI/bge-reranker-v2-m3 \
-        /huggingface.co/maidalun1020/bce-embedding-base_v1 \
-        /huggingface.co/maidalun1020/bce-reranker-base_v1 \
-        | tar -xf - --strip-components=2 -C /root/.ragflow
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
+COPY docker/entrypoint.sh ./entrypoint.sh
+RUN chmod +x ./entrypoint.sh
 
 # Copy compiled web pages
 COPY --from=builder /ragflow/web/dist /ragflow/web/dist
 
-# Copy Python environment and packages
-ENV VIRTUAL_ENV=/ragflow/.venv
-COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
-ENV PATH="${VIRTUAL_ENV}/bin:/root/.local/bin:${PATH}"
-
-# Download nltk data
-RUN python3 -m nltk.downloader wordnet punkt punkt_tab
-
-ENV PYTHONPATH=/ragflow/
-
-COPY docker/entrypoint.sh ./entrypoint.sh
-RUN chmod +x ./entrypoint.sh
+COPY --from=builder /ragflow/VERSION /ragflow/VERSION
 
 ENTRYPOINT ["./entrypoint.sh"]
```
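The model-copy steps above lean on a tar pipe to relocate nested trees while dropping leading path components. A quick sketch of what `--strip-components` does here, with the paths borrowed from the Dockerfile and the target directory purely illustrative:

```bash
# Sketch: copying /huggingface.co/BAAI/bge-large-zh-v1.5 with two leading
# path components stripped leaves just bge-large-zh-v1.5/ under the target.
mkdir -p /tmp/models
tar -cf - /huggingface.co/BAAI/bge-large-zh-v1.5 \
  | tar -xf - --strip-components=2 -C /tmp/models
ls /tmp/models  # expected: bge-large-zh-v1.5
```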
Dockerfile.deps (new file, 10 lines)

```dockerfile
# This builds an image that contains the resources needed by Dockerfile
#
FROM scratch

# Copy resources downloaded via download_deps.py
COPY chromedriver-linux64-121-0-6167-85 chrome-linux64-121-0-6167-85 cl100k_base.tiktoken libssl1.1_1.1.1f-1ubuntu2_amd64.deb libssl1.1_1.1.1f-1ubuntu2_arm64.deb tika-server-standard-3.0.0.jar tika-server-standard-3.0.0.jar.md5 libssl*.deb /

COPY nltk_data /nltk_data

COPY huggingface.co /huggingface.co
```
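The Tika jar bundled here is what lets `TIKA_SERVER_JAR` in the Dockerfile keep tika-python offline. A hedged sketch of how that plays out at runtime; the sample file name is hypothetical:

```bash
# Sketch: point tika-python at the bundled jar so it starts a local Tika server
# instead of downloading the latest release from Apache on first use.
export TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
python3 -c 'from tika import parser; print((parser.from_file("sample.pdf").get("content") or "")[:200])'
```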
A further build file changed in the same range (its file header was lost in the mirror; the dnf/miniconda base marks it as a separate Dockerfile):

```diff
@@ -26,6 +26,7 @@ RUN dnf install -y nginx
 ADD ./web ./web
 ADD ./api ./api
+ADD ./docs ./docs
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
@@ -37,7 +38,7 @@ RUN dnf install -y openmpi openmpi-devel python3-openmpi
 ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
 ENV LD_LIBRARY_PATH /usr/lib64/openmpi/lib:$LD_LIBRARY_PATH
 RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
-RUN cd ./web && npm i --force && npm run build
+RUN cd ./web && npm i && npm run build
 RUN conda run -n py11 pip install $(grep -ivE "mpi4py" ./requirements.txt) # without mpi4py==3.1.5
 RUN conda run -n py11 pip install redis
@@ -52,6 +53,7 @@ RUN conda run -n py11 python -m nltk.downloader wordnet
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
```
Dockerfile.slim (deleted, 101 lines)

```dockerfile
# base stage
FROM ubuntu:24.04 AS base
USER root

ENV LIGHTEN=1

WORKDIR /ragflow

RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache

RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt update && apt-get --no-install-recommends install -y ca-certificates

# if you located in China, you can use tsinghua mirror to speed up apt
RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list.d/ubuntu.sources

RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus \
    && rm -rf /var/lib/apt/lists/* \
    && curl -sSL https://install.python-poetry.org | python3 -

RUN curl -o libssl1.deb http://archive.ubuntu.com/ubuntu/pool/main/o/openssl1.0/libssl1.0.0_1.0.2n-1ubuntu5_amd64.deb && dpkg -i libssl1.deb && rm -f libssl1.deb

ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1

# Configure Poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_REQUESTS_TIMEOUT=15

# builder stage
FROM base AS builder
USER root

WORKDIR /ragflow

RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y nodejs npm cargo && \
    rm -rf /var/lib/apt/lists/*

COPY web web
RUN cd web && npm i --force && npm run build

# install dependencies from poetry.lock file
COPY pyproject.toml poetry.toml poetry.lock ./

RUN --mount=type=cache,target=/root/.cache/pypoetry,sharing=locked \
    if [ "$LIGHTEN" -eq 0 ]; then \
        /root/.local/bin/poetry install --sync --no-cache --no-root --with=full; \
    else \
        /root/.local/bin/poetry install --sync --no-cache --no-root; \
    fi

# production stage
FROM base AS production
USER root

WORKDIR /ragflow

# Install python packages' dependencies
# cv2 requires libGL.so.1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
    rm -rf /var/lib/apt/lists/*

COPY web web
COPY api api
COPY conf conf
COPY deepdoc deepdoc
COPY rag rag
COPY agent agent
COPY graphrag graphrag
COPY pyproject.toml poetry.toml poetry.lock ./

# Copy models downloaded via download_deps.py
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
    tar --exclude='.*' -cf - \
        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
        /huggingface.co/InfiniFlow/deepdoc \
        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc

# Copy compiled web pages
COPY --from=builder /ragflow/web/dist /ragflow/web/dist

# Copy Python environment and packages
ENV VIRTUAL_ENV=/ragflow/.venv
COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:/root/.local/bin:${PATH}"

# Download nltk data
RUN python3 -m nltk.downloader wordnet punkt punkt_tab

ENV PYTHONPATH=/ragflow/

COPY docker/entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
```
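With Dockerfile.slim removed, the slim edition now comes out of the single remaining Dockerfile. A sketch of the replacement build, using the same build argument and tag the workflows above pass:

```bash
# Sketch: build the slim edition from the unified Dockerfile by passing
# LIGHTEN=1 instead of using the deleted Dockerfile.slim.
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```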
185
README.md
185
README.md
@ -8,20 +8,26 @@
|
|||||||
<a href="./README.md">English</a> |
|
<a href="./README.md">English</a> |
|
||||||
<a href="./README_zh.md">简体中文</a> |
|
<a href="./README_zh.md">简体中文</a> |
|
||||||
<a href="./README_ja.md">日本語</a> |
|
<a href="./README_ja.md">日本語</a> |
|
||||||
<a href="./README_ko.md">한국어</a>
|
<a href="./README_ko.md">한국어</a> |
|
||||||
|
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
|
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||||
|
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
|
||||||
|
</a>
|
||||||
|
<a href="https://demo.ragflow.io" target="_blank">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
|
</a>
|
||||||
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||||
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://demo.ragflow.io" target="_blank">
|
|
||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
|
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.12.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.12.0"></a>
|
|
||||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||||
</a>
|
</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<h4 align="center">
|
<h4 align="center">
|
||||||
@ -34,7 +40,7 @@
|
|||||||
|
|
||||||
<details open>
|
<details open>
|
||||||
<summary></b>📕 Table of Contents</b></summary>
|
<summary></b>📕 Table of Contents</b></summary>
|
||||||
|
|
||||||
- 💡 [What is RAGFlow?](#-what-is-ragflow)
|
- 💡 [What is RAGFlow?](#-what-is-ragflow)
|
||||||
- 🎮 [Demo](#-demo)
|
- 🎮 [Demo](#-demo)
|
||||||
- 📌 [Latest Updates](#-latest-updates)
|
- 📌 [Latest Updates](#-latest-updates)
|
||||||
@ -42,8 +48,8 @@
|
|||||||
- 🔎 [System Architecture](#-system-architecture)
|
- 🔎 [System Architecture](#-system-architecture)
|
||||||
- 🎬 [Get Started](#-get-started)
|
- 🎬 [Get Started](#-get-started)
|
||||||
- 🔧 [Configurations](#-configurations)
|
- 🔧 [Configurations](#-configurations)
|
||||||
- 🪛 [Build the docker image without embedding models](#-build-the-docker-image-without-embedding-models)
|
- 🔧 [Build a docker image without embedding models](#-build-a-docker-image-without-embedding-models)
|
||||||
- 🪚 [Build the docker image including embedding models](#-build-the-docker-image-including-embedding-models)
|
- 🔧 [Build a docker image including embedding models](#-build-a-docker-image-including-embedding-models)
|
||||||
- 🔨 [Launch service from source for development](#-launch-service-from-source-for-development)
|
- 🔨 [Launch service from source for development](#-launch-service-from-source-for-development)
|
||||||
- 📚 [Documentation](#-documentation)
|
- 📚 [Documentation](#-documentation)
|
||||||
- 📜 [Roadmap](#-roadmap)
|
- 📜 [Roadmap](#-roadmap)
|
||||||
@ -54,35 +60,42 @@
|
|||||||
|
|
||||||
## 💡 What is RAGFlow?
|
## 💡 What is RAGFlow?
|
||||||
|
|
||||||
[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.
|
[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document
|
||||||
|
understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models)
|
||||||
|
to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted
|
||||||
|
data.
|
||||||
|
|
||||||
## 🎮 Demo
|
## 🎮 Demo
|
||||||
|
|
||||||
Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
|
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
|
||||||
## 🔥 Latest Updates
|
## 🔥 Latest Updates
|
||||||
|
|
||||||
- 2024-09-29 Optimizes multi-round conversations.
|
- 2024-12-18 Upgrades Document Layout Analysis model in Deepdoc.
|
||||||
- 2024-09-13 Adds search mode for knowledge base Q&A.
|
- 2024-12-04 Adds support for pagerank score in knowledge base.
|
||||||
- 2024-09-09 Adds a medical consultant agent template.
|
- 2024-11-22 Adds more variables to Agent.
|
||||||
|
- 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
|
||||||
- 2024-08-22 Support text to SQL statements through RAG.
|
- 2024-08-22 Support text to SQL statements through RAG.
|
||||||
- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
|
- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
|
||||||
- 2024-07-23 Supports audio file parsing.
|
|
||||||
- 2024-07-08 Supports workflow based on [Graph](./agent/README.md).
|
|
||||||
- 2024-06-27 Supports Markdown and Docx in the Q&A parsing method, extracting images from Docx files, extracting tables from Markdown files.
|
|
||||||
- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
|
|
||||||
|
|
||||||
|
## 🎉 Stay Tuned
|
||||||
|
|
||||||
|
⭐️ Star our repository to stay up-to-date with exciting new features and improvements! Get instant notifications for new
|
||||||
|
releases! 🌟
|
||||||
|
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||||
|
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
## 🌟 Key Features
|
## 🌟 Key Features
|
||||||
|
|
||||||
### 🍭 **"Quality in, quality out"**
|
### 🍭 **"Quality in, quality out"**
|
||||||
|
|
||||||
- [Deep document understanding](./deepdoc/README.md)-based knowledge extraction from unstructured data with complicated formats.
|
- [Deep document understanding](./deepdoc/README.md)-based knowledge extraction from unstructured data with complicated
|
||||||
|
formats.
|
||||||
- Finds "needle in a data haystack" of literally unlimited tokens.
|
- Finds "needle in a data haystack" of literally unlimited tokens.
|
||||||
|
|
||||||
### 🍱 **Template-based chunking**
|
### 🍱 **Template-based chunking**
|
||||||
@ -120,7 +133,8 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
|||||||
- RAM >= 16 GB
|
- RAM >= 16 GB
|
||||||
- Disk >= 50 GB
|
- Disk >= 50 GB
|
||||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||||
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
|
> If you have not installed Docker on your local machine (Windows, Mac, or Linux),
|
||||||
|
see [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||||
|
|
||||||
### 🚀 Start up the server
|
### 🚀 Start up the server
|
||||||
|
|
||||||
@ -139,7 +153,8 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
|||||||
> $ sudo sysctl -w vm.max_map_count=262144
|
> $ sudo sysctl -w vm.max_map_count=262144
|
||||||
> ```
|
> ```
|
||||||
>
|
>
|
||||||
> This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
> This change will be reset after a system reboot. To ensure your change remains permanent, add or update the
|
||||||
|
`vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||||
>
|
>
|
||||||
> ```bash
|
> ```bash
|
||||||
> vm.max_map_count=262144
|
> vm.max_map_count=262144
|
||||||
@ -151,15 +166,21 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
|||||||
$ git clone https://github.com/infiniflow/ragflow.git
|
$ git clone https://github.com/infiniflow/ragflow.git
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Build the pre-built Docker images and start up the server:
|
3. Start up the server using the pre-built Docker images:
|
||||||
> Running the following commands automatically downloads the *dev* version RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_IMAGE` in **docker/.env** to the intended version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.12.0`, before running the following commands.
|
|
||||||
|
> The command below downloads the `v0.15.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download an RAGFlow edition different from `v0.14.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1` for the full edition `v0.14.1`.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow/docker
|
$ cd ragflow
|
||||||
$ docker compose up -d
|
$ docker compose -f docker/docker-compose.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
> The core image is about 9 GB in size and may take a while to load.
|
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||||
|
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||||
|
| v0.15.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||||
|
| v0.15.0-slim | ≈2 | ❌ | Stable release |
|
||||||
|
| nightly | ≈9 | :heavy_check_mark: | *Unstable* nightly build |
|
||||||
|
| nightly-slim | ≈2 | ❌ | *Unstable* nightly build |
|
||||||
|
|
||||||
4. Check the server status after having the server up and running:
|
4. Check the server status after having the server up and running:
|
||||||
|
|
||||||
@@ -170,23 +191,26 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

   _The following output confirms a successful launch of the system:_

   ```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/  \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```

   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal` error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.

   > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.

6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

   > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.

@@ -196,104 +220,120 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

When it comes to system configurations, you will need to manage the following files:

- [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services. The environment variables in this file are automatically populated when the Docker container starts. Any environment variables set within the Docker container will be available for use, allowing you to customize service behavior based on the deployment environment.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.

> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations which can be used as `${ENV_VARS}` in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file.
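
> Conceptually, the substitution behaves as if `envsubst` were run over the template at container start. A sketch for illustration only, not the actual entrypoint logic, and the variable value here is hypothetical:
>
> ```bash
> export MYSQL_PASSWORD=my_secret_password   # hypothetical value set in the container
> envsubst < docker/service_conf.yaml.template > conf/service_conf.yaml
> ```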

To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.
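
A quick sketch of that edit, assuming the default `80:80` mapping is still present and you want to serve on port 8080 instead:

```bash
# Replace the published port in the compose file (check the result before restarting):
sed -i 's/80:80/8080:80/' docker/docker-compose.yml
```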

Updates to the above configurations require a reboot of all containers to take effect:

> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> ```

### Switch doc engine from Elasticsearch to Infinity

RAGFlow uses Elasticsearch by default for storing full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:

1. Stop all running containers:

   ```bash
   $ docker compose -f docker/docker-compose.yml down -v
   ```

2. Set `DOC_ENGINE` in **docker/.env** to `infinity` (see the sketch after this list).

3. Start the containers:

   ```bash
   $ docker compose -f docker/docker-compose.yml up -d
   ```

> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.
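
Since step 2 is a one-line edit, a minimal sketch, assuming **docker/.env** already contains a `DOC_ENGINE=` line:

```bash
# Switch the document engine from Elasticsearch to Infinity:
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' docker/.env
```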

## 🔧 Build a Docker image without embedding models

This image is approximately 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Build a Docker image including embedding models

This image is approximately 9 GB in size. As it includes embedding models, it relies on external LLM services only.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Launch service from source for development

1. Install Poetry, or skip this step if it is already installed:

   ```bash
   pipx install poetry
   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
   ```

2. Clone the source code and install Python dependencies:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   ~/.local/bin/poetry install --sync --no-root --with=full # install RAGFlow dependent python modules
   ```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   ```
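
   A quick way to confirm the aliases resolve as intended (a sketch; `getent` ships with glibc on most Linux systems):

   ```bash
   getent hosts es01 mysql minio redis   # each name should map to 127.0.0.1
   ```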

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:

   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```
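
   To verify the mirror is reachable before any model downloads (a sketch, assuming `curl` is installed):

   ```bash
   curl -sSI "$HF_ENDPOINT" | head -n 1   # expect an HTTP status line
   ```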

5. Launch backend service:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```
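
   Once the script is running, a rough health check, assuming the default backend port `9380` shown in the launch output:

   ```bash
   curl -s http://127.0.0.1:9380 >/dev/null && echo "backend is up"
   ```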

6. Install frontend dependencies:

   ```bash
   cd web
   npm install --force
   ```

7. Launch frontend service:

   ```bash
   npm run dev
   ```

_The following output confirms a successful launch of the system:_

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-42eb-8f6f9b3ad98c)

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

@@ -309,4 +349,5 @@ See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)

## 🙌 Contributing

RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community. If you would like to be a part, review our [Contribution Guidelines](./CONTRIBUTING.md) first.

README_id.md (new file, +322 lines)
@@ -0,0 +1,322 @@

<div align="center">
|
||||||
|
<a href="https://demo.ragflow.io/">
|
||||||
|
<img src="web/src/assets/logo-with-text.png" width="520" alt="Logo ragflow">
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<a href="./README.md">English</a> |
|
||||||
|
<a href="./README_zh.md">简体中文</a> |
|
||||||
|
<a href="./README_ja.md">日本語</a> |
|
||||||
|
<a href="./README_ko.md">한국어</a> |
|
||||||
|
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||||
|
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="Ikuti di X (Twitter)">
|
||||||
|
</a>
|
||||||
|
<a href="https://demo.ragflow.io" target="_blank">
|
||||||
|
<img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
|
</a>
|
||||||
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||||
|
</a>
|
||||||
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
|
||||||
|
</a>
|
||||||
|
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||||
|
<img height="21" src="https://img.shields.io/badge/Lisensi-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="Lisensi">
|
||||||
|
</a>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<h4 align="center">
|
||||||
|
<a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
|
||||||
|
<a href="https://github.com/infiniflow/ragflow/issues/162">Peta Jalan</a> |
|
||||||
|
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||||
|
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||||
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
|
</h4>
|
||||||
|
|
||||||
|
<details open>
|
||||||
|
<summary></b>📕 Daftar Isi</b></summary>
|
||||||
|
|
||||||
|
- 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
|
||||||
|
- 🎮 [Demo](#-demo)
|
||||||
|
- 📌 [Pembaruan Terbaru](#-pembaruan-terbaru)
|
||||||
|
- 🌟 [Fitur Utama](#-fitur-utama)
|
||||||
|
- 🔎 [Arsitektur Sistem](#-arsitektur-sistem)
|
||||||
|
- 🎬 [Mulai](#-mulai)
|
||||||
|
- 🔧 [Konfigurasi](#-konfigurasi)
|
||||||
|
- 🔧 [Membangun Image Docker tanpa Model Embedding](#-membangun-image-docker-tanpa-model-embedding)
|
||||||
|
- 🔧 [Membangun Image Docker dengan Model Embedding](#-membangun-image-docker-dengan-model-embedding)
|
||||||
|
- 🔨 [Meluncurkan aplikasi dari Sumber untuk Pengembangan](#-meluncurkan-aplikasi-dari-sumber-untuk-pengembangan)
|
||||||
|
- 📚 [Dokumentasi](#-dokumentasi)
|
||||||
|
- 📜 [Peta Jalan](#-peta-jalan)
|
||||||
|
- 🏄 [Komunitas](#-komunitas)
|
||||||
|
- 🙌 [Kontribusi](#-kontribusi)
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## 💡 Apa Itu RAGFlow?

[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source berbasis pemahaman dokumen yang mendalam. Platform ini menyediakan alur kerja RAG yang efisien untuk bisnis dengan berbagai skala, menggabungkan LLM (Large Language Models) untuk menyediakan kemampuan tanya-jawab yang benar dan didukung oleh referensi dari data terstruktur kompleks.

## 🎮 Demo

Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Pembaruan Terbaru

- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di Deepdoc.
- 2024-12-04 Mendukung skor pagerank ke basis pengetahuan.
- 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
- 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.
- 2024-08-02 Dukungan GraphRAG yang terinspirasi oleh [graphrag](https://github.com/microsoft/graphrag) dan mind map.

## 🎉 Tetap Terkini

⭐️ Star repositori kami untuk tetap mendapat informasi tentang fitur baru dan peningkatan menarik! 🌟
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 Fitur Utama

### 🍭 **"Kualitas Masuk, Kualitas Keluar"**

- Ekstraksi pengetahuan berbasis pemahaman dokumen mendalam dari data tidak terstruktur dengan format yang rumit.
- Menemukan "jarum di tumpukan data" dengan token yang hampir tidak terbatas.

### 🍱 **Pemotongan Berbasis Template**

- Cerdas dan dapat dijelaskan.
- Banyak pilihan template yang tersedia.

### 🌱 **Referensi yang Didasarkan pada Data untuk Mengurangi Halusinasi**

- Visualisasi pemotongan teks memungkinkan intervensi manusia.
- Tampilan cepat referensi kunci dan referensi yang dapat dilacak untuk mendukung jawaban yang didasarkan pada fakta.

### 🍔 **Kompatibilitas dengan Sumber Data Heterogen**

- Mendukung Word, slide, excel, txt, gambar, salinan hasil scan, data terstruktur, halaman web, dan banyak lagi.

### 🛀 **Alur Kerja RAG yang Otomatis dan Mudah**

- Orkestrasi RAG yang ramping untuk bisnis kecil dan besar.
- LLM yang dapat dikonfigurasi serta model embedding.
- Peringkat ulang berpasangan dengan beberapa pengambilan ulang.
- API intuitif untuk integrasi yang mudah dengan bisnis.

## 🔎 Arsitektur Sistem

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Mulai

### 📝 Prasyarat

- CPU >= 4 inti
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1

### 🚀 Menjalankan Server

1. Pastikan `vm.max_map_count` >= 262144:

   > Untuk memeriksa nilai `vm.max_map_count`:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
   >
   > Jika nilainya kurang dari 262144, setel ulang `vm.max_map_count` ke setidaknya 262144:
   >
   > ```bash
   > # Dalam contoh ini, kita atur menjadi 262144:
   > $ sudo sysctl -w vm.max_map_count=262144
   > ```
   >
   > Perubahan ini akan hilang setelah sistem direboot. Untuk membuat perubahan ini permanen, tambahkan atau perbarui nilai `vm.max_map_count` di **/etc/sysctl.conf**:
   >
   > ```bash
   > vm.max_map_count=262144
   > ```

2. Clone repositori:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```

3. Jalankan server menggunakan image Docker pre-built:

   > Perintah di bawah ini mengunduh edisi `v0.15.0-slim` dari image Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari `v0.15.0-slim`, perbarui variabel `RAGFLOW_IMAGE` di **docker/.env** sebelum menggunakan `docker compose` untuk memulai server. Misalnya, atur `RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0` untuk edisi lengkap `v0.15.0`.

   ```bash
   $ cd ragflow
   $ docker compose -f docker/docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.15.0-slim      | ≈2              | ❌                     | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
   | nightly-slim      | ≈2              | ❌                     | *Unstable* nightly build |

4. Periksa status server setelah server aktif dan berjalan:

   ```bash
   $ docker logs -f ragflow-server
   ```

   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   ```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/  \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```

   > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network abnormal` karena RAGFlow mungkin belum sepenuhnya siap.

5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.

   > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.

6. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui bidang `API_KEY` dengan kunci API yang sesuai.

   > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.

_Sistem telah siap digunakan!_

## 🔧 Konfigurasi

Untuk konfigurasi sistem, Anda perlu mengelola file-file berikut:

- [.env](./docker/.env): Menyimpan pengaturan dasar sistem, seperti `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, dan `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Mengonfigurasi aplikasi backend.
- [docker-compose.yml](./docker/docker-compose.yml): Sistem ini bergantung pada [docker-compose.yml](./docker/docker-compose.yml) untuk memulai.

Untuk memperbarui port HTTP default (80), buka [docker-compose.yml](./docker/docker-compose.yml) dan ubah `80:80` menjadi `<YOUR_SERVING_PORT>:80`.

Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:

> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> ```

## 🔧 Membangun Docker Image tanpa Model Embedding

Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan embedding.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Membangun Docker Image Termasuk Model Embedding

Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hanya bergantung pada aplikasi LLM eksternal.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Meluncurkan Aplikasi dari Sumber untuk Pengembangan

1. Instal Poetry, atau lewati langkah ini jika sudah terinstal:

   ```bash
   pipx install poetry
   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
   ```

2. Clone kode sumber dan instal dependensi Python:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   ~/.local/bin/poetry install --sync --no-root # install modul python RAGFlow
   ```

3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   ```

4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:

   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Jalankan aplikasi backend:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```

6. Instal dependensi frontend:

   ```bash
   cd web
   npm install --force
   ```

7. Jalankan aplikasi frontend:

   ```bash
   npm run dev
   ```

_Output berikut menandakan bahwa sistem berhasil diluncurkan:_

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-42eb-8f6f9b3ad98c)

## 📚 Dokumentasi

- [Quickstart](https://ragflow.io/docs/dev/)
- [Panduan Pengguna](https://ragflow.io/docs/dev/category/guides)
- [Referensi](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

Lihat [Roadmap RAGFlow 2024](https://github.com/infiniflow/ragflow/issues/162)

## 🏄 Komunitas

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Kontribusi

RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas. Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).

README_ja.md (111 lines changed)
@@ -8,23 +8,29 @@

<a href="./README.md">English</a> |
|
<a href="./README.md">English</a> |
|
||||||
<a href="./README_zh.md">简体中文</a> |
|
<a href="./README_zh.md">简体中文</a> |
|
||||||
<a href="./README_ja.md">日本語</a> |
|
<a href="./README_ja.md">日本語</a> |
|
||||||
<a href="./README_ko.md">한국어</a>
|
<a href="./README_ko.md">한국어</a> |
|
||||||
|
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
|
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||||
|
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
|
||||||
|
</a>
|
||||||
|
<a href="https://demo.ragflow.io" target="_blank">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
|
</a>
|
||||||
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||||
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://demo.ragflow.io" target="_blank">
|
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
|
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
</a>
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.12.0-brightgreen"
|
|
||||||
alt="docker pull infiniflow/ragflow:v0.12.0"></a>
|
|
||||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
|
||||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
|
||||||
</a>
|
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|
||||||
<h4 align="center">
|
<h4 align="center">
|
||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||||
@@ -42,22 +48,24 @@

デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 最新情報

- 2024-12-18 Deepdoc のドキュメント レイアウト分析モデルをアップグレードします。
- 2024-12-04 ナレッジ ベースへのページランク スコアをサポートしました。
- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインド マップをサポートします。

## 🎉 続きを楽しみに

⭐️ リポジトリをスター登録して、エキサイティングな新機能やアップデートを最新の状態に保ちましょう!すべての新しいリリースに関する即時通知を受け取れます! 🌟
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 主な特徴

@@ -134,15 +142,19 @@

3. ビルド済みの Docker イメージをダウンロードし、サーバーを起動する:

   > 以下のコマンドは、RAGFlow Docker イメージの v0.15.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.15.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.15.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0 と設定します。

   ```bash
   $ cd ragflow
   $ docker compose -f docker/docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.15.0-slim      | ≈2              | ❌                     | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
   | nightly-slim      | ≈2              | ❌                     | *Unstable* nightly build |

4. サーバーを立ち上げた後、サーバーの状態を確認する:

@@ -168,7 +180,7 @@

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。

   > デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。

6. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

   > 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。

@@ -179,57 +191,70 @@

システムコンフィグに関しては、以下のファイルを管理する必要がある:

- [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
- [service_conf.yaml.template](./docker/service_conf.yaml.template): バックエンドのサービスを設定します。
- [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存している。

[.env](./docker/.env) ファイルの変更が [service_conf.yaml.template](./docker/service_conf.yaml.template) ファイルの内容と一致していることを確認する必要があります。

> [./docker/README](./docker/README.md) ファイルには、service_conf.yaml.template ファイルで ${ENV_VARS} として使用できる環境設定とサービス構成の詳細な説明が含まれています。

デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。

> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
>
> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> ```

### Elasticsearch から Infinity にドキュメントエンジンを切り替えます

RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトルを保存します。[Infinity](https://github.com/infiniflow/infinity/) に切り替えるには、次の手順に従います。

1. 実行中のすべてのコンテナを停止するには:

   ```bash
   $ docker compose -f docker/docker-compose.yml down -v
   ```

2. **docker/.env** の `DOC_ENGINE` を `infinity` に設定します。

3. コンテナを起動します:

   ```bash
   $ docker compose -f docker/docker-compose.yml up -d
   ```

> [!WARNING]
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。

## 🔧 ソースコードでDockerイメージを作成(埋め込みモデルなし)

この Docker イメージのサイズは約 2GB で、外部の大モデルと埋め込みサービスに依存しています。

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 ソースコードをコンパイルしたDockerイメージ(埋め込みモデルを含む)

この Docker のサイズは約 9GB で、埋め込みモデルを含むため、外部の大モデルサービスのみが必要です。

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 ソースコードからサービスを起動する方法

1. Poetry をインストールする。すでにインストールされている場合は、このステップをスキップしてください:

   ```bash
   pipx install poetry
   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
   ```

2. ソースコードをクローンし、Python の依存関係をインストールする:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   ~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
   ```

@@ -238,11 +263,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

   docker compose -f docker/docker-compose-base.yml up -d
   ```

   `/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   ```

4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:

@@ -262,8 +286,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

   cd web
   npm install --force
   ```

7. フロントエンドサービスを起動する:

   ```bash
   npm run dev
   ```

@@ -275,7 +298,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

## 📚 ドキュメンテーション

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

README_ko.md (114 lines changed)
@@ -9,21 +9,28 @@

<a href="./README_zh.md">简体中文</a> |
|
<a href="./README_zh.md">简体中文</a> |
|
||||||
<a href="./README_ja.md">日本語</a> |
|
<a href="./README_ja.md">日本語</a> |
|
||||||
<a href="./README_ko.md">한국어</a> |
|
<a href="./README_ko.md">한국어</a> |
|
||||||
|
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
|
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||||
|
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
|
||||||
|
</a>
|
||||||
|
<a href="https://demo.ragflow.io" target="_blank">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
|
</a>
|
||||||
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||||
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://demo.ragflow.io" target="_blank">
|
|
||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
|
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.12.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.12.0"></a>
|
|
||||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||||
</a>
|
</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|
||||||
<h4 align="center">
|
<h4 align="center">
|
||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||||
@@ -43,30 +50,30 @@

데모를 [https://demo.ragflow.io](https://demo.ragflow.io)에서 실행해 보세요.
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 업데이트

- 2024-12-18 Deepdoc의 문서 레이아웃 분석 모델 업그레이드.
- 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.
- 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.
- 2024-08-02: [graphrag](https://github.com/microsoft/graphrag)와 마인드맵에서 영감을 받은 GraphRAG를 지원합니다.

## 🎉 계속 지켜봐 주세요

⭐️우리의 저장소를 즐겨찾기에 등록하여 흥미로운 새로운 기능과 업데이트를 최신 상태로 유지하세요! 모든 새로운 릴리스에 대한 즉시 알림을 받으세요! 🌟
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 주요 기능

@@ -140,15 +147,19 @@

3. 미리 빌드된 Docker 이미지로 서버를 시작하세요:

   > 아래 명령어는 RAGFlow Docker 이미지의 v0.15.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.15.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.15.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.15.0으로 설정합니다.

   ```bash
   $ cd ragflow
   $ docker compose -f docker/docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.15.0-slim      | ≈2              | ❌                     | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
   | nightly-slim      | ≈2              | ❌                     | *Unstable* nightly build |

4. 서버가 시작된 후 서버 상태를 확인하세요:

@@ -170,11 +181,11 @@

    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```

   > 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network abnormal` 오류가 발생할 수 있습니다.

5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.

   > 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.

6. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.

   > 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.

_이제 쇼가 시작됩니다!_

@@ -184,57 +195,68 @@

시스템 설정과 관련하여 다음 파일들을 관리해야 합니다:

- [.env](./docker/.env): `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, `MINIO_PASSWORD`와 같은 시스템의 기본 설정을 포함합니다.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): 백엔드 서비스를 구성합니다.
- [docker-compose.yml](./docker/docker-compose.yml): 시스템은 [docker-compose.yml](./docker/docker-compose.yml)을 사용하여 시작됩니다.

[.env](./docker/.env) 파일의 변경 사항이 [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일의 내용과 일치하도록 해야 합니다.

> [./docker/README](./docker/README.md) 파일은 service_conf.yaml.template 파일에서 ${ENV_VARS}로 사용할 수 있는 환경 설정과 서비스 구성에 대한 자세한 설명을 제공합니다.

기본 HTTP 서비스 포트(80)를 업데이트하려면 [docker-compose.yml](./docker/docker-compose.yml) 파일에서 `80:80`을 `<YOUR_SERVING_PORT>:80`으로 변경하세요.

> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
>
> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> ```

### Elasticsearch 에서 Infinity 로 문서 엔진 전환

RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및 벡터를 저장합니다. [Infinity](https://github.com/infiniflow/infinity/) 로 전환하려면 다음 절차를 따르십시오.

1. 실행 중인 모든 컨테이너를 중지합니다.

   ```bash
   $ docker compose -f docker/docker-compose.yml down -v
   ```

2. **docker/.env**의 `DOC_ENGINE` 을 `infinity` 로 설정합니다.

3. 컨테이너를 시작합니다:

   ```bash
   $ docker compose -f docker/docker-compose.yml up -d
   ```

> [!WARNING]
> Linux/arm64 시스템에서 Infinity로 전환하는 것은 공식적으로 지원되지 않습니다.

## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함하지 않음)

이 Docker 이미지의 크기는 약 2GB이며, 외부 대형 모델과 임베딩 서비스에 의존합니다.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)

이 Docker의 크기는 약 9GB이며, 이미 임베딩 모델을 포함하고 있으므로 외부 대형 모델 서비스에만 의존하면 됩니다.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 소스 코드로 서비스를 시작합니다.

1. Poetry를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:

   ```bash
   pipx install poetry
   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
   ```

2. 소스 코드를 클론하고 Python 의존성을 설치합니다:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   ~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
   ```

@@ -243,11 +265,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

   docker compose -f docker/docker-compose-base.yml up -d
   ```

   `/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   ```

4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:

@@ -267,8 +288,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

   cd web
   npm install --force
   ```

7. 프론트엔드 서비스를 시작합니다:

   ```bash
   npm run dev
   ```

@@ -280,7 +300,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

## 📚 문서

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

README_zh.md (126 lines changed)
@@ -8,22 +8,29 @@

<a href="./README.md">English</a> |
|
<a href="./README.md">English</a> |
|
||||||
<a href="./README_zh.md">简体中文</a> |
|
<a href="./README_zh.md">简体中文</a> |
|
||||||
<a href="./README_ja.md">日本語</a> |
|
<a href="./README_ja.md">日本語</a> |
|
||||||
<a href="./README_ko.md">한국어</a>
|
<a href="./README_ko.md">한국어</a> |
|
||||||
|
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
|
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
|
||||||
|
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
|
||||||
|
</a>
|
||||||
|
<a href="https://demo.ragflow.io" target="_blank">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
|
</a>
|
||||||
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.15.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.15.0">
|
||||||
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://demo.ragflow.io" target="_blank">
|
|
||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
|
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.12.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.12.0"></a>
|
|
||||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||||
</a>
|
</a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
|
||||||
<h4 align="center">
|
<h4 align="center">
|
||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||||
```diff
@@ -41,21 +48,25 @@
 Try the demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>


 ## 🔥 Latest Updates

-- 2024-09-29 Optimized multi-turn dialogue.
-- 2024-09-13 Added a search mode for knowledge-base Q&A.
-- 2024-09-09 Added a medical consultation template to the Agent.
+- 2024-12-18 Upgraded the document layout analysis model in Deepdoc.
+- 2024-12-04 Added support for Pagerank scores in knowledge bases.
+- 2024-11-22 Improved the definition and use of variables in the Agent.
+- 2024-11-01 Added keyword extraction and related-question generation to parsed chunks to improve recall accuracy.
 - 2024-08-22 Supports converting natural language to SQL statements with RAG.
 - 2024-08-02 Supports GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.
-- 2024-07-23 Supports parsing audio files.
-- 2024-07-08 Supports Agentic RAG: workflows based on [Graph](./agent/README.md).
-- 2024-06-27 The Q&A parsing method supports Markdown and Docx files, and supports extracting images from Docx files and tables from Markdown files.
-- 2024-05-23 Implemented [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.
+
+## 🎉 Stay Tuned
+
+⭐️ Star RAGFlow in the upper-right corner to receive real-time notifications of new releases! 🌟
+<div align="center" style="margin-top:20px;margin-bottom:20px;">
+<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
+</div>


 ## 🌟 Key Features
```
````diff
@@ -132,15 +143,24 @@
 3. Enter the **docker** folder and use the pre-built Docker images to start the server:

+   > Running the command below automatically downloads the RAGFlow slim Docker image `v0.15.0-slim`. See the table below for descriptions of the different Docker releases. To download a release other than `v0.15.0-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the services with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1` to download the full `v0.14.1` release.

 ```bash
-$ cd ragflow/docker
-$ chmod +x ./entrypoint.sh
-$ docker compose -f docker-compose.yml up -d
+$ cd ragflow
+$ docker compose -f docker/docker-compose.yml up -d
 ```

-> Note that running the command above automatically downloads the development build of the RAGFlow Docker image. To download and run a specific release, find the `RAGFLOW_IMAGE` variable in docker/.env, change it to the corresponding version, e.g. `RAGFLOW_IMAGE=infiniflow/ragflow:v0.12.0`, and then run the command above.
-
-> The core image is about 9 GB and may take some time to pull. Please be patient.
+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.15.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.15.0-slim      | ≈2              | ❌                    | Stable release           |
+| nightly           | ≈9              | :heavy_check_mark:    | *Unstable* nightly build |
+| nightly-slim      | ≈2              | ❌                    | *Unstable* nightly build |
+
+> [!TIP]
+> If you have trouble pulling the Docker image, choose the corresponding Huawei Cloud or Alibaba Cloud mirror in **docker/.env**, following the comments on the `RAGFLOW_IMAGE` variable.
+> - Huawei Cloud image: `swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
+> - Alibaba Cloud image: `registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`

 4. Once the server is up, confirm its status again:
````
````diff
@@ -162,11 +182,11 @@
     * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
 ```
-> If you skip this confirmation step and log in to RAGFlow directly, your browser may prompt `network abnormal` / `网络异常`, because RAGFlow may not be fully started yet.
+> If you skip this confirmation step and log in to RAGFlow directly, your browser may prompt `network anormal` / `网络异常`, because RAGFlow may not be fully started yet.

 5. Enter your server's IP address in your browser and log in to RAGFlow.
 > In the example above, just enter http://IP_OF_YOUR_MACHINE: with an unmodified configuration there is no need to enter a port (the default HTTP serving port is 80).
-6. Configure the LLM factory in the `user_default_llm` section of [service_conf.yaml](./docker/service_conf.yaml), and fill in the `API_KEY` field with the API key for your chosen LLM.
+6. Configure the LLM factory in the `user_default_llm` section of [service_conf.yaml.template](./docker/service_conf.yaml.template), and fill in the `API_KEY` field with the API key for your chosen LLM.

 > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.
````
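For readers scripting this confirmation step, a small reachability probe can stand in for reading the logs. This is a sketch, not repo code; replace `127.0.0.1` with your server's IP, and note that 80 (web UI) and 9380 (API) are the defaults assumed here:

```python
# Probe the default RAGFlow ports before logging in; a closed port usually
# means the server has not fully started yet.
import socket

HOST = "127.0.0.1"  # replace with IP_OF_YOUR_MACHINE
for port in (80, 9380):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(2)
        status = "open" if s.connect_ex((HOST, port)) == 0 else "closed"
        print(f"{HOST}:{port} is {status}")
```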
````diff
@@ -177,59 +197,79 @@
 System configuration involves the following three files:

 - [.env](./docker/.env): holds basic system environment variables such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): configures the backend services.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): configures the backend services.
 - [docker-compose.yml](./docker/docker-compose.yml): the system relies on this file to start up.

-Make sure the variable settings in [.env](./docker/.env) stay consistent with the configuration in [service_conf.yaml](./docker/service_conf.yaml)!
+Make sure the variable settings in [.env](./docker/.env) stay consistent with the configuration in [service_conf.yaml.template](./docker/service_conf.yaml.template)!

 If you cannot access the image site hub.docker.com or the model site huggingface.co, follow the comments in [.env](./docker/.env) to modify `RAGFLOW_IMAGE` and `HF_ENDPOINT`.

-> The [./docker/README](./docker/README.md) file provides detailed information on environment variable settings and service configuration. Be **sure** to keep the environment variable values listed in [./docker/README](./docker/README.md) consistent with the system configuration in [service_conf.yaml](./docker/service_conf.yaml).
+> [./docker/README](./docker/README.md) explains the environment variable settings and service configuration used by [service_conf.yaml.template](./docker/service_conf.yaml.template).

 To change the default HTTP serving port (80), update `80:80` to `<YOUR_SERVING_PORT>:80` in [docker-compose.yml](./docker/docker-compose.yml).

 > All system configuration changes require a system restart to take effect:
 >
 > ```bash
-> $ docker compose -f docker-compose.yml up -d
+> $ docker compose -f docker/docker-compose.yml up -d
 > ```
````
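A rough way to automate the consistency warning above, sketched under the assumption that **docker/.env** uses plain `KEY=value` lines and that the service config exposes a `mysql.password` field; adjust paths and keys to your checkout (requires PyYAML):

```python
# Cross-check one variable between docker/.env and the service config.
# The yaml key layout here is an assumption; inspect your file first.
import yaml

env = {}
with open("docker/.env") as f:
    for raw in f:
        line = raw.strip()
        if line and not line.startswith("#") and "=" in line:
            key, _, value = line.partition("=")
            env[key.strip()] = value.strip()

with open("docker/service_conf.yaml.template") as f:
    conf = yaml.safe_load(f)

print(".env MYSQL_PASSWORD      :", env.get("MYSQL_PASSWORD"))
print("yaml mysql.password field:", (conf.get("mysql") or {}).get("password"))
```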
````diff
-## 🪛 Build a Docker image from source (without embedding models)
+### Switch the document engine from Elasticsearch to Infinity

-This Docker image is about 1 GB and relies on external LLM and embedding services.
+RAGFlow uses Elasticsearch by default to store text and vector data. To switch to [Infinity](https://github.com/infiniflow/infinity/), proceed as follows:
+
+1. Stop all running containers:
+
+   ```bash
+   $ docker compose -f docker/docker-compose.yml down -v
+   ```
+
+2. In the **docker/.env** file, set `DOC_ENGINE` to `infinity`.
+
+3. Start the containers:
+
+   ```bash
+   $ docker compose -f docker/docker-compose.yml up -d
+   ```
+
+> [!WARNING]
+> Infinity is not yet officially supported on Linux/arm64 machines.
+
+
+## 🔧 Build a Docker image from source (without embedding models)
+
+This Docker image is about 2 GB and relies on external LLM and embedding services.

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub
-python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

-## 🪚 Build a Docker image from source (with embedding models)
+## 🔧 Build a Docker image from source (with embedding models)

 This Docker image is about 9 GB. Since it includes embedding models, it relies only on an external LLM service.

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub
-python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```
````
````diff
 ## 🔨 Launch the service from source

 1. Install Poetry. Skip this step if it is already installed:
 ```bash
-curl -sSL https://install.python-poetry.org | python3 -
+pipx install poetry
+pipx inject poetry poetry-plugin-pypi-mirror
+export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
+export POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
 ```

 2. Clone the source code and install the Python dependencies:
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
 ~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
 ```
````
|
|||||||
docker compose -f docker/docker-compose-base.yml up -d
|
docker compose -f docker/docker-compose-base.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
在 `/etc/hosts` 中添加以下代码,将 **docker/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
在 `/etc/hosts` 中添加以下代码,将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
||||||
```
|
```
|
||||||
127.0.0.1 es01 mysql minio redis
|
127.0.0.1 es01 infinity mysql minio redis
|
||||||
```
|
```
|
||||||
在文件 **docker/service_conf.yaml** 中,对照 **docker/.env** 的配置将 mysql 端口更新为 `5455`,es 端口更新为 `1200`。
|
|
||||||
|
|
||||||
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
||||||
|
|
||||||
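A quick sanity check for the `/etc/hosts` entry above, as a sketch: each service host should resolve to `127.0.0.1` before starting the backend from source.

```python
# Verify that the service hostnames resolve locally, mirroring the
# /etc/hosts line added above.
import socket

for host in ["es01", "infinity", "mysql", "minio", "redis"]:
    try:
        print(f"{host} -> {socket.gethostbyname(host)}")
    except socket.gaierror:
        print(f"{host} -> not resolved; check /etc/hosts")
```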
````diff
@@ -262,8 +301,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
 cd web
 npm install --force
 ```
-7. Configure the frontend by updating `proxy.target` in **.umirc.ts** to `http://127.0.0.1:9380`:
-8. Start the frontend service:
+7. Start the frontend service:
 ```bash
 npm run dev
 ```

@@ -275,7 +313,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
 ## 📚 Documentation

 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/user-guides)
+- [User guide](https://ragflow.io/docs/dev/category/guides)
 - [References](https://ragflow.io/docs/dev/category/references)
 - [FAQ](https://ragflow.io/docs/dev/faq)
````
````diff
@@ -10,7 +10,7 @@ It is used to compose a complex work flow or agent.
 And this graph is beyond the DAG that we can use circles to describe our agent or work flow.
 Under this folder, we propose a test tool ./test/client.py which can test the DSLs such as json files in folder ./test/dsl_examples.
 Please use this client at the same folder you start RAGFlow. If it's run by Docker, please go into the container before running the client.
-Otherwise, correct configurations in conf/service_conf.yaml is essential.
+Otherwise, correct configurations in service_conf.yaml is essential.

 ```bash
 PYTHONPATH=path/to/ragflow python graph/test/client.py -h
````
````diff
@@ -11,7 +11,7 @@
 Under this folder, we provide a test tool ./test/client.py,
 which can test DSL files like those in the folder ./test/dsl_examples.
 Use this client from the same folder where you start RAGFlow. If RAGFlow is run via Docker, enter the container before running the client.
-Otherwise, a correct configuration of the conf/service_conf.yaml file is essential.
+Otherwise, a correct configuration of the service_conf.yaml file is essential.

 ```bash
 PYTHONPATH=path/to/ragflow python graph/test/client.py -h
````
```diff
@@ -0,0 +1,2 @@
+from beartype.claw import beartype_this_package
+beartype_this_package()
```
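This two-line module installs a beartype import hook for the whole package. A minimal sketch of the effect, assuming the documented behavior of the `beartype` package: once the hook is active, annotated callables in the package are type-checked at call time. The `greet` function below is a hypothetical stand-in, not RAGFlow code:

```python
# Hypothetical illustration: under beartype, a call that violates the
# annotations raises an exception instead of silently proceeding.
from beartype import beartype


@beartype
def greet(name: str) -> str:
    return f"Hello, {name}"


print(greet("RAGFlow"))        # fine
try:
    greet(42)                  # type violation
except Exception as exc:
    print(type(exc).__name__)  # BeartypeCallHintParamViolation
```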
agent/canvas.py (138 changed lines)

```diff
@@ -13,18 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import importlib
+import logging
 import json
-import traceback
 from abc import ABC
 from copy import deepcopy
 from functools import partial

-import pandas as pd
-
 from agent.component import component_class
 from agent.component.base import ComponentBase
-from agent.settings import flow_logger, DEBUG


 class Canvas(ABC):
@@ -139,7 +134,8 @@ class Canvas(ABC):
             "components": {}
         }
         for k in self.dsl.keys():
-            if k in ["components"]:continue
+            if k in ["components"]:
+                continue
             dsl[k] = deepcopy(self.dsl[k])

         for k, cpn in self.components.items():
@@ -162,8 +158,13 @@ class Canvas(ABC):
             self.components[k]["obj"].reset()
         self._embed_id = ""

+    def get_compnent_name(self, cid):
+        for n in self.dsl["graph"]["nodes"]:
+            if cid == n["id"]:
+                return n["data"]["name"]
+        return ""
+
     def run(self, **kwargs):
-        ans = ""
         if self.answer:
             cpn_id = self.answer[0]
             self.answer.pop(0)
@@ -173,71 +174,80 @@ class Canvas(ABC):
                 ans = ComponentBase.be_output(str(e))
             self.path[-1].append(cpn_id)
             if kwargs.get("stream"):
-                assert isinstance(ans, partial)
-                return ans
-            self.history.append(("assistant", ans.to_dict("records")))
-            return ans
+                for an in ans():
+                    yield an
+            else:
+                yield ans
+            return

         if not self.path:
             self.components["begin"]["obj"].run(self.history, **kwargs)
             self.path.append(["begin"])

         self.path.append([])

         ran = -1
+        waiting = []
+        without_dependent_checking = []

         def prepare2run(cpns):
             nonlocal ran, ans
             for c in cpns:
-                if self.path[-1] and c == self.path[-1][-1]: continue
+                if self.path[-1] and c == self.path[-1][-1]:
+                    continue
                 cpn = self.components[c]["obj"]
                 if cpn.component_name == "Answer":
                     self.answer.append(c)
                 else:
-                    if DEBUG: print("RUN: ", c)
-                    if cpn.component_name == "Generate":
+                    logging.debug(f"Canvas.prepare2run: {c}")
+                    if c not in without_dependent_checking:
                         cpids = cpn.get_dependent_components()
-                        if any([c not in self.path[-1] for c in cpids]):
+                        if any([cc not in self.path[-1] for cc in cpids]):
+                            if c not in waiting:
+                                waiting.append(c)
                             continue
-                    ans = cpn.run(self.history, **kwargs)
+                    yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
+                    try:
+                        ans = cpn.run(self.history, **kwargs)
+                    except Exception as e:
+                        logging.exception(f"Canvas.run got exception: {e}")
+                        self.path[-1].append(c)
+                        ran += 1
+                        raise e
                 self.path[-1].append(c)
             ran += 1

-        prepare2run(self.components[self.path[-2][-1]]["downstream"])
+        for m in prepare2run(self.components[self.path[-2][-1]]["downstream"]):
+            yield {"content": m, "running_status": True}

         while 0 <= ran < len(self.path[-1]):
-            if DEBUG: print(ran, self.path)
+            logging.debug(f"Canvas.run: {ran} {self.path}")
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
-            if not cpn["downstream"]: break
+            if not cpn["downstream"]:
+                break

             loop = self._find_loop()
-            if loop: raise OverflowError(f"Too much loops: {loop}")
+            if loop:
+                raise OverflowError(f"Too much loops: {loop}")

             if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                 switch_out = cpn["obj"].output()[1].iloc[0, 0]
                 assert switch_out in self.components, \
                     "{}'s output: {} not valid.".format(cpn_id, switch_out)
-                try:
-                    prepare2run([switch_out])
-                except Exception as e:
-                    for p in [c for p in self.path for c in p][::-1]:
-                        if p.lower().find("answer") >= 0:
-                            self.get_component(p)["obj"].set_exception(e)
-                            prepare2run([p])
-                            break
-                    traceback.print_exc()
-                    break
+                for m in prepare2run([switch_out]):
+                    yield {"content": m, "running_status": True}
                 continue

-            try:
-                prepare2run(cpn["downstream"])
-            except Exception as e:
-                for p in [c for p in self.path for c in p][::-1]:
-                    if p.lower().find("answer") >= 0:
-                        self.get_component(p)["obj"].set_exception(e)
-                        prepare2run([p])
-                        break
-                traceback.print_exc()
-                break
+            for m in prepare2run(cpn["downstream"]):
+                yield {"content": m, "running_status": True}
+
+            if ran >= len(self.path[-1]) and waiting:
+                without_dependent_checking = waiting
+                waiting = []
+                for m in prepare2run(without_dependent_checking):
+                    yield {"content": m, "running_status": True}
+                ran -= 1

         if self.answer:
             cpn_id = self.answer[0]
@@ -246,11 +256,13 @@ class Canvas(ABC):
             self.path[-1].append(cpn_id)
             if kwargs.get("stream"):
                 assert isinstance(ans, partial)
-                return ans
+                for an in ans():
+                    yield an
+            else:
+                yield ans

-        self.history.append(("assistant", ans.to_dict("records")))
-        return ans
+        else:
+            raise Exception("The dialog flow has no way to interact with you. Please add an 'Interact' component to the end of the flow.")

     def get_component(self, cpn_id):
         return self.components[cpn_id]
@@ -260,9 +272,11 @@ class Canvas(ABC):

     def get_history(self, window_size):
         convs = []
-        for role, obj in self.history[(window_size + 1) * -1:]:
-            convs.append({"role": role, "content": (obj if role == "user" else
-                                                    '\n'.join(pd.DataFrame(obj)['content']))})
+        for role, obj in self.history[window_size * -1:]:
+            if isinstance(obj, list) and obj and all([isinstance(o, dict) for o in obj]):
+                convs.append({"role": role, "content": '\n'.join([str(s.get("content", "")) for s in obj])})
+            else:
+                convs.append({"role": role, "content": str(obj)})
         return convs

     def add_user_input(self, question):
@@ -276,19 +290,22 @@ class Canvas(ABC):

     def _find_loop(self, max_loops=6):
         path = self.path[-1][::-1]
-        if len(path) < 2: return False
+        if len(path) < 2:
+            return False

         for i in range(len(path)):
             if path[i].lower().find("answer") >= 0:
                 path = path[:i]
                 break

-        if len(path) < 2: return False
+        if len(path) < 2:
+            return False

-        for l in range(2, len(path) // 2):
-            pat = ",".join(path[0:l])
+        for loc in range(2, len(path) // 2):
+            pat = ",".join(path[0:loc])
             path_str = ",".join(path)
-            if len(pat) >= len(path_str): return False
+            if len(pat) >= len(path_str):
+                return False
             loop = max_loops
             while path_str.find(pat) == 0 and loop >= 0:
                 loop -= 1
@@ -296,10 +313,23 @@ class Canvas(ABC):
                 return False
             path_str = path_str[len(pat)+1:]
         if loop < 0:
-            pat = " => ".join([p.split(":")[0] for p in path[0:l]])
+            pat = " => ".join([p.split(":")[0] for p in path[0:loc]])
             return pat + " => " + pat

         return False

     def get_prologue(self):
         return self.components["begin"]["obj"]._param.prologue

+    def set_global_param(self, **kwargs):
+        for k, v in kwargs.items():
+            for q in self.components["begin"]["obj"]._param.query:
+                if k != q["key"]:
+                    continue
+                q["value"] = v
+
+    def get_preset_param(self):
+        return self.components["begin"]["obj"]._param.query
+
+    def get_component_input_elements(self, cpnnm):
+        return self.components[cpnnm]["obj"].get_input_elements()
```
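With `run()` now a generator, callers consume a stream of status updates and answers rather than a single return value. A minimal driver sketch, assuming a `canvas` already built from a DSL (the object and question are illustrative, not from the repo):

```python
# Iterate the generator-based Canvas.run: dicts flagged with
# "running_status" are progress messages; everything else is answer output.
canvas.add_user_input("What is RAGFlow?")
for item in canvas.run(stream=True):
    if isinstance(item, dict) and item.get("running_status"):
        print("status:", item["content"])  # e.g. "*'Retrieval'* is running...🕞"
    else:
        print("answer:", item)
```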
```diff
@@ -28,9 +28,84 @@ from .wencai import WenCai, WenCaiParam
 from .jin10 import Jin10, Jin10Param
 from .tushare import TuShare, TuShareParam
 from .akshare import AkShare, AkShareParam
+from .crawler import Crawler, CrawlerParam
+from .invoke import Invoke, InvokeParam
+from .template import Template, TemplateParam
+from .email import Email, EmailParam


 def component_class(class_name):
     m = importlib.import_module("agent.component")
     c = getattr(m, class_name)
     return c
+
+
+__all__ = [
+    "Begin",
+    "BeginParam",
+    "Generate",
+    "GenerateParam",
+    "Retrieval",
+    "RetrievalParam",
+    "Answer",
+    "AnswerParam",
+    "Categorize",
+    "CategorizeParam",
+    "Switch",
+    "SwitchParam",
+    "Relevant",
+    "RelevantParam",
+    "Message",
+    "MessageParam",
+    "RewriteQuestion",
+    "RewriteQuestionParam",
+    "KeywordExtract",
+    "KeywordExtractParam",
+    "Concentrator",
+    "ConcentratorParam",
+    "Baidu",
+    "BaiduParam",
+    "DuckDuckGo",
+    "DuckDuckGoParam",
+    "Wikipedia",
+    "WikipediaParam",
+    "PubMed",
+    "PubMedParam",
+    "ArXiv",
+    "ArXivParam",
+    "Google",
+    "GoogleParam",
+    "Bing",
+    "BingParam",
+    "GoogleScholar",
+    "GoogleScholarParam",
+    "DeepL",
+    "DeepLParam",
+    "GitHub",
+    "GitHubParam",
+    "BaiduFanyi",
+    "BaiduFanyiParam",
+    "QWeather",
+    "QWeatherParam",
+    "ExeSQL",
+    "ExeSQLParam",
+    "YahooFinance",
+    "YahooFinanceParam",
+    "WenCai",
+    "WenCaiParam",
+    "Jin10",
+    "Jin10Param",
+    "TuShare",
+    "TuShareParam",
+    "AkShare",
+    "AkShareParam",
+    "Crawler",
+    "CrawlerParam",
+    "Invoke",
+    "InvokeParam",
+    "Template",
+    "TemplateParam",
+    "Email",
+    "EmailParam",
+    "component_class"
+]
```
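The registry function above resolves component classes by name, which is how a DSL node's type string maps to an implementation. A small usage sketch, assuming RAGFlow's packages are importable:

```python
# Look up component classes dynamically by their DSL name.
from agent.component import component_class

crawler_cls = component_class("Crawler")
crawler_param_cls = component_class("CrawlerParam")
print(crawler_cls.component_name)        # "Crawler"
print(crawler_param_cls().extract_type)  # default: "markdown"
```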
```diff
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import arxiv
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


 class ArXivParam(ComponentParamBase):
     """
     Define the ArXiv component parameters.
@@ -65,5 +64,5 @@ class ArXiv(ComponentBase, ABC):
             return ArXiv.be_output("")

         df = pd.DataFrame(arxiv_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {str(df)}")
         return df
```
```diff
@@ -13,13 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import random
+import logging
 from abc import ABC
-from functools import partial
 import pandas as pd
 import requests
 import re
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -64,6 +62,6 @@ class Baidu(ComponentBase, ABC):
             return Baidu.be_output("")

         df = pd.DataFrame(baidu_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {str(df)}")
         return df
```
```diff
@@ -36,7 +36,6 @@ class BaiduFanyiParam(ComponentParamBase):
         self.domain = 'finance'

     def check(self):
-        self.check_positive_integer(self.top_n, "Top N")
         self.check_empty(self.appid, "BaiduFanyi APPID")
         self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
         self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
```
```diff
@@ -17,14 +17,13 @@ from abc import ABC
 import builtins
 import json
 import os
-from copy import deepcopy
+import logging
 from functools import partial
-from typing import List, Dict, Tuple, Union
+from typing import Tuple, Union

 import pandas as pd

 from agent import settings
-from agent.settings import flow_logger, DEBUG

 _FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
 _DEPRECATED_PARAMS = "_deprecated_params"
@@ -36,6 +35,9 @@ class ComponentParamBase(ABC):
     def __init__(self):
         self.output_var_name = "output"
         self.message_history_window_size = 22
+        self.query = []
+        self.inputs = []
+        self.debug_inputs = []

     def set_name(self, name: str):
         self._name = name
@@ -81,7 +83,6 @@ class ComponentParamBase(ABC):
         return {name: True for name in self.get_feeded_deprecated_params()}

     def __str__(self):
-
         return json.dumps(self.as_dict(), ensure_ascii=False)

     def as_dict(self):
@@ -359,13 +360,13 @@ class ComponentParamBase(ABC):

     def _warn_deprecated_param(self, param_name, descr):
         if self._deprecated_params_set.get(param_name):
-            flow_logger.warning(
+            logging.warning(
                 f"{descr} {param_name} is deprecated and ignored in this version."
             )

     def _warn_to_deprecate_param(self, param_name, descr, new_param):
         if self._deprecated_params_set.get(param_name):
-            flow_logger.warning(
+            logging.warning(
                 f"{descr} {param_name} will be deprecated in future release; "
                 f"please use {new_param} instead."
             )
@@ -385,10 +386,14 @@ class ComponentBase(ABC):
         """
         return """{{
             "component_name": "{}",
-            "params": {}
+            "params": {},
+            "output": {},
+            "inputs": {}
         }}""".format(self.component_name,
-                     self._param
-                     )
+                     self._param,
+                     json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
+                     json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False)
+                     )

     def __init__(self, canvas, id, param: ComponentParamBase):
         self._canvas = canvas
@@ -396,9 +401,17 @@ class ComponentBase(ABC):
         self._param = param
         self._param.check()

+    def get_dependent_components(self):
+        cpnts = set([para["component_id"].split("@")[0] for para in self._param.query \
+                     if para.get("component_id") \
+                     and para["component_id"].lower().find("answer") < 0 \
+                     and para["component_id"].lower().find("begin") < 0])
+        return list(cpnts)
+
     def run(self, history, **kwargs):
-        flow_logger.info("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
+        logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                               json.dumps(kwargs, ensure_ascii=False)))
+        self._param.debug_inputs = []
         try:
             res = self._run(history, **kwargs)
             self.set_output(res)
@@ -414,7 +427,8 @@ class ComponentBase(ABC):
     def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
         o = getattr(self._param, self._param.output_var_name)
         if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
-            if not isinstance(o, list): o = [o]
+            if not isinstance(o, list):
+                o = [o]
             o = pd.DataFrame(o)

         if allow_partial or not isinstance(o, partial):
@@ -426,53 +440,112 @@ class ComponentBase(ABC):
             for oo in o():
                 if not isinstance(oo, pd.DataFrame):
                     outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
-                else: outs = oo
+                else:
+                    outs = oo
             return self._param.output_var_name, outs

     def reset(self):
         setattr(self._param, self._param.output_var_name, None)
+        self._param.inputs = []

-    def set_output(self, v: pd.DataFrame):
+    def set_output(self, v):
         setattr(self._param, self._param.output_var_name, v)

     def get_input(self):
-        upstream_outs = []
+        if self._param.debug_inputs:
+            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs])
+
         reversed_cpnts = []
         if len(self._canvas.path) > 1:
             reversed_cpnts.extend(self._canvas.path[-2])
         reversed_cpnts.extend(self._canvas.path[-1])

-        if DEBUG: print(self.component_name, reversed_cpnts[::-1])
+        if self._param.query:
+            self._param.inputs = []
+            outs = []
+            for q in self._param.query:
+                if q.get("component_id"):
+                    if q["component_id"].split("@")[0].lower().find("begin") >= 0:
+                        cpn_id, key = q["component_id"].split("@")
+                        for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                            if p["key"] == key:
+                                outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
+                                self._param.inputs.append({"component_id": q["component_id"],
+                                                           "content": p.get("value", "")})
+                                break
+                        else:
+                            assert False, f"Can't find parameter '{key}' for {cpn_id}"
+                        continue
+
+                    outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
+                    self._param.inputs.append({"component_id": q["component_id"],
+                                               "content": "\n".join(
+                                                   [str(d["content"]) for d in outs[-1].to_dict('records')])})
+                elif q.get("value"):
+                    self._param.inputs.append({"component_id": None, "content": q["value"]})
+                    outs.append(pd.DataFrame([{"content": q["value"]}]))
+            if outs:
+                df = pd.concat(outs, ignore_index=True)
+                if "content" in df:
+                    df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
+                return df
+
+        upstream_outs = []

         for u in reversed_cpnts[::-1]:
-            if self.get_component_name(u) in ["switch", "concentrator"]: continue
+            if self.get_component_name(u) in ["switch", "concentrator"]:
+                continue
             if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
                 o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
                 if o is not None:
+                    o["component_id"] = u
                     upstream_outs.append(o)
                     continue
-            if u not in self._canvas.get_component(self._id)["upstream"]: continue
+            #if self.component_name.lower()!="answer" and u not in self._canvas.get_component(self._id)["upstream"]: continue
             if self.component_name.lower().find("switch") < 0 \
                     and self.get_component_name(u) in ["relevant", "categorize"]:
                 continue
             if u.lower().find("answer") >= 0:
                 for r, c in self._canvas.history[::-1]:
                     if r == "user":
-                        upstream_outs.append(pd.DataFrame([{"content": c}]))
+                        upstream_outs.append(pd.DataFrame([{"content": c, "component_id": u}]))
                         break
                 break
             if self.component_name.lower().find("answer") >= 0 and self.get_component_name(u) in ["relevant"]:
                 continue
             o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
             if o is not None:
+                o["component_id"] = u
                 upstream_outs.append(o)
             break

-        if upstream_outs:
-            df = pd.concat(upstream_outs, ignore_index=True)
-            if "content" in df:
-                df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
-            return df
-        return pd.DataFrame(self._canvas.get_history(3)[-1:])
+        assert upstream_outs, "Can't inference the where the component input is. Please identify whose output is this component's input."
+
+        df = pd.concat(upstream_outs, ignore_index=True)
+        if "content" in df:
+            df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
+
+        self._param.inputs = []
+        for _, r in df.iterrows():
+            self._param.inputs.append({"component_id": r["component_id"], "content": r["content"]})
+
+        return df
+
+    def get_input_elements(self):
+        assert self._param.query, "Please identify input parameters firstly."
+        eles = []
+        for q in self._param.query:
+            if q.get("component_id"):
+                cpn_id = q["component_id"]
+                if cpn_id.split("@")[0].lower().find("begin") >= 0:
+                    cpn_id, key = cpn_id.split("@")
+                    eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
+                    continue
+
+                eles.append({"name": self._canvas.get_compnent_name(cpn_id), "key": cpn_id})
+            else:
+                eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
+        return eles

     def get_stream_input(self):
         reversed_cpnts = []
@@ -481,7 +554,8 @@ class ComponentBase(ABC):
         reversed_cpnts.extend(self._canvas.path[-1])

         for u in reversed_cpnts[::-1]:
-            if self.get_component_name(u) in ["switch", "answer"]: continue
+            if self.get_component_name(u) in ["switch", "answer"]:
+                continue
             return self._canvas.get_component(u)["obj"].output()[1]

     @staticmethod
@@ -490,3 +564,6 @@ class ComponentBase(ABC):

     def get_component_name(self, cpn_id):
         return self._canvas.get_component(cpn_id)["obj"].component_name.lower()
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)
```
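To see what the new `get_dependent_components` collects, here is a standalone restatement of its filter over a hand-written `query` list; the component IDs are made up for illustration:

```python
# Mirror of the filtering logic: keep referenced component ids, strip any
# "@parameter" suffix, and drop references to begin/answer components.
query = [
    {"component_id": "Retrieval:demo@top_k"},
    {"component_id": "begin@user_name"},   # dropped: begin
    {"component_id": "Answer:demo"},       # dropped: answer
    {"value": "a plain constant"},         # dropped: no component_id
]
cpnts = set(
    q["component_id"].split("@")[0]
    for q in query
    if q.get("component_id")
    and q["component_id"].lower().find("answer") < 0
    and q["component_id"].lower().find("begin") < 0
)
print(sorted(cpnts))  # ['Retrieval:demo']
```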
```diff
@@ -26,6 +26,7 @@ class BeginParam(ComponentParamBase):
     def __init__(self):
         super().__init__()
         self.prologue = "Hi! I'm your smart assistant. What can I do for you?"
+        self.query = []

     def check(self):
         return True
@@ -42,7 +43,7 @@ class Begin(ComponentBase):
     def stream_output(self):
         res = {"content": self._param.prologue}
         yield res
-        self.set_output(res)
+        self.set_output(self.be_output(res))
```
```diff
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import requests
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


 class BingParam(ComponentParamBase):
     """
     Define the Bing component parameters.
@@ -81,5 +80,5 @@ class Bing(ComponentBase, ABC):
             return Bing.be_output("")

         df = pd.DataFrame(bing_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {str(df)}")
         return df
```
```diff
@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
-from agent.settings import DEBUG


 class CategorizeParam(GenerateParam):
@@ -34,15 +34,18 @@ class CategorizeParam(GenerateParam):
         super().check()
         self.check_empty(self.category_description, "[Categorize] Category examples")
         for k, v in self.category_description.items():
-            if not k: raise ValueError(f"[Categorize] Category name can not be empty!")
-            if not v.get("to"): raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")
+            if not k:
+                raise ValueError("[Categorize] Category name can not be empty!")
+            if not v.get("to"):
+                raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

     def get_prompt(self):
         cate_lines = []
         for c, desc in self.category_description.items():
-            for l in desc.get("examples", "").split("\n"):
-                if not l: continue
-                cate_lines.append("Question: {}\tCategory: {}".format(l, c))
+            for line in desc.get("examples", "").split("\n"):
+                if not line:
+                    continue
+                cate_lines.append("Question: {}\tCategory: {}".format(line, c))
         descriptions = []
         for c, desc in self.category_description.items():
             if desc.get("description"):
@@ -73,15 +76,19 @@ class Categorize(Generate, ABC):

     def _run(self, history, **kwargs):
         input = self.get_input()
-        input = "Question: " + ("; ".join(input["content"]) if "content" in input else "") + "Category: "
+        input = "Question: " + (list(input["content"])[-1] if "content" in input else "") + "\tCategory: "
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
         ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
                             self._param.gen_conf())
-        if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::", input)
+        logging.debug(f"input: {input}, answer: {str(ans)}")
         for c in self._param.category_description.keys():
             if ans.lower().find(c.lower()) >= 0:
                 return Categorize.be_output(self._param.category_description[c]["to"])

         return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
+
+    def debug(self, **kwargs):
+        df = self._run([], **kwargs)
+        cpn_id = df.iloc[0, 0]
+        return Categorize.be_output(self._canvas.get_compnent_name(cpn_id))
```
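For orientation, an illustrative `category_description` of the shape `CategorizeParam.check()` validates: each category names a downstream component (`"to"`) and may carry newline-separated examples used to build the few-shot prompt. All values below are hypothetical:

```python
# Hypothetical category definitions for the Categorize component. _run scans
# the LLM's answer for a category name and routes to that category's "to"
# target; when nothing matches, the last category's target is the fallback.
category_description = {
    "product_related": {
        "description": "Questions about product usage",
        "examples": "How do I parse a PDF?\nWhat chunking methods exist?",
        "to": "Retrieval:docs",
    },
    "chitchat": {
        "description": "Casual conversation",
        "examples": "Hi there!",
        "to": "Generate:smalltalk",
    },
}
```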
```diff
@@ -1,75 +0,0 @@ (file deleted)
-#
-# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from abc import ABC
-
-import pandas as pd
-
-from api.db import LLMType
-from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler
-from agent.component.base import ComponentBase, ComponentParamBase
-
-
-class CiteParam(ComponentParamBase):
-
-    """
-    Define the Retrieval component parameters.
-    """
-    def __init__(self):
-        super().__init__()
-        self.cite_sources = []
-
-    def check(self):
-        self.check_empty(self.cite_source, "Please specify where you want to cite from.")
-
-
-class Cite(ComponentBase, ABC):
-    component_name = "Cite"
-
-    def _run(self, history, **kwargs):
-        input = "\n- ".join(self.get_input()["content"])
-        sources = [self._canvas.get_component(cpn_id).output()[1] for cpn_id in self._param.cite_source]
-        query = []
-        for role, cnt in history[::-1][:self._param.message_history_window_size]:
-            if role != "user":continue
-            query.append(cnt)
-        query = "\n".join(query)
-
-        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
-        if not kbs:
-            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
-        embd_nms = list(set([kb.embd_id for kb in kbs]))
-        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."
-
-        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, embd_nms[0])
-
-        rerank_mdl = None
-        if self._param.rerank_id:
-            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)
-
-        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
-                                        1, self._param.top_n,
-                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
-                                        aggs=False, rerank_mdl=rerank_mdl)
-
-        if not kbinfos["chunks"]: return pd.DataFrame()
-        df = pd.DataFrame(kbinfos["chunks"])
-        df["content"] = df["content_with_weight"]
-        del df["content_with_weight"]
-        return df
```
agent/component/crawler.py (new file, 67 lines)

```diff
@@ -0,0 +1,67 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from abc import ABC
+import asyncio
+from crawl4ai import AsyncWebCrawler
+from agent.component.base import ComponentBase, ComponentParamBase
+from api.utils.web_utils import is_valid_url
+
+
+class CrawlerParam(ComponentParamBase):
+    """
+    Define the Crawler component parameters.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.proxy = None
+        self.extract_type = "markdown"
+
+    def check(self):
+        self.check_valid_value(self.extract_type, "Type of content from the crawler", ['html', 'markdown', 'content'])
+
+
+class Crawler(ComponentBase, ABC):
+    component_name = "Crawler"
+
+    def _run(self, history, **kwargs):
+        ans = self.get_input()
+        ans = " - ".join(ans["content"]) if "content" in ans else ""
+        if not is_valid_url(ans):
+            return Crawler.be_output("")
+        try:
+            result = asyncio.run(self.get_web(ans))
+
+            return Crawler.be_output(result)
+
+        except Exception as e:
+            return Crawler.be_output(f"An unexpected error occurred: {str(e)}")
+
+    async def get_web(self, url):
+        proxy = self._param.proxy if self._param.proxy else None
+        async with AsyncWebCrawler(verbose=True, proxy=proxy) as crawler:
+            result = await crawler.arun(
+                url=url,
+                bypass_cache=True
+            )
+
+            if self._param.extract_type == 'html':
+                return result.cleaned_html
+            elif self._param.extract_type == 'markdown':
+                return result.markdown
+            elif self._param.extract_type == 'content':
+                result.extracted_content
+            return result.markdown
```
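A minimal standalone sketch of the crawl pattern this component wraps, using the same crawl4ai calls the new file imports; the URL is illustrative. Note that, as written, the component's `'content'` branch evaluates `result.extracted_content` without returning it, so it falls through to the markdown output.

```python
# Fetch a page as markdown with crawl4ai, mirroring Crawler.get_web's
# default extract_type. Sketch only; pick your own URL.
import asyncio
from crawl4ai import AsyncWebCrawler


async def fetch(url: str) -> str:
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(url=url, bypass_cache=True)
        return result.markdown


print(asyncio.run(fetch("https://example.com"))[:200])
```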
```diff
@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 from abc import ABC
-import re
 from agent.component.base import ComponentBase, ComponentParamBase
 import deepl
```
```diff
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from duckduckgo_search import DDGS
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -62,5 +62,5 @@ class DuckDuckGo(ComponentBase, ABC):
             return DuckDuckGo.be_output("")

         df = pd.DataFrame(duck_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug("df: {df}")
         return df
```
|
|||||||
138  agent/component/email.py  Normal file
@@ -0,0 +1,138 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import json
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import formataddr
from agent.component.base import ComponentBase, ComponentParamBase


class EmailParam(ComponentParamBase):
    """
    Define the Email component parameters.
    """
    def __init__(self):
        super().__init__()
        # Fixed configuration parameters
        self.smtp_server = ""  # SMTP server address
        self.smtp_port = 465  # SMTP port
        self.email = ""  # Sender email
        self.password = ""  # Email authorization code
        self.sender_name = ""  # Sender name

    def check(self):
        # Check required parameters
        self.check_empty(self.smtp_server, "SMTP Server")
        self.check_empty(self.email, "Email")
        self.check_empty(self.password, "Password")
        self.check_empty(self.sender_name, "Sender Name")


class Email(ComponentBase, ABC):
    component_name = "Email"

    def _run(self, history, **kwargs):
        # Get upstream component output and parse JSON
        ans = self.get_input()
        content = "".join(ans["content"]) if "content" in ans else ""
        if not content:
            return Email.be_output("No content to send")

        success = False
        try:
            # Parse JSON string passed from upstream
            email_data = json.loads(content)

            # Validate required fields
            if "to_email" not in email_data:
                return Email.be_output("Missing required field: to_email")

            # Create email object
            msg = MIMEMultipart('alternative')

            # Properly handle sender name encoding
            msg['From'] = formataddr((str(Header(self._param.sender_name, 'utf-8')), self._param.email))
            msg['To'] = email_data["to_email"]
            if "cc_email" in email_data and email_data["cc_email"]:
                msg['Cc'] = email_data["cc_email"]
            msg['Subject'] = Header(email_data.get("subject", "No Subject"), 'utf-8').encode()

            # Use content from email_data or default content
            email_content = email_data.get("content", "No content provided")
            # msg.attach(MIMEText(email_content, 'plain', 'utf-8'))
            msg.attach(MIMEText(email_content, 'html', 'utf-8'))

            # Connect to SMTP server and send
            logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

            context = smtplib.ssl.create_default_context()
            with smtplib.SMTP_SSL(self._param.smtp_server, self._param.smtp_port, context=context) as server:
                # Login
                logging.info(f"Attempting to login with email: {self._param.email}")
                server.login(self._param.email, self._param.password)

                # Get all recipient list
                recipients = [email_data["to_email"]]
                if "cc_email" in email_data and email_data["cc_email"]:
                    recipients.extend(email_data["cc_email"].split(','))

                # Send email
                logging.info(f"Sending email to recipients: {recipients}")
                try:
                    server.send_message(msg, self._param.email, recipients)
                    success = True
                except Exception as e:
                    logging.error(f"Error during send_message: {str(e)}")
                    # Try alternative method
                    server.sendmail(self._param.email, recipients, msg.as_string())
                    success = True

                try:
                    server.quit()
                except Exception as e:
                    # Ignore errors when closing connection
                    logging.warning(f"Non-fatal error during connection close: {str(e)}")

            if success:
                return Email.be_output("Email sent successfully")

        except json.JSONDecodeError:
            error_msg = "Invalid JSON format in input"
            logging.error(error_msg)
            return Email.be_output(error_msg)

        except smtplib.SMTPAuthenticationError:
            error_msg = "SMTP Authentication failed. Please check your email and authorization code."
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except smtplib.SMTPConnectError:
            error_msg = f"Failed to connect to SMTP server {self._param.smtp_server}:{self._param.smtp_port}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except smtplib.SMTPException as e:
            error_msg = f"SMTP error occurred: {str(e)}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except Exception as e:
            error_msg = f"Unexpected error: {str(e)}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")
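
Note: Email._run() expects its upstream component to hand it a JSON string. A sketch of a payload that satisfies the validation above (addresses are made up; to_email is the only required field):

    import json

    payload = json.dumps({
        "to_email": "alice@example.com",
        "cc_email": "bob@example.com,carol@example.com",   # optional, comma-separated
        "subject": "Weekly report",                        # optional, defaults to "No Subject"
        "content": "<p>Sent by the Email component.</p>",  # optional, sent as HTML
    })
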
@@ -16,9 +16,11 @@
 from abc import ABC
 import re
 import pandas as pd
-from peewee import MySQLDatabase, PostgresqlDatabase
+import pymysql
+import psycopg2
 from agent.component.base import ComponentBase, ComponentParamBase
+import pyodbc
+import logging


 class ExeSQLParam(ComponentParamBase):
     """
@@ -37,13 +39,18 @@ class ExeSQLParam(ComponentParamBase):
         self.top_n = 30

     def check(self):
-        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb'])
+        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
         self.check_empty(self.database, "Database name")
         self.check_empty(self.username, "database username")
         self.check_empty(self.host, "IP Address")
         self.check_positive_integer(self.port, "IP Port")
         self.check_empty(self.password, "Database password")
         self.check_positive_integer(self.top_n, "Number of records")
+        if self.database == "rag_flow":
+            if self.host == "ragflow-mysql":
+                raise ValueError("The host is not accessible.")
+            if self.password == "infini_rag_flow":
+                raise ValueError("The host is not accessible.")


 class ExeSQL(ComponentBase, ABC):
@@ -58,22 +65,43 @@ class ExeSQL(ComponentBase, ABC):
         self._loop += 1

         ans = self.get_input()
-        ans = "".join(ans["content"]) if "content" in ans else ""
-        ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
+        ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
+        if self._param.db_type == 'mssql':
+            # improve the information extraction, most llm return results in markdown format ```sql query ```
+            match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
+            if match:
+                ans = match.group(1)  # Query content
+                print(ans)
+            else:
+                print("no markdown")
+                ans = re.sub(r'^.*?SELECT ', 'SELECT ', (ans), flags=re.IGNORECASE)
+        else:
+            ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
         ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
         ans = re.sub(r';[^;]*$', r';', ans)
         if not ans:
             raise Exception("SQL statement not found!")

+        logging.info("db_type: %s", self._param.db_type)
         if self._param.db_type in ["mysql", "mariadb"]:
-            db = MySQLDatabase(self._param.database, user=self._param.username, host=self._param.host,
-                               port=self._param.port, password=self._param.password)
+            db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
+                                 port=self._param.port, password=self._param.password)
         elif self._param.db_type == 'postgresql':
-            db = PostgresqlDatabase(self._param.database, user=self._param.username, host=self._param.host,
-                                    port=self._param.port, password=self._param.password)
+            db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
+                                  port=self._param.port, password=self._param.password)
+        elif self._param.db_type == 'mssql':
+            conn_str = (
+                r'DRIVER={ODBC Driver 17 for SQL Server};'
+                r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
+                r'DATABASE=' + self._param.database + ';'
+                r'UID=' + self._param.username + ';'
+                r'PWD=' + self._param.password
+            )
+            db = pyodbc.connect(conn_str)
         try:
-            db.connect()
+            cursor = db.cursor()
         except Exception as e:
             raise Exception("Database Connection Failed! \n" + str(e))
         sql_res = []
@@ -81,13 +109,14 @@ class ExeSQL(ComponentBase, ABC):
             if not single_sql:
                 continue
             try:
-                query = db.execute_sql(single_sql)
-                if query.rowcount == 0:
-                    sql_res.append({"content": "\nTotal: " + str(query.rowcount) + "\n No record in the database!"})
+                logging.info("single_sql: %s", single_sql)
+                cursor.execute(single_sql)
+                if cursor.rowcount == 0:
+                    sql_res.append({"content": "\nTotal: 0\n No record in the database!"})
                     continue
-                single_res = pd.DataFrame([i for i in query.fetchmany(size=self._param.top_n)])
-                single_res.columns = [i[0] for i in query.description]
-                sql_res.append({"content": "\nTotal: " + str(query.rowcount) + "\n" + single_res.to_markdown()})
+                single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
+                single_res.columns = [i[0] for i in cursor.description]
+                sql_res.append({"content": "\nTotal: " + str(cursor.rowcount) + "\n" + single_res.to_markdown()})
             except Exception as e:
                 sql_res.append({"content": "**Error**:" + str(e) + "\nError SQL Statement:" + single_sql})
                 pass

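
Note: the new mssql branch first tries to pull the SQL out of a fenced markdown block, since most LLMs wrap generated queries that way. The extraction in isolation:

    import re

    answer = "Here you go:\n```sql\nSELECT id, name FROM users;\n```"
    match = re.search(r"```sql\s*(.*?)\s*```", answer, re.DOTALL)
    sql = match.group(1) if match else answer
    print(sql)  # SELECT id, name FROM users;
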
@@ -17,8 +17,10 @@ import re
 from functools import partial
 import pandas as pd
 from api.db import LLMType
+from api.db.services.conversation_service import structure_answer
+from api.db.services.dialog_service import message_fit_in
 from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler
+from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -50,11 +52,16 @@ class GenerateParam(ComponentParamBase):

     def gen_conf(self):
         conf = {}
-        if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
-        if self.temperature > 0: conf["temperature"] = self.temperature
-        if self.top_p > 0: conf["top_p"] = self.top_p
-        if self.presence_penalty > 0: conf["presence_penalty"] = self.presence_penalty
-        if self.frequency_penalty > 0: conf["frequency_penalty"] = self.frequency_penalty
+        if self.max_tokens > 0:
+            conf["max_tokens"] = self.max_tokens
+        if self.temperature > 0:
+            conf["temperature"] = self.temperature
+        if self.top_p > 0:
+            conf["top_p"] = self.top_p
+        if self.presence_penalty > 0:
+            conf["presence_penalty"] = self.presence_penalty
+        if self.frequency_penalty > 0:
+            conf["frequency_penalty"] = self.frequency_penalty
         return conf


@@ -62,23 +69,28 @@ class Generate(ComponentBase):
     component_name = "Generate"

     def get_dependent_components(self):
-        cpnts = [para["component_id"] for para in self._param.parameters]
-        return cpnts
+        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
+                     if para.get("component_id") \
+                     and para["component_id"].lower().find("answer") < 0 \
+                     and para["component_id"].lower().find("begin") < 0])
+        return list(cpnts)

     def set_cite(self, retrieval_res, answer):
         retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
         if "empty_response" in retrieval_res.columns:
             retrieval_res["empty_response"].fillna("", inplace=True)
-        answer, idx = retrievaler.insert_citations(answer, [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
-                                                   [ck["vector"] for _, ck in retrieval_res.iterrows()],
-                                                   LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
-                                                             self._canvas.get_embedding_model()), tkweight=0.7,
-                                                   vtweight=0.3)
+        answer, idx = settings.retrievaler.insert_citations(answer,
+                                                            [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
+                                                            [ck["vector"] for _, ck in retrieval_res.iterrows()],
+                                                            LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
+                                                                      self._canvas.get_embedding_model()), tkweight=0.7,
+                                                            vtweight=0.3)
         doc_ids = set([])
         recall_docs = []
         for i in idx:
             did = retrieval_res.loc[int(i), "doc_id"]
-            if did in doc_ids: continue
+            if did in doc_ids:
+                continue
             doc_ids.add(did)
             recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

@@ -91,28 +103,71 @@ class Generate(ComponentBase):
         }

         if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
-            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
+            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
         res = {"content": answer, "reference": reference}
+        res = structure_answer(None, res, "", "")

         return res

+    def get_input_elements(self):
+        if self._param.parameters:
+            return [{"key": "user", "name": "User"}, *self._param.parameters]
+
+        return [{"key": "user", "name": "User"}]
+
     def _run(self, history, **kwargs):
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
         prompt = self._param.prompt

-        retrieval_res = self.get_input()
-        input = (" - " + "\n - ".join(retrieval_res["content"])) if "content" in retrieval_res else ""
+        retrieval_res = []
+        self._param.inputs = []
         for para in self._param.parameters:
-            cpn = self._canvas.get_component(para["component_id"])["obj"]
+            if not para.get("component_id"):
+                continue
+            component_id = para["component_id"].split("@")[0]
+            if para["component_id"].lower().find("@") >= 0:
+                cpn_id, key = para["component_id"].split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] == key:
+                        kwargs[para["key"]] = p.get("value", "")
+                        self._param.inputs.append(
+                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                        break
+                else:
+                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
+                continue
+
+            cpn = self._canvas.get_component(component_id)["obj"]
+            if cpn.component_name.lower() == "answer":
+                hist = self._canvas.get_history(1)
+                if hist:
+                    hist = hist[0]["content"]
+                else:
+                    hist = ""
+                kwargs[para["key"]] = hist
+                continue
             _, out = cpn.output(allow_partial=False)
             if "content" not in out.columns:
-                kwargs[para["key"]] = "Nothing"
+                kwargs[para["key"]] = ""
             else:
-                kwargs[para["key"]] = " - " + "\n - ".join(out["content"])
+                if cpn.component_name.lower() == "retrieval":
+                    retrieval_res.append(out)
+                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
+            self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})

+        if retrieval_res:
+            retrieval_res = pd.concat(retrieval_res, ignore_index=True)
+        else:
+            retrieval_res = pd.DataFrame([])

-        kwargs["input"] = input
         for n, v in kwargs.items():
-            prompt = re.sub(r"\{%s\}" % n, re.escape(str(v)), prompt)
+            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
+
+        if not self._param.inputs and prompt.find("{input}") >= 0:
+            retrieval_res = self.get_input()
+            input = (" - " + "\n - ".join(
+                [c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
+            prompt = re.sub(r"\{input\}", re.escape(input), prompt)

         downstreams = self._canvas.get_component(self._id)["downstream"]
         if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
@@ -124,8 +179,14 @@ class Generate(ComponentBase):
                 retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
             return pd.DataFrame([res])

-        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
-                            self._param.gen_conf())
+        msg = self._canvas.get_history(self._param.message_history_window_size)
+        if len(msg) < 1:
+            msg.append({"role": "user", "content": ""})
+        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
+        if len(msg) < 2:
+            msg.append({"role": "user", "content": ""})
+        ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
+
         if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, ans)
             return pd.DataFrame([res])
@@ -141,9 +202,14 @@ class Generate(ComponentBase):
             self.set_output(res)
             return

+        msg = self._canvas.get_history(self._param.message_history_window_size)
+        if len(msg) < 1:
+            msg.append({"role": "user", "content": ""})
+        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
+        if len(msg) < 2:
+            msg.append({"role": "user", "content": ""})
         answer = ""
-        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
-                                          self._param.gen_conf()):
+        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
             res = {"content": ans, "reference": []}
             answer = ans
             yield res
@@ -152,4 +218,17 @@ class Generate(ComponentBase):
             res = self.set_cite(retrieval_res, answer)
             yield res

-        self.set_output(res)
+        self.set_output(Generate.be_output(res))
+
+    def debug(self, **kwargs):
+        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+        prompt = self._param.prompt
+
+        for para in self._param.debug_inputs:
+            kwargs[para["key"]] = para.get("value", "")
+
+        for n, v in kwargs.items():
+            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
+
+        ans = chat_mdl.chat(prompt, [{"role": "user", "content": kwargs.get("user", "")}], self._param.gen_conf())
+        return pd.DataFrame([ans])

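
Note: gen_conf() above only forwards strictly positive sampling parameters, so a value of 0 means "use the model provider's default". A sketch of that behavior:

    def gen_conf(max_tokens=0, temperature=0.7, top_p=0, presence_penalty=0, frequency_penalty=0):
        conf = {}
        if max_tokens > 0:
            conf["max_tokens"] = max_tokens
        if temperature > 0:
            conf["temperature"] = temperature
        if top_p > 0:
            conf["top_p"] = top_p
        if presence_penalty > 0:
            conf["presence_penalty"] = presence_penalty
        if frequency_penalty > 0:
            conf["frequency_penalty"] = frequency_penalty
        return conf

    print(gen_conf())  # {'temperature': 0.7}
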
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import pandas as pd
 import requests
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase

@@ -57,5 +57,5 @@ class GitHub(ComponentBase, ABC):
             return GitHub.be_output("")

         df = pd.DataFrame(github_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df

@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from serpapi import GoogleSearch
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase

@@ -85,12 +85,12 @@ class Google(ComponentBase, ABC):
                                    "hl": self._param.language, "num": self._param.top_n})
             google_res = [{"content": '<a href="' + i["link"] + '">' + i["title"] + '</a> ' + i["snippet"]} for i in
                           client.get_dict()["organic_results"]]
-        except Exception as e:
+        except Exception:
             return Google.be_output("**ERROR**: Existing Unavailable Parameters!")

         if not google_res:
             return Google.be_output("")

         df = pd.DataFrame(google_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df

@@ -13,9 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase
 from scholarly import scholarly

@@ -58,13 +58,13 @@ class GoogleScholar(ComponentBase, ABC):
                     'pub_url'] + '"></a> ' + "\n author: " + ",".join(pub['bib']['author']) + '\n Abstract: ' + pub[
                     'bib'].get('abstract', 'no abstract')})

-            except StopIteration or Exception as e:
-                print("**ERROR** " + str(e))
+            except StopIteration or Exception:
+                logging.exception("GoogleScholar")
                 break

         if not scholar_res:
             return GoogleScholar.be_output("")

         df = pd.DataFrame(scholar_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df

106  agent/component/invoke.py  Normal file
@@ -0,0 +1,106 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import json
import re
from abc import ABC
import requests
from deepdoc.parser import HtmlParser
from agent.component.base import ComponentBase, ComponentParamBase


class InvokeParam(ComponentParamBase):
    """
    Define the Invoke component parameters.
    """

    def __init__(self):
        super().__init__()
        self.proxy = None
        self.headers = ""
        self.method = "get"
        self.variables = []
        self.url = ""
        self.timeout = 60
        self.clean_html = False

    def check(self):
        self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
        self.check_empty(self.url, "End point URL")
        self.check_positive_integer(self.timeout, "Timeout time in second")
        self.check_boolean(self.clean_html, "Clean HTML")


class Invoke(ComponentBase, ABC):
    component_name = "Invoke"

    def _run(self, history, **kwargs):
        args = {}
        for para in self._param.variables:
            if para.get("component_id"):
                cpn = self._canvas.get_component(para["component_id"])["obj"]
                if cpn.component_name.lower() == "answer":
                    args[para["key"]] = self._canvas.get_history(1)[0]["content"]
                    continue
                _, out = cpn.output(allow_partial=False)
                args[para["key"]] = "\n".join(out["content"])
            else:
                args[para["key"]] = "\n".join(para["value"])

        url = self._param.url.strip()
        if url.find("http") != 0:
            url = "http://" + url

        method = self._param.method.lower()
        headers = {}
        if self._param.headers:
            headers = json.loads(self._param.headers)
        proxies = None
        if self._param.proxy and re.sub(r"https?:?/?/?", "", self._param.proxy):
            proxies = {"http": self._param.proxy, "https": self._param.proxy}

        if method == 'get':
            response = requests.get(url=url,
                                    params=args,
                                    headers=headers,
                                    proxies=proxies,
                                    timeout=self._param.timeout)
            if self._param.clean_html:
                sections = HtmlParser()(None, response.content)
                return Invoke.be_output("\n".join(sections))

            return Invoke.be_output(response.text)

        if method == 'put':
            response = requests.put(url=url,
                                    data=args,
                                    headers=headers,
                                    proxies=proxies,
                                    timeout=self._param.timeout)
            if self._param.clean_html:
                sections = HtmlParser()(None, response.content)
                return Invoke.be_output("\n".join(sections))
            return Invoke.be_output(response.text)

        if method == 'post':
            response = requests.post(url=url,
                                     json=args,
                                     headers=headers,
                                     proxies=proxies,
                                     timeout=self._param.timeout)
            if self._param.clean_html:
                sections = HtmlParser()(None, response.content)
                return Invoke.be_output("\n".join(sections))
            return Invoke.be_output(response.text)
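
Note: for method == "get" the Invoke component boils down to a plain requests call with the resolved variables as query parameters. A sketch with a made-up endpoint and bindings:

    import requests

    args = {"q": "ragflow"}  # values resolved from upstream components
    response = requests.get(
        url="http://api.example.com/search",
        params=args,
        headers={"Authorization": "Bearer <token>"},
        timeout=60,
    )
    print(response.text)
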
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import re
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
-from agent.settings import DEBUG


 class KeywordExtractParam(GenerateParam):
@@ -50,16 +50,16 @@ class KeywordExtract(Generate, ABC):
     component_name = "KeywordExtract"

     def _run(self, history, **kwargs):
-        q = ""
-        for r, c in self._canvas.history[::-1]:
-            if r == "user":
-                q += c
-                break
+        query = self.get_input()
+        query = str(query["content"][0]) if "content" in query else ""

         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
+        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
                             self._param.gen_conf())

         ans = re.sub(r".*keyword:", "", ans).strip()
-        if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"ans: {ans}")
         return KeywordExtract.be_output(ans)
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)

@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from Bio import Entrez
 import re
 import pandas as pd
 import xml.etree.ElementTree as ET
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase

@@ -65,5 +65,5 @@ class PubMed(ComponentBase, ABC):
             return PubMed.be_output("")

         df = pd.DataFrame(pubmed_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
@@ -70,11 +71,13 @@ class Relevant(Generate, ABC):
         ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
                             self._param.gen_conf())

-        print(ans, ":::::::::::::::::::::::::::::::::")
+        logging.debug(ans)
         if ans.lower().find("yes") >= 0:
             return Relevant.be_output(self._param.yes)
         if ans.lower().find("no") >= 0:
             return Relevant.be_output(self._param.no)
         assert False, f"Relevant component got: {ans}"
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC

 import pandas as pd
@@ -20,7 +21,7 @@ import pandas as pd
 from api.db import LLMType
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler
+from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -43,22 +44,19 @@ class RetrievalParam(ComponentParamBase):
         self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
         self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
         self.check_positive_number(self.top_n, "[Retrieval] Top N")
-        self.check_empty(self.kb_ids, "[Retrieval] Knowledge bases")


 class Retrieval(ComponentBase, ABC):
     component_name = "Retrieval"

     def _run(self, history, **kwargs):
-        query = []
-        for role, cnt in history[::-1][:self._param.message_history_window_size]:
-            if role != "user":continue
-            query.append(cnt)
-        # query = "\n".join(query)
-        query = query[0]
+        query = self.get_input()
+        query = str(query["content"][0]) if "content" in query else ""
         kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
         if not kbs:
-            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
+            return Retrieval.be_output("")

         embd_nms = list(set([kb.embd_id for kb in kbs]))
         assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

@@ -69,7 +67,7 @@ class Retrieval(ComponentBase, ABC):
         if self._param.rerank_id:
             rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

-        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
+        kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                         1, self._param.top_n,
                                         self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                         aggs=False, rerank_mdl=rerank_mdl)
@@ -83,7 +81,7 @@ class Retrieval(ComponentBase, ABC):
         df = pd.DataFrame(kbinfos["chunks"])
         df["content"] = df["content_with_weight"]
         del df["content_with_weight"]
-        print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", query, df)
+        logging.debug("{} {}".format(query, df))
         return df

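
Note: the chunks returned by the retriever carry their text under "content_with_weight"; the tail of _run() above renames it to "content" for downstream components. In isolation (chunk data made up):

    import pandas as pd

    kbinfos = {"chunks": [{"doc_id": "d1", "content_with_weight": "Paris is the capital of France."}]}
    df = pd.DataFrame(kbinfos["chunks"])
    df["content"] = df["content_with_weight"]
    del df["content_with_weight"]
    print(df.columns.tolist())  # ['doc_id', 'content']
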
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
@@ -33,7 +34,7 @@ class RewriteQuestionParam(GenerateParam):
     def check(self):
         super().check()

-    def get_prompt(self):
+    def get_prompt(self, conv):
         self.prompt = """
         You are an expert at query expansion to generate a paraphrasing of a question.
         I can't retrieval relevant information from the knowledge base by using user's question directly.
@@ -43,6 +44,40 @@ class RewriteQuestionParam(GenerateParam):
         And return 5 versions of question and one is from translation.
         Just list the question. No other words are needed.
         """
+        return f"""
+Role: A helpful assistant
+Task: Generate a full user question that would follow the conversation.
+Requirements & Restrictions:
+  - Text generated MUST be in the same language of the original user's question.
+  - If the user's latest question is already complete, don't do anything, just return the original question.
+  - DON'T generate anything except a refined question.
+
+######################
+-Examples-
+######################
+# Example 1
+## Conversation
+USER: What is the name of Donald Trump's father?
+ASSISTANT: Fred Trump.
+USER: And his mother?
+###############
+Output: What's the name of Donald Trump's mother?
+------------
+# Example 2
+## Conversation
+USER: What is the name of Donald Trump's father?
+ASSISTANT: Fred Trump.
+USER: And his mother?
+ASSISTANT: Mary Trump.
+User: What's her full name?
+###############
+Output: What's the full name of Donald Trump's mother Mary Trump?
+
+######################
+# Real Data
+## Conversation
+{conv}
+###############
+"""
         return self.prompt


@@ -56,19 +91,23 @@ class RewriteQuestion(Generate, ABC):
             self._loop = 0
             raise Exception("Sorry! Nothing relevant found.")
         self._loop += 1
-        q = "Question: "
-        for r, c in self._canvas.history[::-1]:
-            if r == "user":
-                q += c
-                break
+        hist = self._canvas.get_history(4)
+        conv = []
+        for m in hist:
+            if m["role"] not in ["user", "assistant"]:
+                continue
+            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
+        conv = "\n".join(conv)

         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
+        ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
                             self._param.gen_conf())
         self._canvas.history.pop()
         self._canvas.history.append(("user", ans))

-        print(ans, ":::::::::::::::::::::::::::::::::")
+        logging.debug(ans)
         return RewriteQuestion.be_output(ans)

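
Note: RewriteQuestion now flattens the last few turns into the {conv} slot of the prompt instead of taking only the latest user message. The flattening in isolation:

    hist = [
        {"role": "user", "content": "What is the name of Donald Trump's father?"},
        {"role": "assistant", "content": "Fred Trump."},
        {"role": "user", "content": "And his mother?"},
    ]
    conv = "\n".join("{}: {}".format(m["role"].upper(), m["content"])
                     for m in hist if m["role"] in ["user", "assistant"])
    print(conv)
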
@@ -41,19 +41,44 @@ class SwitchParam(ComponentParamBase):
     def check(self):
         self.check_empty(self.conditions, "[Switch] conditions")
         for cond in self.conditions:
-            if not cond["to"]: raise ValueError(f"[Switch] 'To' can not be empty!")
+            if not cond["to"]:
+                raise ValueError("[Switch] 'To' can not be empty!")


 class Switch(ComponentBase, ABC):
     component_name = "Switch"

+    def get_dependent_components(self):
+        res = []
+        for cond in self._param.conditions:
+            for item in cond["items"]:
+                if not item["cpn_id"]:
+                    continue
+                if item["cpn_id"].find("begin") >= 0:
+                    continue
+                cid = item["cpn_id"].split("@")[0]
+                res.append(cid)
+
+        return list(set(res))
+
     def _run(self, history, **kwargs):
         for cond in self._param.conditions:
             res = []
             for item in cond["items"]:
-                out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
-                cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
-                res.append(self.process_operator(cpn_input, item["operator"], item["value"]))
+                if not item["cpn_id"]:
+                    continue
+                cid = item["cpn_id"].split("@")[0]
+                if item["cpn_id"].find("@") > 0:
+                    cpn_id, key = item["cpn_id"].split("@")
+                    for p in self._canvas.get_component(cid)["obj"]._param.query:
+                        if p["key"] == key:
+                            res.append(self.process_operator(p.get("value", ""), item["operator"], item.get("value", "")))
+                            break
+                else:
+                    out = self._canvas.get_component(cid)["obj"].output()[1]
+                    cpn_input = "" if "content" not in out.columns else " ".join([str(s) for s in out["content"]])
+                    res.append(self.process_operator(cpn_input, item["operator"], item.get("value", "")))

             if cond["logical_operator"] != "and" and any(res):
                 return Switch.be_output(cond["to"])

@@ -85,22 +110,22 @@ class Switch(ComponentBase, ABC):
         elif operator == ">":
             try:
                 return True if float(input) > float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input > value else False
         elif operator == "<":
             try:
                 return True if float(input) < float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input < value else False
         elif operator == "≥":
             try:
                 return True if float(input) >= float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input >= value else False
         elif operator == "≤":
             try:
                 return True if float(input) <= float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input <= value else False

         raise ValueError('Not supported operator' + operator)
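
Note: the comparison operators above try a numeric comparison first and fall back to string comparison when the operands don't parse as floats:

    def greater_than(lhs: str, rhs: str) -> bool:
        try:
            return float(lhs) > float(rhs)
        except Exception:
            return lhs > rhs

    print(greater_than("10", "9"))  # True (numeric: 10 > 9)
    print(greater_than("b", "a"))   # True (lexicographic fallback)
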
86  agent/component/template.py  Normal file
@@ -0,0 +1,86 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import re
from agent.component.base import ComponentBase, ComponentParamBase


class TemplateParam(ComponentParamBase):
    """
    Define the Template component parameters.
    """

    def __init__(self):
        super().__init__()
        self.content = ""
        self.parameters = []

    def check(self):
        self.check_empty(self.content, "[Template] Content")
        return True


class Template(ComponentBase):
    component_name = "Template"

    def get_dependent_components(self):
        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
                     if para.get("component_id") \
                     and para["component_id"].lower().find("answer") < 0 \
                     and para["component_id"].lower().find("begin") < 0])
        return list(cpnts)

    def _run(self, history, **kwargs):
        content = self._param.content

        self._param.inputs = []
        for para in self._param.parameters:
            if not para.get("component_id"):
                continue
            component_id = para["component_id"].split("@")[0]
            if para["component_id"].lower().find("@") >= 0:
                cpn_id, key = para["component_id"].split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] == key:
                        kwargs[para["key"]] = p.get("value", "")
                        self._param.inputs.append(
                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
                        break
                else:
                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
                continue

            cpn = self._canvas.get_component(component_id)["obj"]
            if cpn.component_name.lower() == "answer":
                hist = self._canvas.get_history(1)
                if hist:
                    hist = hist[0]["content"]
                else:
                    hist = ""
                kwargs[para["key"]] = hist
                continue

            _, out = cpn.output(allow_partial=False)
            if "content" not in out.columns:
                kwargs[para["key"]] = ""
            else:
                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
            self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})

        for n, v in kwargs.items():
            content = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), content)

        return Template.be_output(content)
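
Note: Template renders its content by substituting every {name} placeholder with the bound value; re.escape() guards parameter names that contain regex metacharacters, and the backslash replacement keeps values from being read as regex escape sequences. In isolation:

    import re

    content = "Dear {user},\nToday's digest:\n{digest}"
    bindings = {"user": "Alice", "digest": " - item one\n - item two"}
    for name, value in bindings.items():
        content = re.sub(r"\{%s\}" % re.escape(name), str(value).replace("\\", " "), content)
    print(content)
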
@@ -13,12 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import random
+import logging
 from abc import ABC
-from functools import partial
 import wikipedia
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase

@@ -65,5 +63,5 @@ class Wikipedia(ComponentBase, ABC):
             return Wikipedia.be_output("")

         df = pd.DataFrame(wiki_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import pandas as pd
 from agent.component.base import ComponentBase, ComponentParamBase
@@ -74,8 +75,8 @@ class YahooFinance(ComponentBase, ABC):
                 {"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
             if self._param.news:
                 yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
-        except Exception as e:
-            print("**ERROR** " + str(e))
+        except Exception:
+            logging.exception("YahooFinance got exception")

         if not yohoo_res:
             return YahooFinance.be_output("")

@@ -13,22 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# Logger
-import os
-
-from api.utils.file_utils import get_project_base_directory
-from api.utils.log_utils import LoggerFactory, getLogger
-
-DEBUG = 0
-LoggerFactory.set_directory(
-    os.path.join(
-        get_project_base_directory(),
-        "logs",
-        "flow"))
-# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
-LoggerFactory.LEVEL = 30
-
-flow_logger = getLogger("flow")
-database_logger = getLogger("database")
 FLOAT_ZERO = 1e-8
 PARAM_MAXDEPTH = 5
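Together with the two hunks above, this removes the project-specific LoggerFactory (and the flow_logger/database_logger globals) in favor of the standard library's logging module. A minimal sketch of the replacement pattern, assuming a plain basicConfig setup rather than whatever api/utils/log_utils configures in the real service:

    import logging

    # Level 30 is WARNING, matching the old LoggerFactory.LEVEL = 30.
    logging.basicConfig(level=logging.WARNING,
                        format="%(asctime)s %(name)s %(levelname)s %(message)s")

    log = logging.getLogger("flow")
    log.warning("visible at level 30")
    log.debug("suppressed unless the level is lowered to DEBUG")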
File diff suppressed because one or more lines are too long
1410  agent/templates/seo_blog.json  Normal file
File diff suppressed because one or more lines are too long
@@ -43,6 +43,7 @@ if __name__ == '__main__':
         else:
             print(ans["content"])
 
-        if DEBUG: print(canvas.path)
+        if DEBUG:
+            print(canvas.path)
         question = input("\n==================== User =====================\n> ")
         canvas.add_user_input(question)
@@ -0,0 +1,2 @@
+from beartype.claw import beartype_this_package
+beartype_this_package()
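The new two-line module enables beartype's import hook, so functions across the package are type-checked at call time against their annotations. A small illustration of the effect using the plain decorator on a hypothetical function (beartype must be installed):

    from beartype import beartype

    @beartype
    def add(a: int, b: int) -> int:
        return a + b

    add(1, 2)        # passes
    try:
        add(1, "2")  # violates the `b: int` annotation
    except Exception as exc:
        print(type(exc).__name__)  # a beartype violation error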
@@ -13,14 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import logging
 import os
 import sys
+import logging
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 from flask import Blueprint, Flask
 from werkzeug.wrappers.request import Request
 from flask_cors import CORS
+from flasgger import Swagger
+from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
 
 from api.db import StatusEnum
 from api.db.db_models import close_connection
@@ -29,32 +31,60 @@ from api.utils import CustomJSONEncoder, commands
 
 from flask_session import Session
 from flask_login import LoginManager
-from api.settings import SECRET_KEY, stat_logger
-from api.settings import API_VERSION, access_logger
+from api import settings
 from api.utils.api_utils import server_error_response
-from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
+from api.constants import API_VERSION
 
-__all__ = ['app']
+__all__ = ["app"]
 
 
-logger = logging.getLogger('flask.app')
-for h in access_logger.handlers:
-    logger.addHandler(h)
 
 Request.json = property(lambda self: self.get_json(force=True, silent=True))
 
 app = Flask(__name__)
-CORS(app, supports_credentials=True,max_age=2592000)
+
+# Add this at the beginning of your file to configure Swagger UI
+swagger_config = {
+    "headers": [],
+    "specs": [
+        {
+            "endpoint": "apispec",
+            "route": "/apispec.json",
+            "rule_filter": lambda rule: True,  # Include all endpoints
+            "model_filter": lambda tag: True,  # Include all models
+        }
+    ],
+    "static_url_path": "/flasgger_static",
+    "swagger_ui": True,
+    "specs_route": "/apidocs/",
+}
+
+swagger = Swagger(
+    app,
+    config=swagger_config,
+    template={
+        "swagger": "2.0",
+        "info": {
+            "title": "RAGFlow API",
+            "description": "",
+            "version": "1.0.0",
+        },
+        "securityDefinitions": {
+            "ApiKeyAuth": {"type": "apiKey", "name": "Authorization", "in": "header"}
+        },
+    },
+)
+
+CORS(app, supports_credentials=True, max_age=2592000)
 app.url_map.strict_slashes = False
 app.json_encoder = CustomJSONEncoder
 app.errorhandler(Exception)(server_error_response)
 
 
 ## convince for dev and debug
-#app.config["LOGIN_DISABLED"] = True
+# app.config["LOGIN_DISABLED"] = True
 app.config["SESSION_PERMANENT"] = False
 app.config["SESSION_TYPE"] = "filesystem"
-app.config['MAX_CONTENT_LENGTH'] = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))
+app.config["MAX_CONTENT_LENGTH"] = int(
+    os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024)
+)
 
 Session(app)
 login_manager = LoginManager()
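With flasgger registered as above, the server should serve the raw spec at /apispec.json and the Swagger UI at /apidocs/. A quick probe, assuming a local deployment on port 9380 (the host and port are assumptions, not part of the diff):

    import json
    from urllib.request import urlopen

    # Hypothetical local server; adjust to your deployment.
    with urlopen("http://localhost:9380/apispec.json") as resp:
        spec = json.load(resp)
    print(spec["info"]["title"])  # expected: "RAGFlow API"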
@@ -64,17 +94,23 @@ commands.register_commands(app)
 
 
 def search_pages_path(pages_dir):
-    app_path_list = [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
-    api_path_list = [path for path in pages_dir.glob('*sdk/*.py') if not path.name.startswith('.')]
+    app_path_list = [
+        path for path in pages_dir.glob("*_app.py") if not path.name.startswith(".")
+    ]
+    api_path_list = [
+        path for path in pages_dir.glob("*sdk/*.py") if not path.name.startswith(".")
+    ]
     app_path_list.extend(api_path_list)
     return app_path_list
 
 
 def register_page(page_path):
-    path = f'{page_path}'
+    path = f"{page_path}"
 
-    page_name = page_path.stem.rstrip('_app')
-    module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name,))
+    page_name = page_path.stem.rstrip("_app")
+    module_name = ".".join(
+        page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
+    )
 
     spec = spec_from_file_location(module_name, page_path)
     page = module_from_spec(spec)
@@ -82,8 +118,10 @@ def register_page(page_path):
     page.manager = Blueprint(page_name, module_name)
     sys.modules[module_name] = page
     spec.loader.exec_module(page)
-    page_name = getattr(page, 'page_name', page_name)
-    url_prefix = f'/api/{API_VERSION}/{page_name}' if "/sdk/" in path else f'/{API_VERSION}/{page_name}'
+    page_name = getattr(page, "page_name", page_name)
+    url_prefix = (
+        f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
+    )
 
     app.register_blueprint(page.manager, url_prefix=url_prefix)
     return url_prefix
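Note the behavioral change buried in the reformat: SDK blueprints are now mounted at /api/{API_VERSION} without the page name appended, so each SDK module must carry its own resource paths. A tiny sketch of the before/after prefixes (the API_VERSION value here is illustrative; the real one comes from api.constants):

    API_VERSION = "v1"  # stand-in value
    page_name, path = "doc", "api/apps/sdk/doc.py"

    old_prefix = f"/api/{API_VERSION}/{page_name}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
    new_prefix = f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
    print(old_prefix, "->", new_prefix)  # /api/v1/doc -> /api/v1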
@@ -91,31 +129,31 @@ def register_page(page_path):
 
 pages_dir = [
     Path(__file__).parent,
-    Path(__file__).parent.parent / 'api' / 'apps',
-    Path(__file__).parent.parent / 'api' / 'apps' / 'sdk',
+    Path(__file__).parent.parent / "api" / "apps",
+    Path(__file__).parent.parent / "api" / "apps" / "sdk",
 ]
 
 client_urls_prefix = [
-    register_page(path)
-    for dir in pages_dir
-    for path in search_pages_path(dir)
+    register_page(path) for dir in pages_dir for path in search_pages_path(dir)
 ]
 
 
 @login_manager.request_loader
 def load_user(web_request):
-    jwt = Serializer(secret_key=SECRET_KEY)
+    jwt = Serializer(secret_key=settings.SECRET_KEY)
     authorization = web_request.headers.get("Authorization")
     if authorization:
        try:
            access_token = str(jwt.loads(authorization))
-            user = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
+            user = UserService.query(
+                access_token=access_token, status=StatusEnum.VALID.value
+            )
            if user:
                return user[0]
            else:
                return None
-        except Exception as e:
-            stat_logger.exception(e)
+        except Exception:
+            logging.exception("load_user got exception")
            return None
    else:
        return None
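load_user now reads SECRET_KEY through the settings module and logs failures via logging.exception instead of the removed stat_logger; the token round-trip itself is unchanged. A minimal sketch of that round-trip with itsdangerous (the secret is a placeholder):

    from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

    jwt = Serializer(secret_key="placeholder-secret")
    token = jwt.dumps("some-access-token")  # what gets handed to the client
    print(jwt.loads(token))                 # what load_user recovers and queries by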
@@ -123,4 +161,4 @@ def load_user(web_request):
 
 @app.teardown_request
 def _db_close(exc):
     close_connection()
@@ -22,43 +22,37 @@ from api.db.services.llm_service import TenantLLMService
 from flask_login import login_required, current_user
 
 from api.db import FileType, LLMType, ParserType, FileSource
-from api.db.db_models import APIToken, API4Conversation, Task, File
+from api.db.db_models import APIToken, Task, File
 from api.db.services import duplicate_name
 from api.db.services.api_service import APITokenService, API4ConversationService
-from api.db.services.dialog_service import DialogService, chat
+from api.db.services.dialog_service import DialogService, chat, keyword_extraction
 from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.task_service import queue_tasks, TaskService
 from api.db.services.user_service import UserTenantService
-from api.settings import RetCode, retrievaler
+from api import settings
 from api.utils import get_uuid, current_timestamp, datetime_format
-from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request
-from itsdangerous import URLSafeTimedSerializer
+from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request, \
+    generate_confirmation_token
 
 from api.utils.file_utils import filename_type, thumbnail
-from rag.nlp import keyword_extraction
 from rag.utils.storage_factory import STORAGE_IMPL
 
-from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
+from api.db.services.canvas_service import UserCanvasService
 from agent.canvas import Canvas
 from functools import partial
 
 
-def generate_confirmation_token(tenent_id):
-    serializer = URLSafeTimedSerializer(tenent_id)
-    return "ragflow-" + serializer.dumps(get_uuid(), salt=tenent_id)[2:34]
-
-
-@manager.route('/new_token', methods=['POST'])
+@manager.route('/new_token', methods=['POST'])  # noqa: F821
 @login_required
 def new_token():
     req = request.json
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
 
         tenant_id = tenants[0].tenant_id
         obj = {"tenant_id": tenant_id, "token": generate_confirmation_token(tenant_id),
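generate_confirmation_token is no longer defined here; it is imported from api.utils.api_utils. The removed body shows the scheme, sketched standalone below (uuid4 stands in for the project's get_uuid helper):

    import uuid
    from itsdangerous import URLSafeTimedSerializer

    def generate_confirmation_token(tenant_id: str) -> str:
        # Sign a fresh UUID with the tenant id as secret and salt,
        # then keep a fixed-width slice, as the removed helper did.
        serializer = URLSafeTimedSerializer(tenant_id)
        return "ragflow-" + serializer.dumps(str(uuid.uuid4()), salt=tenant_id)[2:34]

    print(generate_confirmation_token("tenant-123"))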
@@ -74,20 +68,20 @@ def new_token():
             obj["dialog_id"] = req["dialog_id"]
 
         if not APITokenService.save(**obj):
-            return get_data_error_result(retmsg="Fail to new a dialog!")
+            return get_data_error_result(message="Fail to new a dialog!")
 
         return get_json_result(data=obj)
     except Exception as e:
         return server_error_response(e)
 
 
-@manager.route('/token_list', methods=['GET'])
+@manager.route('/token_list', methods=['GET'])  # noqa: F821
 @login_required
 def token_list():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
 
         id = request.args["dialog_id"] if "dialog_id" in request.args else request.args["canvas_id"]
         objs = APITokenService.query(tenant_id=tenants[0].tenant_id, dialog_id=id)
@@ -96,7 +90,7 @@ def token_list():
         return server_error_response(e)
 
 
-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @validate_request("tokens", "tenant_id")
 @login_required
 def rm():
@@ -110,13 +104,13 @@ def rm():
         return server_error_response(e)
 
 
-@manager.route('/stats', methods=['GET'])
+@manager.route('/stats', methods=['GET'])  # noqa: F821
 @login_required
 def stats():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         objs = API4ConversationService.stats(
             tenants[0].tenant_id,
             request.args.get(
@@ -141,14 +135,13 @@ def stats():
         return server_error_response(e)
 
 
-@manager.route('/new_conversation', methods=['GET'])
+@manager.route('/new_conversation', methods=['GET'])  # noqa: F821
 def set_conversation():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
-    req = request.json
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     try:
         if objs[0].source == "agent":
             e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
@@ -169,7 +162,7 @@ def set_conversation():
         else:
             e, dia = DialogService.get_by_id(objs[0].dialog_id)
             if not e:
-                return get_data_error_result(retmsg="Dialog not found")
+                return get_data_error_result(message="Dialog not found")
             conv = {
                 "id": get_uuid(),
                 "dialog_id": dia.id,
@@ -182,19 +175,20 @@ def set_conversation():
         return server_error_response(e)
 
 
-@manager.route('/completion', methods=['POST'])
+@manager.route('/completion', methods=['POST'])  # noqa: F821
 @validate_request("conversation_id", "messages")
 def completion():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
-    if "quote" not in req: req["quote"] = False
+        return get_data_error_result(message="Conversation not found!")
+    if "quote" not in req:
+        req["quote"] = False
 
     msg = []
     for m in req["messages"]:
@@ -203,7 +197,8 @@ def completion():
         if m["role"] == "assistant" and not msg:
             continue
         msg.append(m)
-    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
     message_id = msg[-1]["id"]
 
     def fillin_conv(ans):
@@ -263,19 +258,20 @@ def completion():
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                     fillin_conv(ans)
                     rename_field(ans)
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans},
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                                ensure_ascii=False) + "\n\n"
 
                 canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+                canvas.history.append(("assistant", final_ans["content"]))
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
                 API4ConversationService.append_message(conv.id, conv.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
         resp = Response(sse(), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")
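Every SSE payload in these endpoints renames retcode/retmsg to code/message, which breaks older streaming clients. A minimal consumer sketch for the `data:{json}` framing used above:

    import json

    def parse_sse_line(line: str):
        """Decode one SSE line; return the payload, or None for non-data lines."""
        if not line.startswith("data:"):
            return None
        event = json.loads(line[len("data:"):])
        if event.get("code") != 0:  # the key was "retcode" before this change
            raise RuntimeError(event.get("message", "unknown error"))
        return event["data"]

    print(parse_sse_line('data:{"code": 0, "message": "", "data": true}'))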
@@ -295,12 +291,12 @@ def completion():
             API4ConversationService.append_message(conv.id, conv.to_dict())
             rename_field(result)
             return get_json_result(data=result)
 
-    #******************For dialog******************
+    # ******************For dialog******************
     conv.message.append(msg[-1])
     e, dia = DialogService.get_by_id(conv.dialog_id)
     if not e:
-        return get_data_error_result(retmsg="Dialog not found!")
+        return get_data_error_result(message="Dialog not found!")
     del req["conversation_id"]
     del req["messages"]
 
@@ -315,14 +311,14 @@ def completion():
             for ans in chat(dia, msg, True, **req):
                 fillin_conv(ans)
                 rename_field(ans)
-                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans},
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                            ensure_ascii=False) + "\n\n"
             API4ConversationService.append_message(conv.id, conv.to_dict())
         except Exception as e:
-            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
     if req.get("stream", True):
         resp = Response(stream(), mimetype="text/event-stream")
@@ -331,7 +327,7 @@ def completion():
         resp.headers.add_header("X-Accel-Buffering", "no")
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp
 
     answer = None
     for ans in chat(dia, msg, **req):
         answer = ans
@@ -345,25 +341,25 @@ def completion():
         return server_error_response(e)
 
 
-@manager.route('/conversation/<conversation_id>', methods=['GET'])
+@manager.route('/conversation/<conversation_id>', methods=['GET'])  # noqa: F821
 # @login_required
 def get(conversation_id):
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     try:
         e, conv = API4ConversationService.get_by_id(conversation_id)
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
 
         conv = conv.to_dict()
         if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
-            return get_json_result(data=False, retmsg='Token is not valid for this conversation_id!"',
-                                   retcode=RetCode.AUTHENTICATION_ERROR)
+            return get_json_result(data=False, message='Token is not valid for this conversation_id!"',
+                                   code=settings.RetCode.AUTHENTICATION_ERROR)
 
         for referenct_i in conv['reference']:
             if referenct_i is None or len(referenct_i) == 0:
                 continue
@@ -376,14 +372,14 @@ def get(conversation_id):
         return server_error_response(e)
 
 
-@manager.route('/document/upload', methods=['POST'])
+@manager.route('/document/upload', methods=['POST'])  # noqa: F821
 @validate_request("kb_name")
 def upload():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     kb_name = request.form.get("kb_name").strip()
     tenant_id = objs[0].tenant_id
@@ -392,19 +388,19 @@ def upload():
         e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         kb_id = kb.id
     except Exception as e:
         return server_error_response(e)
 
     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
 
     file = request.files['file']
     if file.filename == '':
         return get_json_result(
-            data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
 
     root_folder = FileService.get_root_folder(tenant_id)
     pf_id = root_folder["id"]
@@ -415,7 +411,7 @@ def upload():
     try:
         if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
             return get_data_error_result(
-                retmsg="Exceed the maximum file number of a free user!")
+                message="Exceed the maximum file number of a free user!")
 
         filename = duplicate_name(
             DocumentService.query,
@@ -424,7 +420,7 @@ def upload():
         filetype = filename_type(filename)
         if not filetype:
             return get_data_error_result(
-                retmsg="This type of file has not been supported yet!")
+                message="This type of file has not been supported yet!")
 
         location = filename
         while STORAGE_IMPL.obj_exist(kb_id, location):
@@ -473,7 +469,7 @@ def upload():
             # if str(req["run"]) == TaskStatus.CANCEL.value:
             tenant_id = DocumentService.get_tenant_id(doc["id"])
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
 
             # e, doc = DocumentService.get_by_id(doc["id"])
             TaskService.filter_delete([Task.doc_id == doc["id"]])
@@ -488,37 +484,37 @@ def upload():
     return get_json_result(data=doc_result.to_json())
 
 
-@manager.route('/document/upload_and_parse', methods=['POST'])
+@manager.route('/document/upload_and_parse', methods=['POST'])  # noqa: F821
 @validate_request("conversation_id")
 def upload_parse():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
 
     file_objs = request.files.getlist('file')
     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
 
     doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, objs[0].tenant_id)
     return get_json_result(data=doc_ids)
 
 
-@manager.route('/list_chunks', methods=['POST'])
+@manager.route('/list_chunks', methods=['POST'])  # noqa: F821
 # @login_required
 def list_chunks():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     req = request.json
 
@@ -532,15 +528,16 @@ def list_chunks():
             doc_id = req['doc_id']
         else:
             return get_json_result(
-                data=False, retmsg="Can't find doc_name or doc_id"
+                data=False, message="Can't find doc_name or doc_id"
             )
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
 
-        res = retrievaler.chunk_list(doc_id=doc_id, tenant_id=tenant_id)
+        res = settings.retrievaler.chunk_list(doc_id, tenant_id, kb_ids)
         res = [
             {
                 "content": res_item["content_with_weight"],
                 "doc_name": res_item["docnm_kwd"],
-                "img_id": res_item["img_id"]
+                "image_id": res_item["img_id"]
             } for res_item in res
         ]
 
@@ -550,14 +547,14 @@ def list_chunks():
         return get_json_result(data=res)
 
 
-@manager.route('/list_kb_docs', methods=['POST'])
+@manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
 # @login_required
 def list_kb_docs():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     req = request.json
     tenant_id = objs[0].tenant_id
@@ -567,7 +564,7 @@ def list_kb_docs():
         e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         kb_id = kb.id
 
     except Exception as e:
@@ -589,28 +586,29 @@ def list_kb_docs():
     except Exception as e:
         return server_error_response(e)
 
-@manager.route('/document/infos', methods=['POST'])
+
+@manager.route('/document/infos', methods=['POST'])  # noqa: F821
 @validate_request("doc_ids")
 def docinfos():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     doc_ids = req["doc_ids"]
     docs = DocumentService.get_by_ids(doc_ids)
     return get_json_result(data=list(docs.dicts()))
 
 
-@manager.route('/document', methods=['DELETE'])
+@manager.route('/document', methods=['DELETE'])  # noqa: F821
 # @login_required
 def document_rm():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     tenant_id = objs[0].tenant_id
     req = request.json
@@ -622,7 +620,7 @@ def document_rm():
 
         if not doc_ids:
             return get_json_result(
-                data=False, retmsg="Can't find doc_names or doc_ids"
+                data=False, message="Can't find doc_names or doc_ids"
             )
 
     except Exception as e:
@@ -637,16 +635,16 @@ def document_rm():
         try:
             e, doc = DocumentService.get_by_id(doc_id)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
 
             b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
 
             if not DocumentService.remove_document(doc, tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")
 
             f2d = File2DocumentService.get_by_document_id(doc_id)
             FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
@@ -657,12 +655,12 @@ def document_rm():
             errors += str(e)
 
     if errors:
-        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
+        return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)
 
     return get_json_result(data=True)
 
 
-@manager.route('/completion_aibotk', methods=['POST'])
+@manager.route('/completion_aibotk', methods=['POST'])  # noqa: F821
 @validate_request("Authorization", "conversation_id", "word")
 def completion_faq():
     import base64
@@ -672,16 +670,18 @@ def completion_faq():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
-    if "quote" not in req: req["quote"] = True
+        return get_data_error_result(message="Conversation not found!")
+    if "quote" not in req:
+        req["quote"] = True
 
     msg = []
     msg.append({"role": "user", "content": req["word"]})
-    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
     message_id = msg[-1]["id"]
 
     def fillin_conv(ans):
@@ -757,7 +757,7 @@ def completion_faq():
     conv.message.append(msg[-1])
     e, dia = DialogService.get_by_id(conv.dialog_id)
     if not e:
-        return get_data_error_result(retmsg="Dialog not found!")
+        return get_data_error_result(message="Dialog not found!")
     del req["conversation_id"]
 
     if not conv.reference:
@@ -802,17 +802,17 @@ def completion_faq():
         return server_error_response(e)
 
 
-@manager.route('/retrieval', methods=['POST'])
+@manager.route('/retrieval', methods=['POST'])  # noqa: F821
 @validate_request("kb_id", "question")
 def retrieval():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
 
     req = request.json
-    kb_ids = req.get("kb_id",[])
+    kb_ids = req.get("kb_id", [])
     doc_ids = req.get("doc_ids", [])
     question = req.get("question")
     page = int(req.get("page", 1))
@@ -826,26 +826,26 @@ def retrieval():
         embd_nms = list(set([kb.embd_id for kb in kbs]))
         if len(embd_nms) != 1:
             return get_json_result(
-                data=False, retmsg='Knowledge bases use different embedding models or does not exist."', retcode=RetCode.AUTHENTICATION_ERROR)
+                data=False, message='Knowledge bases use different embedding models or does not exist."',
+                code=settings.RetCode.AUTHENTICATION_ERROR)
 
         embd_mdl = TenantLLMService.model_instance(
             kbs[0].tenant_id, LLMType.EMBEDDING.value, llm_name=kbs[0].embd_id)
         rerank_mdl = None
         if req.get("rerank_id"):
             rerank_mdl = TenantLLMService.model_instance(
                 kbs[0].tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
         if req.get("keyword", False):
             chat_mdl = TenantLLMService.model_instance(kbs[0].tenant_id, LLMType.CHAT)
             question += keyword_extraction(chat_mdl, question)
-        ranks = retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
+        ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
                                       similarity_threshold, vector_similarity_weight, top,
                                       doc_ids, rerank_mdl=rerank_mdl)
         for c in ranks["chunks"]:
-            if "vector" in c:
-                del c["vector"]
+            c.pop("vector", None)
         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)
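The /retrieval request shape is unchanged by this diff; only the error payload keys and the settings-qualified retriever moved. A hypothetical client call (host, port, path prefix, and token are placeholders, not taken from the diff):

    import json
    from urllib.request import Request, urlopen

    payload = {"kb_id": ["<kb-id>"], "question": "What is RAGFlow?", "page": 1}
    req = Request(
        "http://localhost:9380/v1/api/retrieval",  # assumed mount point
        data=json.dumps(payload).encode(),
        headers={"Authorization": "Bearer <api-token>",
                 "Content-Type": "application/json"},
    )
    with urlopen(req) as resp:
        print(json.load(resp)["data"]["chunks"][:1])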
@@ -14,26 +14,25 @@
 # limitations under the License.
 #
 import json
-from functools import partial
+import traceback
 from flask import request, Response
 from flask_login import login_required, current_user
 from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
-from api.db.services.dialog_service import full_question
-from api.db.services.user_service import TenantService
 from api.settings import RetCode
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
 from agent.canvas import Canvas
 from peewee import MySQLDatabase, PostgresqlDatabase
+from api.db.db_models import APIToken
 
 
-@manager.route('/templates', methods=['GET'])
+@manager.route('/templates', methods=['GET'])  # noqa: F821
 @login_required
 def templates():
     return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.get_all()])
 
 
-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def canvas_list():
     return get_json_result(data=sorted([c.to_dict() for c in \
@@ -41,53 +40,68 @@ def canvas_list():
     )
 
 
-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @validate_request("canvas_ids")
 @login_required
 def rm():
     for i in request.json["canvas_ids"]:
         if not UserCanvasService.query(user_id=current_user.id,id=i):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
         UserCanvasService.delete_by_id(i)
     return get_json_result(data=True)
 
 
-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @validate_request("dsl", "title")
 @login_required
 def save():
     req = request.json
     req["user_id"] = current_user.id
-    if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
+    if not isinstance(req["dsl"], str):
+        req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
+
     req["dsl"] = json.loads(req["dsl"])
     if "id" not in req:
         if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
-            return server_error_response(ValueError("Duplicated title."))
+            return get_data_error_result(f"{req['title'].strip()} already exists.")
         req["id"] = get_uuid()
         if not UserCanvasService.save(**req):
-            return get_data_error_result(retmsg="Fail to save canvas.")
+            return get_data_error_result(message="Fail to save canvas.")
     else:
         if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
         UserCanvasService.update_by_id(req["id"], req)
     return get_json_result(data=req)
 
 
-@manager.route('/get/<canvas_id>', methods=['GET'])
+@manager.route('/get/<canvas_id>', methods=['GET'])  # noqa: F821
 @login_required
 def get(canvas_id):
     e, c = UserCanvasService.get_by_id(canvas_id)
     if not e:
-        return get_data_error_result(retmsg="canvas not found.")
+        return get_data_error_result(message="canvas not found.")
+    return get_json_result(data=c.to_dict())
+
+
+@manager.route('/getsse/<canvas_id>', methods=['GET'])  # type: ignore # noqa: F821
+def getsse(canvas_id):
+    token = request.headers.get('Authorization').split()
+    if len(token) != 2:
+        return get_data_error_result(message='Authorization is not valid!"')
+    token = token[1]
+    objs = APIToken.query(beta=token)
+    if not objs:
+        return get_data_error_result(message='Token is not valid!"')
+    e, c = UserCanvasService.get_by_id(canvas_id)
+    if not e:
+        return get_data_error_result(message="canvas not found.")
     return get_json_result(data=c.to_dict())
 
 
-@manager.route('/completion', methods=['POST'])
+@manager.route('/completion', methods=['POST'])  # noqa: F821
 @validate_request("id")
 @login_required
 def run():
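The new /getsse/<canvas_id> route authenticates with an API token's beta field rather than a login session, so a shared beta token can fetch a canvas without being logged in. A hypothetical probe (host, path prefix, and token are placeholders):

    from urllib.request import Request, urlopen

    req = Request(
        "http://localhost:9380/v1/canvas/getsse/<canvas-id>",  # assumed mount point
        headers={"Authorization": "Bearer <beta-token>"},
    )
    with urlopen(req) as resp:
        print(resp.read()[:200])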
@@ -95,11 +109,11 @@ def run():
     stream = req.get("stream", True)
     e, cvs = UserCanvasService.get_by_id(req["id"])
     if not e:
-        return get_data_error_result(retmsg="canvas not found.")
+        return get_data_error_result(message="canvas not found.")
     if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
         return get_json_result(
-            data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-            retcode=RetCode.OPERATING_ERROR)
+            data=False, message='Only owner of canvas authorized for this operation.',
+            code=RetCode.OPERATING_ERROR)
 
     if not isinstance(cvs.dsl, str):
         cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
@@ -110,39 +124,40 @@ def run():
         canvas = Canvas(cvs.dsl, current_user.id)
         if "message" in req:
             canvas.messages.append({"role": "user", "content": req["message"], "id": message_id})
-            if len([m for m in canvas.messages if m["role"] == "user"]) > 1:
-                ten = TenantService.get_by_user_id(current_user.id)[0]
-                req["message"] = full_question(ten["tenant_id"], ten["llm_id"], canvas.messages)
             canvas.add_user_input(req["message"])
-        answer = canvas.run(stream=stream)
-        print(canvas)
     except Exception as e:
         return server_error_response(e)
 
-    assert answer is not None, "Nothing. Is it over?"
 
     if stream:
-        assert isinstance(answer, partial), "Nothing. Is it over?"
 
         def sse():
             nonlocal answer, cvs
             try:
-                for ans in answer():
+                for ans in canvas.run(stream=True):
+                    if ans.get("running_status"):
+                        yield "data:" + json.dumps({"code": 0, "message": "",
+                                                    "data": {"answer": ans["content"],
+                                                             "running_status": True}},
+                                                   ensure_ascii=False) + "\n\n"
+                        continue
                     for k in ans.keys():
                         final_ans[k] = ans[k]
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
 
                 canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+                canvas.history.append(("assistant", final_ans["content"]))
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
                 UserCanvasService.update_by_id(req["id"], cvs.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                cvs.dsl = json.loads(str(canvas))
+                UserCanvasService.update_by_id(req["id"], cvs.to_dict())
+                traceback.print_exc()
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
         resp = Response(sse(), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")
@ -151,16 +166,19 @@ def run():
|
|||||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||||
return resp
|
return resp
|
||||||
|
|
||||||
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
|
for answer in canvas.run(stream=False):
|
||||||
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
if answer.get("running_status"):
|
||||||
if final_ans.get("reference"):
|
continue
|
||||||
canvas.reference.append(final_ans["reference"])
|
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
|
||||||
cvs.dsl = json.loads(str(canvas))
|
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
|
||||||
UserCanvasService.update_by_id(req["id"], cvs.to_dict())
|
if final_ans.get("reference"):
|
||||||
return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})
|
canvas.reference.append(final_ans["reference"])
|
||||||
|
cvs.dsl = json.loads(str(canvas))
|
||||||
|
UserCanvasService.update_by_id(req["id"], cvs.to_dict())
|
||||||
|
return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})
|
||||||
|
|
||||||
|
|
||||||
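The streaming branch now emits interim `running_status` frames alongside answer frames, and every frame carries the new `code`/`message` envelope that replaces `retcode`/`retmsg`. A minimal consumer sketch under those assumptions; the URL and payload are hypothetical and authentication is omitted:

    import json
    import requests

    # Hypothetical endpoint and canvas id; auth headers omitted for brevity.
    with requests.post("http://127.0.0.1:9380/v1/canvas/completion",
                       json={"id": "your-canvas-id", "message": "hi", "stream": True},
                       stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line.startswith("data:"):
                continue
            frame = json.loads(line[len("data:"):])
            if frame["data"] is True:
                break                          # terminal frame: {"data": true}
            if frame["data"].get("running_status"):
                continue                       # progress frame, not part of the answer
            print(frame["data"]["answer"])     # cumulative answer text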
-@manager.route('/reset', methods=['POST'])
+@manager.route('/reset', methods=['POST'])  # noqa: F821
 @validate_request("id")
 @login_required
 def reset():
@@ -168,11 +186,11 @@ def reset():
     try:
         e, user_canvas = UserCanvasService.get_by_id(req["id"])
         if not e:
-            return get_data_error_result(retmsg="canvas not found.")
+            return get_data_error_result(message="canvas not found.")
         if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)

         canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
         canvas.reset()
@@ -183,7 +201,51 @@ def reset():
         return server_error_response(e)

-@manager.route('/test_db_connect', methods=['POST'])
+@manager.route('/input_elements', methods=['GET'])  # noqa: F821
+@login_required
+def input_elements():
+    cvs_id = request.args.get("id")
+    cpn_id = request.args.get("component_id")
+    try:
+        e, user_canvas = UserCanvasService.get_by_id(cvs_id)
+        if not e:
+            return get_data_error_result(message="canvas not found.")
+        if not UserCanvasService.query(user_id=current_user.id, id=cvs_id):
+            return get_json_result(
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
+
+        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
+        return get_json_result(data=canvas.get_component_input_elements(cpn_id))
+    except Exception as e:
+        return server_error_response(e)
+
+
+@manager.route('/debug', methods=['POST'])  # noqa: F821
+@validate_request("id", "component_id", "params")
+@login_required
+def debug():
+    req = request.json
+    for p in req["params"]:
+        assert p.get("key")
+    try:
+        e, user_canvas = UserCanvasService.get_by_id(req["id"])
+        if not e:
+            return get_data_error_result(message="canvas not found.")
+        if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+            return get_json_result(
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
+
+        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
+        canvas.get_component(req["component_id"])["obj"]._param.debug_inputs = req["params"]
+        df = canvas.get_component(req["component_id"])["obj"].debug()
+        return get_json_result(data=df.to_dict(orient="records"))
+    except Exception as e:
+        return server_error_response(e)
+
+
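The new `/debug` route feeds ad-hoc `params` into a single component and returns the resulting DataFrame as records. A plausible request sketch, assuming a session-authenticated client; the canvas id, component id, and values are all hypothetical:

    import requests  # hypothetical client call

    debug_payload = {
        "id": "your-canvas-id",
        "component_id": "Generate:0",   # hypothetical component id
        "params": [
            # every entry must carry a truthy "key": the handler asserts p.get("key")
            {"key": "user_query", "value": "What is RAGFlow?"},
        ],
    }
    resp = requests.post("http://127.0.0.1:9380/v1/canvas/debug",
                         json=debug_payload)   # session cookie omitted for brevity
    print(resp.json())  # DataFrame rows, serialized via to_dict(orient="records")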
+@manager.route('/test_db_connect', methods=['POST'])  # noqa: F821
 @validate_request("db_type", "database", "username", "host", "port", "password")
 @login_required
 def test_db_connect():
@@ -195,8 +257,26 @@ def test_db_connect():
         elif req["db_type"] == 'postgresql':
             db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
                                     password=req["password"])
-        db.connect()
+        elif req["db_type"] == 'mssql':
+            import pyodbc
+            connection_string = (
+                f"DRIVER={{ODBC Driver 17 for SQL Server}};"
+                f"SERVER={req['host']},{req['port']};"
+                f"DATABASE={req['database']};"
+                f"UID={req['username']};"
+                f"PWD={req['password']};"
+            )
+            db = pyodbc.connect(connection_string)
+            cursor = db.cursor()
+            cursor.execute("SELECT 1")
+            cursor.close()
+        else:
+            return server_error_response("Unsupported database type.")
+        if req["db_type"] != 'mssql':
+            db.connect()
         db.close()

         return get_json_result(data="Database Connection Successful!")
     except Exception as e:
         return server_error_response(e)
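With this branch, `test_db_connect` probes SQL Server through pyodbc (issuing a `SELECT 1`) while MySQL and PostgreSQL keep the peewee path; note the guard that skips `db.connect()` for mssql, since `pyodbc.connect()` connects eagerly. A sample payload exercising the new branch, with all values hypothetical:

    # Hypothetical payload for the mssql branch of /test_db_connect.
    mssql_probe = {
        "db_type": "mssql",
        "host": "db.example.internal",
        "port": 1433,
        "database": "sales",
        "username": "reader",
        "password": "secret",
    }
    # The handler builds an ODBC connection string from these fields, runs
    # "SELECT 1" through pyodbc, and closes the connection on success.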
@@ -15,15 +15,13 @@
 #
 import datetime
 import json
-import traceback

 from flask import request
 from flask_login import login_required, current_user
-from elasticsearch_dsl import Q

+from api.db.services.dialog_service import keyword_extraction
 from rag.app.qa import rmPrefix, beAdoc
-from rag.nlp import search, rag_tokenizer, keyword_extraction
+from rag.nlp import search, rag_tokenizer
-from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils import rmSpace
 from api.db import LLMType, ParserType
 from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -31,13 +29,13 @@ from api.db.services.llm_service import LLMBundle
 from api.db.services.user_service import UserTenantService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.db.services.document_service import DocumentService
-from api.settings import RetCode, retrievaler, kg_retrievaler
+from api import settings
 from api.utils.api_utils import get_json_result
-import hashlib
+import xxhash
 import re

-@manager.route('/list', methods=['POST'])
+@manager.route('/list', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_id")
 def list_chunk():
@@ -49,16 +47,17 @@ def list_chunk():
     try:
         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         e, doc = DocumentService.get_by_id(doc_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
         query = {
             "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
         }
         if "available_int" in req:
             query["available_int"] = int(req["available_int"])
-        sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
+        sres = settings.retrievaler.search(query, search.index_name(tenant_id), kb_ids, highlight=True)
         res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
         for id in sres.ids:
             d = {
@@ -69,60 +68,55 @@ def list_chunk():
                 "doc_id": sres.field[id]["doc_id"],
                 "docnm_kwd": sres.field[id]["docnm_kwd"],
                 "important_kwd": sres.field[id].get("important_kwd", []),
-                "img_id": sres.field[id].get("img_id", ""),
-                "available_int": sres.field[id].get("available_int", 1),
-                "positions": sres.field[id].get("position_int", "").split("\t")
+                "question_kwd": sres.field[id].get("question_kwd", []),
+                "image_id": sres.field[id].get("img_id", ""),
+                "available_int": int(sres.field[id].get("available_int", 1)),
+                "positions": sres.field[id].get("position_int", []),
             }
-            if len(d["positions"]) % 5 == 0:
-                poss = []
-                for i in range(0, len(d["positions"]), 5):
-                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
-                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
-                d["positions"] = poss
+            assert isinstance(d["positions"], list)
+            assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
             res["chunks"].append(d)
         return get_json_result(data=res)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found!',
                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)
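After this change `position_int` is stored and returned as a list of 5-element lists rather than a tab-separated string, so the old parse-and-regroup loop collapses into two assertions. A small sketch of the shape difference; the numbers and the field order are made up for illustration:

    # Old wire format: one flat string, five numbers per box.
    old_positions = "1\t100\t200\t300\t400"
    boxes = [list(map(float, old_positions.split("\t")[i:i + 5]))
             for i in range(0, len(old_positions.split("\t")), 5)]

    # New wire format: already structured, one five-number quintet per box
    # (exact field semantics assumed, values fabricated).
    new_positions = [[1, 100, 200, 300, 400]]

    assert isinstance(new_positions, list)
    assert all(len(box) == 5 for box in new_positions)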
-@manager.route('/get', methods=['GET'])
+@manager.route('/get', methods=['GET'])  # noqa: F821
 @login_required
 def get():
     chunk_id = request.args["chunk_id"]
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
-        res = ELASTICSEARCH.get(
-            chunk_id, search.index_name(
-                tenants[0].tenant_id))
-        if not res.get("found"):
-            return server_error_response("Chunk not found")
-        id = res["_id"]
-        res = res["_source"]
-        res["chunk_id"] = id
+            return get_data_error_result(message="Tenant not found!")
+        tenant_id = tenants[0].tenant_id
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
+        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
+        if chunk is None:
+            return server_error_response(Exception("Chunk not found"))
         k = []
-        for n in res.keys():
+        for n in chunk.keys():
             if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                 k.append(n)
         for n in k:
-            del res[n]
+            del chunk[n]

-        return get_json_result(data=res)
+        return get_json_result(data=chunk)
     except Exception as e:
         if str(e).find("NotFoundError") >= 0:
-            return get_json_result(data=False, retmsg=f'Chunk not found!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='Chunk not found!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)
-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_id", "chunk_id", "content_with_weight",
-                  "important_kwd")
+                  "important_kwd", "question_kwd")
 def set():
     req = request.json
     d = {
@@ -132,20 +126,22 @@ def set():
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
     d["important_kwd"] = req["important_kwd"]
     d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
+    d["question_kwd"] = req["question_kwd"]
+    d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
     if "available_int" in req:
         d["available_int"] = req["available_int"]

     try:
         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         embd_id = DocumentService.get_embd_id(req["doc_id"])
         embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_id)

         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")

         if doc.parser_id == ParserType.QA:
             arr = [
@@ -154,49 +150,51 @@ def set():
                 req["content_with_weight"]) if len(t) > 1]
             if len(arr) != 2:
                 return get_data_error_result(
-                    retmsg="Q&A must be separated by TAB/ENTER key.")
+                    message="Q&A must be separated by TAB/ENTER key.")
             q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
             d = beAdoc(d, arr[0], arr[1], not any(
                 [rag_tokenizer.is_chinese(t) for t in q + a]))

-        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
+        v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
         v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
-        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
+        settings.docStoreConn.update({"id": req["chunk_id"]}, d, search.index_name(tenant_id), doc.kb_id)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
-@manager.route('/switch', methods=['POST'])
+@manager.route('/switch', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("chunk_ids", "available_int", "doc_id")
 def switch():
     req = request.json
     try:
-        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
-        if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
-        if not ELASTICSEARCH.upsert([{"id": i, "available_int": int(req["available_int"])} for i in req["chunk_ids"]],
-                                    search.index_name(tenant_id)):
-            return get_data_error_result(retmsg="Index updating failure")
+        e, doc = DocumentService.get_by_id(req["doc_id"])
+        if not e:
+            return get_data_error_result(message="Document not found!")
+        for cid in req["chunk_ids"]:
+            if not settings.docStoreConn.update({"id": cid},
+                                                {"available_int": int(req["available_int"])},
+                                                search.index_name(DocumentService.get_tenant_id(req["doc_id"])),
+                                                doc.kb_id):
+                return get_data_error_result(message="Index updating failure")
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("chunk_ids", "doc_id")
 def rm():
     req = request.json
     try:
-        if not ELASTICSEARCH.deleteByQuery(
-                Q("ids", values=req["chunk_ids"]), search.index_name(current_user.id)):
-            return get_data_error_result(retmsg="Index updating failure")
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
+        if not settings.docStoreConn.delete({"id": req["chunk_ids"]}, search.index_name(current_user.id), doc.kb_id):
+            return get_data_error_result(message="Index updating failure")
         deleted_chunk_ids = req["chunk_ids"]
         chunk_number = len(deleted_chunk_ids)
         DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
@@ -205,41 +203,48 @@ def rm():
         return server_error_response(e)

-@manager.route('/create', methods=['POST'])
+@manager.route('/create', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_id", "content_with_weight")
 def create():
     req = request.json
-    md5 = hashlib.md5()
-    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
-    chunck_id = md5.hexdigest()
+    chunck_id = xxhash.xxh64((req["content_with_weight"] + req["doc_id"]).encode("utf-8")).hexdigest()
     d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
          "content_with_weight": req["content_with_weight"]}
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
     d["important_kwd"] = req.get("important_kwd", [])
     d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
+    d["question_kwd"] = req.get("question_kwd", [])
+    d["question_tks"] = rag_tokenizer.tokenize("\n".join(req.get("question_kwd", [])))
     d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
     d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         d["kb_id"] = [doc.kb_id]
         d["docnm_kwd"] = doc.name
+        d["title_tks"] = rag_tokenizer.tokenize(doc.name)
         d["doc_id"] = doc.id

         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

+        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
+        if not e:
+            return get_data_error_result(message="Knowledgebase not found!")
+        if kb.pagerank:
+            d["pagerank_fea"] = kb.pagerank

         embd_id = DocumentService.get_embd_id(req["doc_id"])
         embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)

-        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
+        v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
         v = 0.1 * v[0] + 0.9 * v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
-        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
+        settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)

         DocumentService.increment_chunk_num(
             doc.id, doc.kb_id, c, 1, 0)
@@ -248,7 +253,7 @@ def create():
         return server_error_response(e)

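Chunk ids switch from MD5 to xxHash64 of the same `content + doc_id` string; xxhash is a much faster non-cryptographic hash, which is fine here because the digest is only a deduplication key, not a security boundary. Equivalent one-liners for comparison, with fabricated input values:

    import hashlib
    import xxhash

    payload = ("some chunk text" + "doc-123").encode("utf-8")  # fabricated values

    old_id = hashlib.md5(payload).hexdigest()   # 32 hex chars
    new_id = xxhash.xxh64(payload).hexdigest()  # 16 hex chars, much faster to compute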
-@manager.route('/retrieval_test', methods=['POST'])
+@manager.route('/retrieval_test', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("kb_id", "question")
 def retrieval_test():
@@ -256,28 +261,31 @@ def retrieval_test():
     page = int(req.get("page", 1))
     size = int(req.get("size", 30))
     question = req["question"]
-    kb_id = req["kb_id"]
-    if isinstance(kb_id, str): kb_id = [kb_id]
+    kb_ids = req["kb_id"]
+    if isinstance(kb_ids, str):
+        kb_ids = [kb_ids]
     doc_ids = req.get("doc_ids", [])
     similarity_threshold = float(req.get("similarity_threshold", 0.0))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
+    tenant_ids = []

     try:
         tenants = UserTenantService.query(user_id=current_user.id)
-        for kid in kb_id:
+        for kb_id in kb_ids:
             for tenant in tenants:
                 if KnowledgebaseService.query(
-                        tenant_id=tenant.tenant_id, id=kid):
+                        tenant_id=tenant.tenant_id, id=kb_id):
+                    tenant_ids.append(tenant.tenant_id)
                     break
             else:
                 return get_json_result(
-                    data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                    data=False, message='Only owner of knowledgebase authorized for this operation.',
+                    code=settings.RetCode.OPERATING_ERROR)

-        e, kb = KnowledgebaseService.get_by_id(kb_id[0])
+        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
         if not e:
-            return get_data_error_result(retmsg="Knowledgebase not found!")
+            return get_data_error_result(message="Knowledgebase not found!")

         embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

@@ -289,39 +297,57 @@ def retrieval_test():
             chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
             question += keyword_extraction(chat_mdl, question)

-        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
-        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
+        retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
+        ranks = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
                                similarity_threshold, vector_similarity_weight, top,
                                doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
         for c in ranks["chunks"]:
-            if "vector" in c:
-                del c["vector"]
+            c.pop("vector", None)

         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)

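`retrieval_test` now accepts either a single knowledge-base id or a list under `kb_id`, resolves a tenant id per knowledge base, and passes `tenant_ids`/`kb_ids` through to the retriever. A representative request body under those assumptions; the ids and question are hypothetical:

    # Hypothetical body for POST /retrieval_test.
    retrieval_payload = {
        "kb_id": ["kb-001", "kb-002"],   # a bare string "kb-001" is also accepted now
        "question": "How do I configure SSO?",
        "page": 1,
        "size": 30,
        "similarity_threshold": 0.2,
        "vector_similarity_weight": 0.3,
        "top_k": 1024,
        "highlight": True,               # forwarded to the retriever as-is
    }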
-@manager.route('/knowledge_graph', methods=['GET'])
+@manager.route('/knowledge_graph', methods=['GET'])  # noqa: F821
 @login_required
 def knowledge_graph():
     doc_id = request.args["doc_id"]
+    tenant_id = DocumentService.get_tenant_id(doc_id)
+    kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
     req = {
-        "doc_ids":[doc_id],
+        "doc_ids": [doc_id],
         "knowledge_graph_kwd": ["graph", "mind_map"]
     }
-    tenant_id = DocumentService.get_tenant_id(doc_id)
-    sres = retrievaler.search(req, search.index_name(tenant_id))
+    sres = settings.retrievaler.search(req, search.index_name(tenant_id), kb_ids)
     obj = {"graph": {}, "mind_map": {}}
     for id in sres.ids[:2]:
         ty = sres.field[id]["knowledge_graph_kwd"]
         try:
-            obj[ty] = json.loads(sres.field[id]["content_with_weight"])
-        except Exception as e:
-            print(traceback.format_exc(), flush=True)
+            content_json = json.loads(sres.field[id]["content_with_weight"])
+        except Exception:
+            continue
+
+        if ty == 'mind_map':
+            node_dict = {}
+
+            def repeat_deal(content_json, node_dict):
+                if 'id' in content_json:
+                    if content_json['id'] in node_dict:
+                        node_name = content_json['id']
+                        content_json['id'] += f"({node_dict[content_json['id']]})"
+                        node_dict[node_name] += 1
+                    else:
+                        node_dict[content_json['id']] = 1
+                if 'children' in content_json and content_json['children']:
+                    for item in content_json['children']:
+                        repeat_deal(item, node_dict)
+
+            repeat_deal(content_json, node_dict)
+
+        obj[ty] = content_json
+
     return get_json_result(data=obj)

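The new `repeat_deal` pass makes mind-map node ids unique by suffixing repeats with a running counter as it walks the tree. A tiny standalone illustration of that logic, with a fabricated tree (the function is restated so the snippet runs on its own):

    # Re-stated from the route above so the snippet is self-contained.
    def repeat_deal(content_json, node_dict):
        if 'id' in content_json:
            if content_json['id'] in node_dict:
                node_name = content_json['id']
                content_json['id'] += f"({node_dict[content_json['id']]})"
                node_dict[node_name] += 1
            else:
                node_dict[content_json['id']] = 1
        if 'children' in content_json and content_json['children']:
            for item in content_json['children']:
                repeat_deal(item, node_dict)

    # Fabricated tree in which two children share the id "Intro".
    tree = {"id": "root", "children": [{"id": "Intro"}, {"id": "Intro"}]}
    repeat_deal(tree, {})
    assert tree["children"][1]["id"] == "Intro(1)"  # the repeat gets a counter suffix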
@@ -17,22 +17,23 @@ import json
 import re
 import traceback
 from copy import deepcopy

+from api.db.db_models import APIToken
+from api.db.services.conversation_service import ConversationService, structure_answer
 from api.db.services.user_service import UserTenantService
 from flask import request, Response
 from flask_login import login_required, current_user

 from api.db import LLMType
-from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
+from api.db.services.dialog_service import DialogService, chat, ask
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
-from api.settings import RetCode, retrievaler
+from api import settings
-from api.utils import get_uuid
 from api.utils.api_utils import get_json_result
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from graphrag.mind_map_extractor import MindMapExtractor


-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @login_required
 def set_conversation():
     req = request.json
@@ -43,11 +44,11 @@ def set_conversation():
         del req["conversation_id"]
         try:
             if not ConversationService.update_by_id(conv_id, req):
-                return get_data_error_result(retmsg="Conversation not found!")
+                return get_data_error_result(message="Conversation not found!")
             e, conv = ConversationService.get_by_id(conv_id)
             if not e:
                 return get_data_error_result(
-                    retmsg="Fail to update a conversation!")
+                    message="Fail to update a conversation!")
             conv = conv.to_dict()
             return get_json_result(data=conv)
         except Exception as e:
@@ -56,7 +57,7 @@ def set_conversation():
     try:
         e, dia = DialogService.get_by_id(req["dialog_id"])
         if not e:
-            return get_data_error_result(retmsg="Dialog not found")
+            return get_data_error_result(message="Dialog not found")
         conv = {
             "id": conv_id,
             "dialog_id": req["dialog_id"],
@@ -66,36 +67,78 @@ def set_conversation():
         ConversationService.save(**conv)
         e, conv = ConversationService.get_by_id(conv["id"])
         if not e:
-            return get_data_error_result(retmsg="Fail to new a conversation!")
+            return get_data_error_result(message="Fail to new a conversation!")
         conv = conv.to_dict()
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)

-@manager.route('/get', methods=['GET'])
+@manager.route('/get', methods=['GET'])  # noqa: F821
 @login_required
 def get():
     conv_id = request.args["conversation_id"]
     try:
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         tenants = UserTenantService.query(user_id=current_user.id)
+        avatar = None
         for tenant in tenants:
-            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
+            dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
+            if dialog and len(dialog) > 0:
+                avatar = dialog[0].icon
                 break
         else:
             return get_json_result(
-                data=False, retmsg=f'Only owner of conversation authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of conversation authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)

+        def get_value(d, k1, k2):
+            return d.get(k1, d.get(k2))
+
+        for ref in conv.reference:
+            if isinstance(ref, list):
+                continue
+            ref["chunks"] = [{
+                "id": get_value(ck, "chunk_id", "id"),
+                "content": get_value(ck, "content", "content_with_weight"),
+                "document_id": get_value(ck, "doc_id", "document_id"),
+                "document_name": get_value(ck, "docnm_kwd", "document_name"),
+                "dataset_id": get_value(ck, "kb_id", "dataset_id"),
+                "image_id": get_value(ck, "image_id", "img_id"),
+                "positions": get_value(ck, "positions", "position_int"),
+            } for ck in ref.get("chunks", [])]
+
         conv = conv.to_dict()
+        conv["avatar"] = avatar
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)

+
+@manager.route('/getsse/<dialog_id>', methods=['GET'])  # type: ignore # noqa: F821
+def getsse(dialog_id):
+    token = request.headers.get('Authorization').split()
+    if len(token) != 2:
+        return get_data_error_result(message='Authorization is not valid!')
+    token = token[1]
+    objs = APIToken.query(beta=token)
+    if not objs:
+        return get_data_error_result(message='Token is not valid!')
+    try:
+        e, conv = DialogService.get_by_id(dialog_id)
+        if not e:
+            return get_data_error_result(message="Dialog not found!")
+        conv = conv.to_dict()
+        conv["avatar"] = conv["icon"]
+        del conv["icon"]
+        return get_json_result(data=conv)
+    except Exception as e:
+        return server_error_response(e)
+
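Both `get()` above and the `completion()` path below normalize stored reference chunks through `get_value`, which tolerates either the legacy field names or the new ones. A quick sketch of what it does with a legacy-shaped chunk; the field values are fabricated:

    def get_value(d, k1, k2):
        # Prefer k1, fall back to k2; returns None if neither key exists.
        return d.get(k1, d.get(k2))

    legacy_chunk = {"chunk_id": "c1", "content_with_weight": "some text", "img_id": "i1"}

    normalized = {
        "id": get_value(legacy_chunk, "chunk_id", "id"),                       # "c1"
        "content": get_value(legacy_chunk, "content", "content_with_weight"),  # "some text"
        "image_id": get_value(legacy_chunk, "image_id", "img_id"),             # "i1"
    }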
-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @login_required
 def rm():
     conv_ids = request.json["conversation_ids"]
@@ -103,48 +146,46 @@ def rm():
         for cid in conv_ids:
             exist, conv = ConversationService.get_by_id(cid)
             if not exist:
-                return get_data_error_result(retmsg="Conversation not found!")
+                return get_data_error_result(message="Conversation not found!")
             tenants = UserTenantService.query(user_id=current_user.id)
             for tenant in tenants:
                 if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                     break
             else:
                 return get_json_result(
-                    data=False, retmsg=f'Only owner of conversation authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                    data=False, message='Only owner of conversation authorized for this operation.',
+                    code=settings.RetCode.OPERATING_ERROR)
             ConversationService.delete_by_id(cid)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_convsersation():
     dialog_id = request.args["dialog_id"]
     try:
         if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
             return get_json_result(
-                data=False, retmsg=f'Only owner of dialog authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of dialog authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         convs = ConversationService.query(
             dialog_id=dialog_id,
             order_by=ConversationService.model.create_time,
             reverse=True)

         convs = [d.to_dict() for d in convs]
         return get_json_result(data=convs)
     except Exception as e:
         return server_error_response(e)

-@manager.route('/completion', methods=['POST'])
+@manager.route('/completion', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "messages")
 def completion():
     req = request.json
-    # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
-    #     {"role": "user", "content": "Is there one in Shanghai?"}
-    # ]}
     msg = []
     for m in req["messages"]:
         if m["role"] == "system":
@@ -156,41 +197,49 @@ def completion():
     try:
         e, conv = ConversationService.get_by_id(req["conversation_id"])
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         conv.message = deepcopy(req["messages"])
         e, dia = DialogService.get_by_id(conv.dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         del req["conversation_id"]
         del req["messages"]

         if not conv.reference:
             conv.reference = []
-        conv.message.append({"role": "assistant", "content": "", "id": message_id})
+        else:
+            def get_value(d, k1, k2):
+                return d.get(k1, d.get(k2))
+
+            for ref in conv.reference:
+                if isinstance(ref, list):
+                    continue
+                ref["chunks"] = [{
+                    "id": get_value(ck, "chunk_id", "id"),
+                    "content": get_value(ck, "content", "content_with_weight"),
+                    "document_id": get_value(ck, "doc_id", "document_id"),
+                    "document_name": get_value(ck, "docnm_kwd", "document_name"),
+                    "dataset_id": get_value(ck, "kb_id", "dataset_id"),
+                    "image_id": get_value(ck, "image_id", "img_id"),
+                    "positions": get_value(ck, "positions", "position_int"),
+                } for ck in ref.get("chunks", [])]
+
+        if not conv.reference:
+            conv.reference = []
         conv.reference.append({"chunks": [], "doc_aggs": []})

-        def fillin_conv(ans):
-            nonlocal conv, message_id
-            if not conv.reference:
-                conv.reference.append(ans["reference"])
-            else:
-                conv.reference[-1] = ans["reference"]
-            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
-                                "id": message_id, "prompt": ans.get("prompt", "")}
-            ans["id"] = message_id
-
         def stream():
             nonlocal dia, msg, req, conv
             try:
                 for ans in chat(dia, msg, True, **req):
-                    fillin_conv(ans)
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                    ans = structure_answer(conv, ans, message_id, conv.id)
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
                 ConversationService.update_by_id(conv.id, conv.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                traceback.print_exc()
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

         if req.get("stream", True):
             resp = Response(stream(), mimetype="text/event-stream")
@@ -203,8 +252,7 @@ def completion():
         else:
             answer = None
             for ans in chat(dia, msg, **req):
-                answer = ans
-                fillin_conv(ans)
+                answer = structure_answer(conv, ans, message_id, req["conversation_id"])
                 ConversationService.update_by_id(conv.id, conv.to_dict())
                 break
             return get_json_result(data=answer)
@@ -212,19 +260,19 @@ def completion():
         return server_error_response(e)

-@manager.route('/tts', methods=['POST'])
+@manager.route('/tts', methods=['POST'])  # noqa: F821
 @login_required
 def tts():
     req = request.json
     text = req["text"]

-    tenants = TenantService.get_by_user_id(current_user.id)
+    tenants = TenantService.get_info_by(current_user.id)
     if not tenants:
-        return get_data_error_result(retmsg="Tenant not found!")
+        return get_data_error_result(message="Tenant not found!")

     tts_id = tenants[0]["tts_id"]
     if not tts_id:
-        return get_data_error_result(retmsg="No default TTS model is set")
+        return get_data_error_result(message="No default TTS model is set")

     tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

@@ -234,7 +282,7 @@ def tts():
             for chunk in tts_mdl.tts(txt):
                 yield chunk
         except Exception as e:
-            yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield ("data:" + json.dumps({"code": 500, "message": str(e),
                                          "data": {"answer": "**ERROR**: " + str(e)}},
                                         ensure_ascii=False)).encode('utf-8')

@@ -246,14 +294,14 @@ def tts():
     return resp

-@manager.route('/delete_msg', methods=['POST'])
+@manager.route('/delete_msg', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "message_id")
 def delete_msg():
     req = request.json
     e, conv = ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")

     conv = conv.to_dict()
     for i, msg in enumerate(conv["message"]):
@@ -269,14 +317,14 @@ def delete_msg():
     return get_json_result(data=conv)

-@manager.route('/thumbup', methods=['POST'])
+@manager.route('/thumbup', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "message_id")
 def thumbup():
     req = request.json
     e, conv = ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")
     up_down = req.get("set")
     feedback = req.get("feedback", "")
     conv = conv.to_dict()
@@ -284,32 +332,35 @@ def thumbup():
         if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
             if up_down:
                 msg["thumbup"] = True
-                if "feedback" in msg: del msg["feedback"]
+                if "feedback" in msg:
+                    del msg["feedback"]
             else:
                 msg["thumbup"] = False
-                if feedback: msg["feedback"] = feedback
+                if feedback:
+                    msg["feedback"] = feedback
             break

     ConversationService.update_by_id(conv["id"], conv)
     return get_json_result(data=conv)

-@manager.route('/ask', methods=['POST'])
+@manager.route('/ask', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("question", "kb_ids")
 def ask_about():
     req = request.json
     uid = current_user.id

     def stream():
         nonlocal req, uid
         try:
             for ans in ask(req["question"], req["kb_ids"], uid):
-                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
         except Exception as e:
-            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

     resp = Response(stream(), mimetype="text/event-stream")
     resp.headers.add_header("Cache-control", "no-cache")
@@ -319,7 +370,7 @@ def ask_about():
     return resp

-@manager.route('/mindmap', methods=['POST'])
+@manager.route('/mindmap', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("question", "kb_ids")
 def mindmap():
@@ -327,13 +378,13 @@ def mindmap():
     kb_ids = req["kb_ids"]
     e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
     if not e:
-        return get_data_error_result(retmsg="Knowledgebase not found!")
+        return get_data_error_result(message="Knowledgebase not found!")

     embd_mdl = TenantLLMService.model_instance(
         kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
-    ranks = retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
-                                  0.3, 0.3, aggs=False)
+    ranks = settings.retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
+                                           0.3, 0.3, aggs=False)
     mindmap = MindMapExtractor(chat_mdl)
     mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
     if "error" in mind_map:
@@ -341,7 +392,7 @@ def mindmap():
     return get_json_result(data=mind_map)


-@manager.route('/related_questions', methods=['POST'])
+@manager.route('/related_questions', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("question")
 def related_questions():
@@ -1,880 +0,0 @@
-#
-# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-import pathlib
-import re
-import warnings
-from functools import partial
-from io import BytesIO
-
-from elasticsearch_dsl import Q
-from flask import request, send_file
-from flask_login import login_required, current_user
-from httpx import HTTPError
-
-from api.contants import NAME_LENGTH_LIMIT
-from api.db import FileType, ParserType, FileSource, TaskStatus
-from api.db import StatusEnum
-from api.db.db_models import File
-from api.db.services import duplicate_name
-from api.db.services.document_service import DocumentService
-from api.db.services.file2document_service import File2DocumentService
-from api.db.services.file_service import FileService
-from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.user_service import TenantService
-from api.settings import RetCode
-from api.utils import get_uuid
-from api.utils.api_utils import construct_json_result, construct_error_response
-from api.utils.api_utils import construct_result, validate_request
-from api.utils.file_utils import filename_type, thumbnail
-from rag.app import book, laws, manual, naive, one, paper, presentation, qa, resume, table, picture, audio, email
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
-from rag.utils.storage_factory import STORAGE_IMPL
-
-MAXIMUM_OF_UPLOADING_FILES = 256
-
-
-# ------------------------------ create a dataset ---------------------------------------
-
-@manager.route("/", methods=["POST"])
-@login_required  # use login
-@validate_request("name")  # check name key
-def create_dataset():
-    # Check if Authorization header is present
-    authorization_token = request.headers.get("Authorization")
-    if not authorization_token:
-        return construct_json_result(code=RetCode.AUTHENTICATION_ERROR, message="Authorization header is missing.")
-
-    # TODO: Login or API key
-    # objs = APIToken.query(token=authorization_token)
-    #
-    # # Authorization error
-    # if not objs:
-    #     return construct_json_result(code=RetCode.AUTHENTICATION_ERROR, message="Token is invalid.")
-    #
-    # tenant_id = objs[0].tenant_id
-
-    tenant_id = current_user.id
-    request_body = request.json
-
-    # In case that there's no name
-    if "name" not in request_body:
-        return construct_json_result(code=RetCode.DATA_ERROR, message="Expected 'name' field in request body")
|
|
||||||
|
|
||||||
dataset_name = request_body["name"]
|
|
||||||
|
|
||||||
# empty dataset_name
|
|
||||||
if not dataset_name:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Empty dataset name")
|
|
||||||
|
|
||||||
# In case that there's space in the head or the tail
|
|
||||||
dataset_name = dataset_name.strip()
|
|
||||||
|
|
||||||
# In case that the length of the name exceeds the limit
|
|
||||||
dataset_name_length = len(dataset_name)
|
|
||||||
if dataset_name_length > NAME_LENGTH_LIMIT:
|
|
||||||
return construct_json_result(
|
|
||||||
code=RetCode.DATA_ERROR,
|
|
||||||
message=f"Dataset name: {dataset_name} with length {dataset_name_length} exceeds {NAME_LENGTH_LIMIT}!")
|
|
||||||
|
|
||||||
# In case that there are other fields in the data-binary
|
|
||||||
if len(request_body.keys()) > 1:
|
|
||||||
name_list = []
|
|
||||||
for key_name in request_body.keys():
|
|
||||||
if key_name != "name":
|
|
||||||
name_list.append(key_name)
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"fields: {name_list}, are not allowed in request body.")
|
|
||||||
|
|
||||||
# If there is a duplicate name, it will modify it to make it unique
|
|
||||||
request_body["name"] = duplicate_name(
|
|
||||||
KnowledgebaseService.query,
|
|
||||||
name=dataset_name,
|
|
||||||
tenant_id=tenant_id,
|
|
||||||
status=StatusEnum.VALID.value)
|
|
||||||
try:
|
|
||||||
request_body["id"] = get_uuid()
|
|
||||||
request_body["tenant_id"] = tenant_id
|
|
||||||
request_body["created_by"] = tenant_id
|
|
||||||
exist, t = TenantService.get_by_id(tenant_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_result(code=RetCode.AUTHENTICATION_ERROR, message="Tenant not found.")
|
|
||||||
request_body["embd_id"] = t.embd_id
|
|
||||||
if not KnowledgebaseService.save(**request_body):
|
|
||||||
# failed to create new dataset
|
|
||||||
return construct_result()
|
|
||||||
return construct_json_result(code=RetCode.SUCCESS,
|
|
||||||
data={"dataset_name": request_body["name"], "dataset_id": request_body["id"]})
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# -----------------------------list datasets-------------------------------------------------------
|
|
||||||
|
|
||||||
@manager.route("/", methods=["GET"])
|
|
||||||
@login_required
|
|
||||||
def list_datasets():
|
|
||||||
offset = request.args.get("offset", 0)
|
|
||||||
count = request.args.get("count", -1)
|
|
||||||
orderby = request.args.get("orderby", "create_time")
|
|
||||||
desc = request.args.get("desc", True)
|
|
||||||
try:
|
|
||||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
|
||||||
datasets = KnowledgebaseService.get_by_tenant_ids_by_offset(
|
|
||||||
[m["tenant_id"] for m in tenants], current_user.id, int(offset), int(count), orderby, desc)
|
|
||||||
return construct_json_result(data=datasets, code=RetCode.SUCCESS, message=f"List datasets successfully!")
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
except HTTPError as http_err:
|
|
||||||
return construct_json_result(http_err)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------delete a dataset ----------------------------
|
|
||||||
|
|
||||||
@manager.route("/<dataset_id>", methods=["DELETE"])
|
|
||||||
@login_required
|
|
||||||
def remove_dataset(dataset_id):
|
|
||||||
try:
|
|
||||||
datasets = KnowledgebaseService.query(created_by=current_user.id, id=dataset_id)
|
|
||||||
|
|
||||||
# according to the id, searching for the dataset
|
|
||||||
if not datasets:
|
|
||||||
return construct_json_result(message=f"The dataset cannot be found for your current account.",
|
|
||||||
code=RetCode.OPERATING_ERROR)
|
|
||||||
|
|
||||||
# Iterating the documents inside the dataset
|
|
||||||
for doc in DocumentService.query(kb_id=dataset_id):
|
|
||||||
if not DocumentService.remove_document(doc, datasets[0].tenant_id):
|
|
||||||
# the process of deleting failed
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message="There was an error during the document removal process. "
|
|
||||||
"Please check the status of the RAGFlow server and try the removal again.")
|
|
||||||
# delete the other files
|
|
||||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
|
||||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
|
||||||
File2DocumentService.delete_by_document_id(doc.id)
|
|
||||||
|
|
||||||
# delete the dataset
|
|
||||||
if not KnowledgebaseService.delete_by_id(dataset_id):
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message="There was an error during the dataset removal process. "
|
|
||||||
"Please check the status of the RAGFlow server and try the removal again.")
|
|
||||||
# success
|
|
||||||
return construct_json_result(code=RetCode.SUCCESS, message=f"Remove dataset: {dataset_id} successfully")
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ------------------------------ get details of a dataset ----------------------------------------
|
|
||||||
|
|
||||||
@manager.route("/<dataset_id>", methods=["GET"])
|
|
||||||
@login_required
|
|
||||||
def get_dataset(dataset_id):
|
|
||||||
try:
|
|
||||||
dataset = KnowledgebaseService.get_detail(dataset_id)
|
|
||||||
if not dataset:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Can't find this dataset!")
|
|
||||||
return construct_json_result(data=dataset, code=RetCode.SUCCESS)
|
|
||||||
except Exception as e:
|
|
||||||
return construct_json_result(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ------------------------------ update a dataset --------------------------------------------
|
|
||||||
|
|
||||||
@manager.route("/<dataset_id>", methods=["PUT"])
|
|
||||||
@login_required
|
|
||||||
def update_dataset(dataset_id):
|
|
||||||
req = request.json
|
|
||||||
try:
|
|
||||||
# the request cannot be empty
|
|
||||||
if not req:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Please input at least one parameter that "
|
|
||||||
"you want to update!")
|
|
||||||
# check whether the dataset can be found
|
|
||||||
if not KnowledgebaseService.query(created_by=current_user.id, id=dataset_id):
|
|
||||||
return construct_json_result(message=f"Only the owner of knowledgebase is authorized for this operation!",
|
|
||||||
code=RetCode.OPERATING_ERROR)
|
|
||||||
|
|
||||||
exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
# check whether there is this dataset
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message="This dataset cannot be found!")
|
|
||||||
|
|
||||||
if "name" in req:
|
|
||||||
name = req["name"].strip()
|
|
||||||
# check whether there is duplicate name
|
|
||||||
if name.lower() != dataset.name.lower() \
|
|
||||||
and len(KnowledgebaseService.query(name=name, tenant_id=current_user.id,
|
|
||||||
status=StatusEnum.VALID.value)) > 1:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"The name: {name.lower()} is already used by other "
|
|
||||||
f"datasets. Please choose a different name.")
|
|
||||||
|
|
||||||
dataset_updating_data = {}
|
|
||||||
chunk_num = req.get("chunk_num")
|
|
||||||
# modify the value of 11 parameters
|
|
||||||
|
|
||||||
# 2 parameters: embedding id and chunk method
|
|
||||||
# only if chunk_num is 0, the user can update the embedding id
|
|
||||||
if req.get("embedding_model_id"):
|
|
||||||
if chunk_num == 0:
|
|
||||||
dataset_updating_data["embd_id"] = req["embedding_model_id"]
|
|
||||||
else:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message="You have already parsed the document in this "
|
|
||||||
"dataset, so you cannot change the embedding "
|
|
||||||
"model.")
|
|
||||||
# only if chunk_num is 0, the user can update the chunk_method
|
|
||||||
if "chunk_method" in req:
|
|
||||||
type_value = req["chunk_method"]
|
|
||||||
if is_illegal_value_for_enum(type_value, ParserType):
|
|
||||||
return construct_json_result(message=f"Illegal value {type_value} for 'chunk_method' field.",
|
|
||||||
code=RetCode.DATA_ERROR)
|
|
||||||
if chunk_num != 0:
|
|
||||||
construct_json_result(code=RetCode.DATA_ERROR, message="You have already parsed the document "
|
|
||||||
"in this dataset, so you cannot "
|
|
||||||
"change the chunk method.")
|
|
||||||
dataset_updating_data["parser_id"] = req["template_type"]
|
|
||||||
|
|
||||||
# convert the photo parameter to avatar
|
|
||||||
if req.get("photo"):
|
|
||||||
dataset_updating_data["avatar"] = req["photo"]
|
|
||||||
|
|
||||||
# layout_recognize
|
|
||||||
if "layout_recognize" in req:
|
|
||||||
if "parser_config" not in dataset_updating_data:
|
|
||||||
dataset_updating_data['parser_config'] = {}
|
|
||||||
dataset_updating_data['parser_config']['layout_recognize'] = req['layout_recognize']
|
|
||||||
|
|
||||||
# TODO: updating use_raptor needs to construct a class
|
|
||||||
|
|
||||||
# 6 parameters
|
|
||||||
for key in ["name", "language", "description", "permission", "id", "token_num"]:
|
|
||||||
if key in req:
|
|
||||||
dataset_updating_data[key] = req.get(key)
|
|
||||||
|
|
||||||
# update
|
|
||||||
if not KnowledgebaseService.update_by_id(dataset.id, dataset_updating_data):
|
|
||||||
return construct_json_result(code=RetCode.OPERATING_ERROR, message="Failed to update! "
|
|
||||||
"Please check the status of RAGFlow "
|
|
||||||
"server and try again!")
|
|
||||||
|
|
||||||
exist, dataset = KnowledgebaseService.get_by_id(dataset.id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Failed to get the dataset "
|
|
||||||
"using the dataset ID.")
|
|
||||||
|
|
||||||
return construct_json_result(data=dataset.to_json(), code=RetCode.SUCCESS)
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# --------------------------------content management ----------------------------------------------
|
|
||||||
|
|
||||||
# ----------------------------upload files-----------------------------------------------------
|
|
||||||
@manager.route("/<dataset_id>/documents/", methods=["POST"])
|
|
||||||
@login_required
|
|
||||||
def upload_documents(dataset_id):
|
|
||||||
# no files
|
|
||||||
if not request.files:
|
|
||||||
return construct_json_result(
|
|
||||||
message="There is no file!", code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# the number of uploading files exceeds the limit
|
|
||||||
file_objs = request.files.getlist("file")
|
|
||||||
num_file_objs = len(file_objs)
|
|
||||||
|
|
||||||
if num_file_objs > MAXIMUM_OF_UPLOADING_FILES:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message=f"You try to upload {num_file_objs} files, "
|
|
||||||
f"which exceeds the maximum number of uploading files: {MAXIMUM_OF_UPLOADING_FILES}")
|
|
||||||
|
|
||||||
# no dataset
|
|
||||||
exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(message="Can't find this dataset", code=RetCode.DATA_ERROR)
|
|
||||||
|
|
||||||
for file_obj in file_objs:
|
|
||||||
file_name = file_obj.filename
|
|
||||||
# no name
|
|
||||||
if not file_name:
|
|
||||||
return construct_json_result(
|
|
||||||
message="There is a file without name!", code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# TODO: support the remote files
|
|
||||||
if 'http' in file_name:
|
|
||||||
return construct_json_result(code=RetCode.ARGUMENT_ERROR, message="Remote files have not unsupported.")
|
|
||||||
|
|
||||||
# get the root_folder
|
|
||||||
root_folder = FileService.get_root_folder(current_user.id)
|
|
||||||
# get the id of the root_folder
|
|
||||||
parent_file_id = root_folder["id"] # document id
|
|
||||||
# this is for the new user, create '.knowledgebase' file
|
|
||||||
FileService.init_knowledgebase_docs(parent_file_id, current_user.id)
|
|
||||||
# go inside this folder, get the kb_root_folder
|
|
||||||
kb_root_folder = FileService.get_kb_folder(current_user.id)
|
|
||||||
# link the file management to the kb_folder
|
|
||||||
kb_folder = FileService.new_a_file_from_kb(dataset.tenant_id, dataset.name, kb_root_folder["id"])
|
|
||||||
|
|
||||||
# grab all the errs
|
|
||||||
err = []
|
|
||||||
MAX_FILE_NUM_PER_USER = int(os.environ.get("MAX_FILE_NUM_PER_USER", 0))
|
|
||||||
uploaded_docs_json = []
|
|
||||||
for file in file_objs:
|
|
||||||
try:
|
|
||||||
# TODO: get this value from the database as some tenants have this limit while others don't
|
|
||||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(dataset.tenant_id) >= MAX_FILE_NUM_PER_USER:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message="Exceed the maximum file number of a free user!")
|
|
||||||
# deal with the duplicate name
|
|
||||||
filename = duplicate_name(
|
|
||||||
DocumentService.query,
|
|
||||||
name=file.filename,
|
|
||||||
kb_id=dataset.id)
|
|
||||||
|
|
||||||
# deal with the unsupported type
|
|
||||||
filetype = filename_type(filename)
|
|
||||||
if filetype == FileType.OTHER.value:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message="This type of file has not been supported yet!")
|
|
||||||
|
|
||||||
# upload to the minio
|
|
||||||
location = filename
|
|
||||||
while STORAGE_IMPL.obj_exist(dataset_id, location):
|
|
||||||
location += "_"
|
|
||||||
|
|
||||||
blob = file.read()
|
|
||||||
|
|
||||||
# the content is empty, raising a warning
|
|
||||||
if blob == b'':
|
|
||||||
warnings.warn(f"[WARNING]: The content of the file {filename} is empty.")
|
|
||||||
|
|
||||||
STORAGE_IMPL.put(dataset_id, location, blob)
|
|
||||||
|
|
||||||
doc = {
|
|
||||||
"id": get_uuid(),
|
|
||||||
"kb_id": dataset.id,
|
|
||||||
"parser_id": dataset.parser_id,
|
|
||||||
"parser_config": dataset.parser_config,
|
|
||||||
"created_by": current_user.id,
|
|
||||||
"type": filetype,
|
|
||||||
"name": filename,
|
|
||||||
"location": location,
|
|
||||||
"size": len(blob),
|
|
||||||
"thumbnail": thumbnail(filename, blob)
|
|
||||||
}
|
|
||||||
if doc["type"] == FileType.VISUAL:
|
|
||||||
doc["parser_id"] = ParserType.PICTURE.value
|
|
||||||
if doc["type"] == FileType.AURAL:
|
|
||||||
doc["parser_id"] = ParserType.AUDIO.value
|
|
||||||
if re.search(r"\.(ppt|pptx|pages)$", filename):
|
|
||||||
doc["parser_id"] = ParserType.PRESENTATION.value
|
|
||||||
if re.search(r"\.(eml)$", filename):
|
|
||||||
doc["parser_id"] = ParserType.EMAIL.value
|
|
||||||
DocumentService.insert(doc)
|
|
||||||
|
|
||||||
FileService.add_file_from_kb(doc, kb_folder["id"], dataset.tenant_id)
|
|
||||||
uploaded_docs_json.append(doc)
|
|
||||||
except Exception as e:
|
|
||||||
err.append(file.filename + ": " + str(e))
|
|
||||||
|
|
||||||
if err:
|
|
||||||
# return all the errors
|
|
||||||
return construct_json_result(message="\n".join(err), code=RetCode.SERVER_ERROR)
|
|
||||||
# success
|
|
||||||
return construct_json_result(data=uploaded_docs_json, code=RetCode.SUCCESS)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------delete a file-----------------------------------------------------
|
|
||||||
@manager.route("/<dataset_id>/documents/<document_id>", methods=["DELETE"])
|
|
||||||
@login_required
|
|
||||||
def delete_document(document_id, dataset_id): # string
|
|
||||||
# get the root folder
|
|
||||||
root_folder = FileService.get_root_folder(current_user.id)
|
|
||||||
# parent file's id
|
|
||||||
parent_file_id = root_folder["id"]
|
|
||||||
# consider the new user
|
|
||||||
FileService.init_knowledgebase_docs(parent_file_id, current_user.id)
|
|
||||||
# store all the errors that may have
|
|
||||||
errors = ""
|
|
||||||
try:
|
|
||||||
# whether there is this document
|
|
||||||
exist, doc = DocumentService.get_by_id(document_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(message=f"Document {document_id} not found!", code=RetCode.DATA_ERROR)
|
|
||||||
# whether this doc is authorized by this tenant
|
|
||||||
tenant_id = DocumentService.get_tenant_id(document_id)
|
|
||||||
if not tenant_id:
|
|
||||||
return construct_json_result(
|
|
||||||
message=f"You cannot delete this document {document_id} due to the authorization"
|
|
||||||
f" reason!", code=RetCode.AUTHENTICATION_ERROR)
|
|
||||||
|
|
||||||
# get the doc's id and location
|
|
||||||
real_dataset_id, location = File2DocumentService.get_storage_address(doc_id=document_id)
|
|
||||||
|
|
||||||
if real_dataset_id != dataset_id:
|
|
||||||
return construct_json_result(message=f"The document {document_id} is not in the dataset: {dataset_id}, "
|
|
||||||
f"but in the dataset: {real_dataset_id}.", code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# there is an issue when removing
|
|
||||||
if not DocumentService.remove_document(doc, tenant_id):
|
|
||||||
return construct_json_result(
|
|
||||||
message="There was an error during the document removal process. Please check the status of the "
|
|
||||||
"RAGFlow server and try the removal again.", code=RetCode.OPERATING_ERROR)
|
|
||||||
|
|
||||||
# fetch the File2Document record associated with the provided document ID.
|
|
||||||
file_to_doc = File2DocumentService.get_by_document_id(document_id)
|
|
||||||
# delete the associated File record.
|
|
||||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == file_to_doc[0].file_id])
|
|
||||||
# delete the File2Document record itself using the document ID. This removes the
|
|
||||||
# association between the document and the file after the File record has been deleted.
|
|
||||||
File2DocumentService.delete_by_document_id(document_id)
|
|
||||||
|
|
||||||
# delete it from minio
|
|
||||||
STORAGE_IMPL.rm(dataset_id, location)
|
|
||||||
except Exception as e:
|
|
||||||
errors += str(e)
|
|
||||||
if errors:
|
|
||||||
return construct_json_result(data=False, message=errors, code=RetCode.SERVER_ERROR)
|
|
||||||
|
|
||||||
return construct_json_result(data=True, code=RetCode.SUCCESS)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------list files-----------------------------------------------------
|
|
||||||
@manager.route('/<dataset_id>/documents/', methods=['GET'])
|
|
||||||
@login_required
|
|
||||||
def list_documents(dataset_id):
|
|
||||||
if not dataset_id:
|
|
||||||
return construct_json_result(
|
|
||||||
data=False, message="Lack of 'dataset_id'", code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# searching keywords
|
|
||||||
keywords = request.args.get("keywords", "")
|
|
||||||
|
|
||||||
offset = request.args.get("offset", 0)
|
|
||||||
count = request.args.get("count", -1)
|
|
||||||
order_by = request.args.get("order_by", "create_time")
|
|
||||||
descend = request.args.get("descend", True)
|
|
||||||
try:
|
|
||||||
docs, total = DocumentService.list_documents_in_dataset(dataset_id, int(offset), int(count), order_by,
|
|
||||||
descend, keywords)
|
|
||||||
|
|
||||||
return construct_json_result(data={"total": total, "docs": docs}, message=RetCode.SUCCESS)
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------update: enable rename-----------------------------------------------------
|
|
||||||
@manager.route("/<dataset_id>/documents/<document_id>", methods=["PUT"])
|
|
||||||
@login_required
|
|
||||||
def update_document(dataset_id, document_id):
|
|
||||||
req = request.json
|
|
||||||
try:
|
|
||||||
legal_parameters = set()
|
|
||||||
legal_parameters.add("name")
|
|
||||||
legal_parameters.add("enable")
|
|
||||||
legal_parameters.add("template_type")
|
|
||||||
|
|
||||||
for key in req.keys():
|
|
||||||
if key not in legal_parameters:
|
|
||||||
return construct_json_result(code=RetCode.ARGUMENT_ERROR, message=f"{key} is an illegal parameter.")
|
|
||||||
|
|
||||||
# The request body cannot be empty
|
|
||||||
if not req:
|
|
||||||
return construct_json_result(
|
|
||||||
code=RetCode.DATA_ERROR,
|
|
||||||
message="Please input at least one parameter that you want to update!")
|
|
||||||
|
|
||||||
# Check whether there is this dataset
|
|
||||||
exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message=f"This dataset {dataset_id} cannot be found!")
|
|
||||||
|
|
||||||
# The document does not exist
|
|
||||||
exist, document = DocumentService.get_by_id(document_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(message=f"This document {document_id} cannot be found!",
|
|
||||||
code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# Deal with the different keys
|
|
||||||
updating_data = {}
|
|
||||||
if "name" in req:
|
|
||||||
new_name = req["name"]
|
|
||||||
updating_data["name"] = new_name
|
|
||||||
# Check whether the new_name is suitable
|
|
||||||
# 1. no name value
|
|
||||||
if not new_name:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR, message="There is no new name.")
|
|
||||||
|
|
||||||
# 2. In case that there's space in the head or the tail
|
|
||||||
new_name = new_name.strip()
|
|
||||||
|
|
||||||
# 3. Check whether the new_name has the same extension of file as before
|
|
||||||
if pathlib.Path(new_name.lower()).suffix != pathlib.Path(
|
|
||||||
document.name.lower()).suffix:
|
|
||||||
return construct_json_result(
|
|
||||||
data=False,
|
|
||||||
message="The extension of file cannot be changed",
|
|
||||||
code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# 4. Check whether the new name has already been occupied by other file
|
|
||||||
for d in DocumentService.query(name=new_name, kb_id=document.kb_id):
|
|
||||||
if d.name == new_name:
|
|
||||||
return construct_json_result(
|
|
||||||
message="Duplicated document name in the same dataset.",
|
|
||||||
code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
if "enable" in req:
|
|
||||||
enable_value = req["enable"]
|
|
||||||
if is_illegal_value_for_enum(enable_value, StatusEnum):
|
|
||||||
return construct_json_result(message=f"Illegal value {enable_value} for 'enable' field.",
|
|
||||||
code=RetCode.DATA_ERROR)
|
|
||||||
updating_data["status"] = enable_value
|
|
||||||
|
|
||||||
# TODO: Chunk-method - update parameters inside the json object parser_config
|
|
||||||
if "template_type" in req:
|
|
||||||
type_value = req["template_type"]
|
|
||||||
if is_illegal_value_for_enum(type_value, ParserType):
|
|
||||||
return construct_json_result(message=f"Illegal value {type_value} for 'template_type' field.",
|
|
||||||
code=RetCode.DATA_ERROR)
|
|
||||||
updating_data["parser_id"] = req["template_type"]
|
|
||||||
|
|
||||||
# The process of updating
|
|
||||||
if not DocumentService.update_by_id(document_id, updating_data):
|
|
||||||
return construct_json_result(
|
|
||||||
code=RetCode.OPERATING_ERROR,
|
|
||||||
message="Failed to update document in the database! "
|
|
||||||
"Please check the status of RAGFlow server and try again!")
|
|
||||||
|
|
||||||
# name part: file service
|
|
||||||
if "name" in req:
|
|
||||||
# Get file by document id
|
|
||||||
file_information = File2DocumentService.get_by_document_id(document_id)
|
|
||||||
if file_information:
|
|
||||||
exist, file = FileService.get_by_id(file_information[0].file_id)
|
|
||||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
|
||||||
|
|
||||||
exist, document = DocumentService.get_by_id(document_id)
|
|
||||||
|
|
||||||
# Success
|
|
||||||
return construct_json_result(data=document.to_json(), message="Success", code=RetCode.SUCCESS)
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# Helper method to judge whether it's an illegal value
|
|
||||||
def is_illegal_value_for_enum(value, enum_class):
|
|
||||||
return value not in enum_class.__members__.values()
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------download a file-----------------------------------------------------
|
|
||||||
@manager.route("/<dataset_id>/documents/<document_id>", methods=["GET"])
|
|
||||||
@login_required
|
|
||||||
def download_document(dataset_id, document_id):
|
|
||||||
try:
|
|
||||||
# Check whether there is this dataset
|
|
||||||
exist, _ = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This dataset '{dataset_id}' cannot be found!")
|
|
||||||
|
|
||||||
# Check whether there is this document
|
|
||||||
exist, document = DocumentService.get_by_id(document_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(message=f"This document '{document_id}' cannot be found!",
|
|
||||||
code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
# The process of downloading
|
|
||||||
doc_id, doc_location = File2DocumentService.get_storage_address(doc_id=document_id) # minio address
|
|
||||||
file_stream = STORAGE_IMPL.get(doc_id, doc_location)
|
|
||||||
if not file_stream:
|
|
||||||
return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)
|
|
||||||
|
|
||||||
file = BytesIO(file_stream)
|
|
||||||
|
|
||||||
# Use send_file with a proper filename and MIME type
|
|
||||||
return send_file(
|
|
||||||
file,
|
|
||||||
as_attachment=True,
|
|
||||||
download_name=document.name,
|
|
||||||
mimetype='application/octet-stream' # Set a default MIME type
|
|
||||||
)
|
|
||||||
|
|
||||||
# Error
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------start parsing a document-----------------------------------------------------
|
|
||||||
# helper method for parsing
|
|
||||||
# callback method
|
|
||||||
def doc_parse_callback(doc_id, prog=None, msg=""):
|
|
||||||
cancel = DocumentService.do_cancel(doc_id)
|
|
||||||
if cancel:
|
|
||||||
raise Exception("The parsing process has been cancelled!")
|
|
||||||
|
|
||||||
"""
|
|
||||||
def doc_parse(binary, doc_name, parser_name, tenant_id, doc_id):
|
|
||||||
match parser_name:
|
|
||||||
case "book":
|
|
||||||
book.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "laws":
|
|
||||||
laws.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "manual":
|
|
||||||
manual.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "naive":
|
|
||||||
# It's the mode by default, which is general in the front-end
|
|
||||||
naive.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "one":
|
|
||||||
one.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "paper":
|
|
||||||
paper.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "picture":
|
|
||||||
picture.chunk(doc_name, binary=binary, tenant_id=tenant_id, lang="Chinese",
|
|
||||||
callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "presentation":
|
|
||||||
presentation.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "qa":
|
|
||||||
qa.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "resume":
|
|
||||||
resume.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "table":
|
|
||||||
table.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "audio":
|
|
||||||
audio.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case "email":
|
|
||||||
email.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
|
|
||||||
case _:
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["POST"])
|
|
||||||
@login_required
|
|
||||||
def parse_document(dataset_id, document_id):
|
|
||||||
try:
|
|
||||||
# valid dataset
|
|
||||||
exist, _ = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This dataset '{dataset_id}' cannot be found!")
|
|
||||||
|
|
||||||
return parsing_document_internal(document_id)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------start parsing documents-----------------------------------------------------
|
|
||||||
@manager.route("/<dataset_id>/documents/status", methods=["POST"])
|
|
||||||
@login_required
|
|
||||||
def parse_documents(dataset_id):
|
|
||||||
doc_ids = request.json["doc_ids"]
|
|
||||||
try:
|
|
||||||
exist, _ = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This dataset '{dataset_id}' cannot be found!")
|
|
||||||
# two conditions
|
|
||||||
if not doc_ids:
|
|
||||||
# documents inside the dataset
|
|
||||||
docs, total = DocumentService.list_documents_in_dataset(dataset_id, 0, -1, "create_time",
|
|
||||||
True, "")
|
|
||||||
doc_ids = [doc["id"] for doc in docs]
|
|
||||||
|
|
||||||
message = ""
|
|
||||||
# for loop
|
|
||||||
for id in doc_ids:
|
|
||||||
res = parsing_document_internal(id)
|
|
||||||
res_body = res.json
|
|
||||||
if res_body["code"] == RetCode.SUCCESS:
|
|
||||||
message += res_body["message"]
|
|
||||||
else:
|
|
||||||
return res
|
|
||||||
return construct_json_result(data=True, code=RetCode.SUCCESS, message=message)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# helper method for parsing the document
|
|
||||||
def parsing_document_internal(id):
|
|
||||||
message = ""
|
|
||||||
try:
|
|
||||||
# Check whether there is this document
|
|
||||||
exist, document = DocumentService.get_by_id(id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(message=f"This document '{id}' cannot be found!",
|
|
||||||
code=RetCode.ARGUMENT_ERROR)
|
|
||||||
|
|
||||||
tenant_id = DocumentService.get_tenant_id(id)
|
|
||||||
if not tenant_id:
|
|
||||||
return construct_json_result(message="Tenant not found!", code=RetCode.AUTHENTICATION_ERROR)
|
|
||||||
|
|
||||||
info = {"run": "1", "progress": 0}
|
|
||||||
info["progress_msg"] = ""
|
|
||||||
info["chunk_num"] = 0
|
|
||||||
info["token_num"] = 0
|
|
||||||
|
|
||||||
DocumentService.update_by_id(id, info)
|
|
||||||
|
|
||||||
ELASTICSEARCH.deleteByQuery(Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
|
|
||||||
|
|
||||||
_, doc_attributes = DocumentService.get_by_id(id)
|
|
||||||
doc_attributes = doc_attributes.to_dict()
|
|
||||||
doc_id = doc_attributes["id"]
|
|
||||||
|
|
||||||
bucket, doc_name = File2DocumentService.get_storage_address(doc_id=doc_id)
|
|
||||||
binary = STORAGE_IMPL.get(bucket, doc_name)
|
|
||||||
parser_name = doc_attributes["parser_id"]
|
|
||||||
if binary:
|
|
||||||
res = doc_parse(binary, doc_name, parser_name, tenant_id, doc_id)
|
|
||||||
if res is False:
|
|
||||||
message += f"The parser id: {parser_name} of the document {doc_id} is not supported; "
|
|
||||||
else:
|
|
||||||
message += f"Empty data in the document: {doc_name}; "
|
|
||||||
# failed in parsing
|
|
||||||
if doc_attributes["status"] == TaskStatus.FAIL.value:
|
|
||||||
message += f"Failed in parsing the document: {doc_id}; "
|
|
||||||
return construct_json_result(code=RetCode.SUCCESS, message=message)
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------stop parsing a doc-----------------------------------------------------
|
|
||||||
@manager.route("<dataset_id>/documents/<document_id>/status", methods=["DELETE"])
|
|
||||||
@login_required
|
|
||||||
def stop_parsing_document(dataset_id, document_id):
|
|
||||||
try:
|
|
||||||
# valid dataset
|
|
||||||
exist, _ = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This dataset '{dataset_id}' cannot be found!")
|
|
||||||
|
|
||||||
return stop_parsing_document_internal(document_id)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------stop parsing docs-----------------------------------------------------
|
|
||||||
@manager.route("<dataset_id>/documents/status", methods=["DELETE"])
|
|
||||||
@login_required
|
|
||||||
def stop_parsing_documents(dataset_id):
|
|
||||||
doc_ids = request.json["doc_ids"]
|
|
||||||
try:
|
|
||||||
# valid dataset?
|
|
||||||
exist, _ = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This dataset '{dataset_id}' cannot be found!")
|
|
||||||
if not doc_ids:
|
|
||||||
# documents inside the dataset
|
|
||||||
docs, total = DocumentService.list_documents_in_dataset(dataset_id, 0, -1, "create_time",
|
|
||||||
True, "")
|
|
||||||
doc_ids = [doc["id"] for doc in docs]
|
|
||||||
|
|
||||||
message = ""
|
|
||||||
# for loop
|
|
||||||
for id in doc_ids:
|
|
||||||
res = stop_parsing_document_internal(id)
|
|
||||||
res_body = res.json
|
|
||||||
if res_body["code"] == RetCode.SUCCESS:
|
|
||||||
message += res_body["message"]
|
|
||||||
else:
|
|
||||||
return res
|
|
||||||
return construct_json_result(data=True, code=RetCode.SUCCESS, message=message)
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# Helper method
|
|
||||||
def stop_parsing_document_internal(document_id):
|
|
||||||
try:
|
|
||||||
# valid doc?
|
|
||||||
exist, doc = DocumentService.get_by_id(document_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(message=f"This document '{document_id}' cannot be found!",
|
|
||||||
code=RetCode.ARGUMENT_ERROR)
|
|
||||||
doc_attributes = doc.to_dict()
|
|
||||||
|
|
||||||
# only when the status is parsing, we need to stop it
|
|
||||||
if doc_attributes["status"] == TaskStatus.RUNNING.value:
|
|
||||||
tenant_id = DocumentService.get_tenant_id(document_id)
|
|
||||||
if not tenant_id:
|
|
||||||
return construct_json_result(message="Tenant not found!", code=RetCode.AUTHENTICATION_ERROR)
|
|
||||||
|
|
||||||
# update successfully?
|
|
||||||
if not DocumentService.update_by_id(document_id, {"status": "2"}): # cancel
|
|
||||||
return construct_json_result(
|
|
||||||
code=RetCode.OPERATING_ERROR,
|
|
||||||
message="There was an error during the stopping parsing the document process. "
|
|
||||||
"Please check the status of the RAGFlow server and try the update again."
|
|
||||||
)
|
|
||||||
|
|
||||||
_, doc_attributes = DocumentService.get_by_id(document_id)
|
|
||||||
doc_attributes = doc_attributes.to_dict()
|
|
||||||
|
|
||||||
# failed in stop parsing
|
|
||||||
if doc_attributes["status"] == TaskStatus.RUNNING.value:
|
|
||||||
return construct_json_result(message=f"Failed in parsing the document: {document_id}; ", code=RetCode.SUCCESS)
|
|
||||||
return construct_json_result(code=RetCode.SUCCESS, message="")
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
|
|
||||||
# ----------------------------show the status of the file-----------------------------------------------------
|
|
||||||
@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["GET"])
|
|
||||||
@login_required
|
|
||||||
def show_parsing_status(dataset_id, document_id):
|
|
||||||
try:
|
|
||||||
# valid dataset
|
|
||||||
exist, _ = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This dataset: '{dataset_id}' cannot be found!")
|
|
||||||
# valid document
|
|
||||||
exist, _ = DocumentService.get_by_id(document_id)
|
|
||||||
if not exist:
|
|
||||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
|
||||||
message=f"This document: '{document_id}' is not a valid document.")
|
|
||||||
|
|
||||||
_, doc = DocumentService.get_by_id(document_id) # get doc object
|
|
||||||
doc_attributes = doc.to_dict()
|
|
||||||
|
|
||||||
return construct_json_result(
|
|
||||||
data={"progress": doc_attributes["progress"], "status": TaskStatus(doc_attributes["status"]).name},
|
|
||||||
code=RetCode.SUCCESS
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
return construct_error_response(e)
|
|
||||||
|
|
||||||
# ----------------------------list the chunks of the file-----------------------------------------------------
|
|
||||||
|
|
||||||
# -- --------------------------delete the chunk-----------------------------------------------------
|
|
||||||
|
|
||||||
# ----------------------------edit the status of the chunk-----------------------------------------------------
|
|
||||||
|
|
||||||
# ----------------------------insert a new chunk-----------------------------------------------------
|
|
||||||
|
|
||||||
# ----------------------------upload a file-----------------------------------------------------
|
|
||||||
|
|
||||||
# ----------------------------get a specific chunk-----------------------------------------------------
|
|
||||||
|
|
||||||
# ----------------------------retrieval test-----------------------------------------------------
|
|
||||||
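Aside, not part of the diff: the deleted module's is_illegal_value_for_enum helper validates raw request values against an Enum's members. A standalone sketch of its behavior; the Status enum below is a hypothetical stand-in, not the project's StatusEnum:

    from enum import Enum

    class Status(Enum):              # hypothetical stand-in enum
        VALID = "1"
        INVALID = "0"

    def is_illegal_value_for_enum(value, enum_class):
        # __members__.values() yields the member objects themselves, so a raw
        # string such as "1" only compares equal on a str-subclass enum;
        # member objects always pass.
        return value not in enum_class.__members__.values()

    print(is_illegal_value_for_enum(Status.VALID, Status))   # False: a member
    print(is_illegal_value_for_enum("banana", Status))       # True: unknown value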
@@ -20,27 +20,27 @@ from api.db.services.dialog_service import DialogService
 from api.db import StatusEnum
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.user_service import TenantService, UserTenantService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result


-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @login_required
 def set_dialog():
     req = request.json
     dialog_id = req.get("dialog_id")
     name = req.get("name", "New Dialog")
-    description = req.get("description", "A helpful Dialog")
+    description = req.get("description", "A helpful dialog")
     icon = req.get("icon", "")
     top_n = req.get("top_n", 6)
     top_k = req.get("top_k", 1024)
     rerank_id = req.get("rerank_id", "")
-    if not rerank_id: req["rerank_id"] = ""
+    if not rerank_id:
+        req["rerank_id"] = ""
     similarity_threshold = req.get("similarity_threshold", 0.1)
     vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
-    if vector_similarity_weight is None: vector_similarity_weight = 0.3
     llm_setting = req.get("llm_setting", {})
     default_prompt = {
         "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
@@ -68,17 +68,23 @@ def set_dialog():
             continue
         if prompt_config["system"].find("{%s}" % p["key"]) < 0:
             return get_data_error_result(
-                retmsg="Parameter '{}' is not used".format(p["key"]))
+                message="Parameter '{}' is not used".format(p["key"]))

     try:
         e, tenant = TenantService.get_by_id(current_user.id)
         if not e:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
+        kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids"))
+        embd_count = len(set([kb.embd_id for kb in kbs]))
+        if embd_count != 1:
+            return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}"')
+
         llm_id = req.get("llm_id", tenant.llm_id)
         if not dialog_id:
             if not req.get("kb_ids"):
                 return get_data_error_result(
-                    retmsg="Fail! Please select knowledgebase!")
+                    message="Fail! Please select knowledgebase!")

             dia = {
                 "id": get_uuid(),
                 "tenant_id": current_user.id,
@@ -96,20 +102,20 @@ def set_dialog():
                 "icon": icon
             }
             if not DialogService.save(**dia):
-                return get_data_error_result(retmsg="Fail to new a dialog!")
+                return get_data_error_result(message="Fail to new a dialog!")
             e, dia = DialogService.get_by_id(dia["id"])
             if not e:
-                return get_data_error_result(retmsg="Fail to new a dialog!")
+                return get_data_error_result(message="Fail to new a dialog!")
             return get_json_result(data=dia.to_json())
         else:
             del req["dialog_id"]
             if "kb_names" in req:
                 del req["kb_names"]
             if not DialogService.update_by_id(dialog_id, req):
-                return get_data_error_result(retmsg="Dialog not found!")
+                return get_data_error_result(message="Dialog not found!")
             e, dia = DialogService.get_by_id(dialog_id)
             if not e:
-                return get_data_error_result(retmsg="Fail to update a dialog!")
+                return get_data_error_result(message="Fail to update a dialog!")
             dia = dia.to_dict()
             dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
             return get_json_result(data=dia)
@@ -117,14 +123,14 @@ def set_dialog():
         return server_error_response(e)


-@manager.route('/get', methods=['GET'])
+@manager.route('/get', methods=['GET'])  # noqa: F821
 @login_required
 def get():
     dialog_id = request.args["dialog_id"]
     try:
         e, dia = DialogService.get_by_id(dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         dia = dia.to_dict()
         dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
         return get_json_result(data=dia)
@@ -143,7 +149,7 @@ def get_kb_names(kb_ids):
     return ids, nms


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_dialogs():
     try:
@@ -160,7 +166,7 @@ def list_dialogs():
     return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("dialog_ids")
 def rm():
@@ -174,8 +180,8 @@ def rm():
                 break
         else:
             return get_json_result(
-                data=False, retmsg=f'Only owner of dialog authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of dialog authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         dialog_list.append({"id": id,"status":StatusEnum.INVALID.value})
     DialogService.update_many_by_id(dialog_list)
     return get_json_result(data=True)
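Aside, not part of the diff: the recurring retcode/retmsg -> code/message and RetCode -> settings.RetCode substitutions in these hunks all serve one renamed response envelope. A toy sketch of that shape; the stubs below stand in for api.settings and api.utils.api_utils, whose real internals are not shown here, and the numeric codes are assumptions:

    from enum import IntEnum

    class RetCode(IntEnum):          # stand-in; real values live in api/settings.py
        SUCCESS = 0
        ARGUMENT_ERROR = 101
        OPERATING_ERROR = 103

    class settings:                  # stands in for "from api import settings"
        RetCode = RetCode

    def get_json_result(data=None, message="", code=RetCode.SUCCESS):
        # Clients now read "code"/"message" instead of "retcode"/"retmsg".
        return {"code": int(code), "message": message, "data": data}

    print(get_json_result(data=False, message='Lack of "KB ID"',
                          code=settings.RetCode.ARGUMENT_ERROR))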
@@ -13,63 +13,58 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #
-import datetime
-import hashlib
-import json
-import os
+import os.path
 import pathlib
 import re
-import traceback
-from concurrent.futures import ThreadPoolExecutor
-from copy import deepcopy
-from io import BytesIO

 import flask
-from elasticsearch_dsl import Q
 from flask import request
 from flask_login import login_required, current_user

-from api.db.db_models import Task, File
-from api.db.services.dialog_service import DialogService, ConversationService
+from deepdoc.parser.html_parser import RAGFlowHtmlParser
+from rag.nlp import search

+from api.db import FileType, TaskStatus, ParserType, FileSource
+from api.db.db_models import File, Task
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
-from api.db.services.llm_service import LLMBundle
-from api.db.services.task_service import TaskService, queue_tasks
-from api.db.services.user_service import TenantService, UserTenantService
-from graphrag.mind_map_extractor import MindMapExtractor
-from rag.app import naive
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
+from api.db.services.task_service import queue_tasks
+from api.db.services.user_service import UserTenantService
 from api.db.services import duplicate_name
 from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
-from api.utils import get_uuid
-from api.db import FileType, TaskStatus, ParserType, FileSource, LLMType
+from api.db.services.task_service import TaskService
 from api.db.services.document_service import DocumentService, doc_upload_and_parse
-from api.settings import RetCode, stat_logger
+from api.utils.api_utils import (
+    server_error_response,
+    get_data_error_result,
+    validate_request,
+)
+from api.utils import get_uuid
+from api import settings
 from api.utils.api_utils import get_json_result
 from rag.utils.storage_factory import STORAGE_IMPL
 from api.utils.file_utils import filename_type, thumbnail, get_project_base_directory
 from api.utils.web_utils import html2pdf, is_valid_url
+from api.constants import IMG_BASE64_PREFIX


-@manager.route('/upload', methods=['POST'])
+@manager.route('/upload', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("kb_id")
 def upload():
     kb_id = request.form.get("kb_id")
     if not kb_id:
         return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

     file_objs = request.files.getlist('file')
     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

     e, kb = KnowledgebaseService.get_by_id(kb_id)
     if not e:
@@ -78,29 +73,30 @@ def upload():
     err, _ = FileService.upload_document(kb, file_objs, current_user.id)
     if err:
         return get_json_result(
-            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
+            data=False, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
     return get_json_result(data=True)

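Aside, not part of the diff: a client-side sketch for the /upload route above. The multipart field names ("kb_id", "file") come from the handler itself; the host and /v1/document prefix are assumptions, and session authentication is presumed handled elsewhere:

    # Hypothetical upload call; adjust host, prefix and credentials to your setup.
    import requests

    files = [("file", ("report.pdf", open("report.pdf", "rb"), "application/pdf"))]
    resp = requests.post("http://localhost:9380/v1/document/upload",
                         data={"kb_id": "<kb-id>"}, files=files)
    body = resp.json()
    # With the renamed envelope, success looks like:
    #   {"code": 0, "message": "", "data": true}
    if body["code"] != 0:
        raise RuntimeError(body["message"])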
-@manager.route('/web_crawl', methods=['POST'])
+@manager.route('/web_crawl', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("kb_id", "name", "url")
 def web_crawl():
     kb_id = request.form.get("kb_id")
     if not kb_id:
         return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
     name = request.form.get("name")
     url = request.form.get("url")
     if not is_valid_url(url):
         return get_json_result(
-            data=False, retmsg='The URL format is invalid', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
     e, kb = KnowledgebaseService.get_by_id(kb_id)
     if not e:
         raise LookupError("Can't find this knowledgebase!")

     blob = html2pdf(url)
-    if not blob: return server_error_response(ValueError("Download failure."))
+    if not blob:
+        return server_error_response(ValueError("Download failure."))

     root_folder = FileService.get_root_folder(current_user.id)
     pf_id = root_folder["id"]
@@ -148,7 +144,7 @@ def web_crawl():
     return get_json_result(data=True)


-@manager.route('/create', methods=['POST'])
+@manager.route('/create', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("name", "kb_id")
 def create():
@@ -156,17 +152,17 @@ def create():
     kb_id = req["kb_id"]
     if not kb_id:
         return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)

     try:
         e, kb = KnowledgebaseService.get_by_id(kb_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")

         if DocumentService.query(name=req["name"], kb_id=kb_id):
             return get_data_error_result(
-                retmsg="Duplicated document name in the same knowledgebase.")
+                message="Duplicated document name in the same knowledgebase.")

         doc = DocumentService.insert({
             "id": get_uuid(),
@@ -184,13 +180,13 @@ def create():
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_docs():
     kb_id = request.args.get("kb_id")
     if not kb_id:
         return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
     tenants = UserTenantService.query(user_id=current_user.id)
     for tenant in tenants:
         if KnowledgebaseService.query(
@@ -198,8 +194,8 @@ def list_docs():
             break
     else:
         return get_json_result(
-            data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-            retcode=RetCode.OPERATING_ERROR)
+            data=False, message='Only owner of knowledgebase authorized for this operation.',
+            code=settings.RetCode.OPERATING_ERROR)
     keywords = request.args.get("keywords", "")

     page_number = int(request.args.get("page", 1))
@@ -209,83 +205,108 @@ def list_docs():
     try:
         docs, tol = DocumentService.get_by_kb_id(
|
||||||
kb_id, page_number, items_per_page, orderby, desc, keywords)
|
kb_id, page_number, items_per_page, orderby, desc, keywords)
|
||||||
|
|
||||||
|
for doc_item in docs:
|
||||||
|
if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
|
||||||
|
doc_item['thumbnail'] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"
|
||||||
|
|
||||||
return get_json_result(data={"total": tol, "docs": docs})
|
return get_json_result(data={"total": tol, "docs": docs})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return server_error_response(e)
|
return server_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
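The loop added to `list_docs()` (and mirrored in `thumbnails()` below) rewrites any thumbnail that is not already an inline base64 data URI into a URL served by the `/image/<image_id>` route. A standalone sketch of that rewrite, assuming a data-URI prefix such as `"data:image/"` for `IMG_BASE64_PREFIX` (the real value is in `api/constants.py`):

```python
IMG_BASE64_PREFIX = "data:image/"  # assumption for illustration

def rewrite_thumbnails(docs, kb_id):
    # Inline base64 thumbnails pass through; raw storage names become image URLs.
    for doc_item in docs:
        thumb = doc_item.get("thumbnail")
        if thumb and not thumb.startswith(IMG_BASE64_PREFIX):
            doc_item["thumbnail"] = f"/v1/document/image/{kb_id}-{thumb}"
    return docs

print(rewrite_thumbnails([{"thumbnail": "abc.png"}], "kb1"))
# [{'thumbnail': '/v1/document/image/kb1-abc.png'}]
```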
-@manager.route('/infos', methods=['POST'])
+@manager.route('/infos', methods=['POST']) # noqa: F821
+@login_required
 def docinfos():
     req = request.json
     doc_ids = req["doc_ids"]
+    for doc_id in doc_ids:
+        if not DocumentService.accessible(doc_id, current_user.id):
+            return get_json_result(
+                data=False,
+                message='No authorization.',
+                code=settings.RetCode.AUTHENTICATION_ERROR
+            )
     docs = DocumentService.get_by_ids(doc_ids)
     return get_json_result(data=list(docs.dicts()))


-@manager.route('/thumbnails', methods=['GET'])
+@manager.route('/thumbnails', methods=['GET']) # noqa: F821
-#@login_required
+# @login_required
 def thumbnails():
     doc_ids = request.args.get("doc_ids").split(",")
     if not doc_ids:
         return get_json_result(
-            data=False, retmsg='Lack of "Document ID"', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)

     try:
         docs = DocumentService.get_thumbnails(doc_ids)

+        for doc_item in docs:
+            if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
+                doc_item['thumbnail'] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"
+
         return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
     except Exception as e:
         return server_error_response(e)


-@manager.route('/change_status', methods=['POST'])
+@manager.route('/change_status', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("doc_id", "status")
 def change_status():
     req = request.json
     if str(req["status"]) not in ["0", "1"]:
-        get_json_result(
+        return get_json_result(
             data=False,
-            retmsg='"Status" must be either 0 or 1!',
+            message='"Status" must be either 0 or 1!',
-            retcode=RetCode.ARGUMENT_ERROR)
+            code=settings.RetCode.ARGUMENT_ERROR)

+    if not DocumentService.accessible(req["doc_id"], current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR)

     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")

         if not DocumentService.update_by_id(
                 req["doc_id"], {"status": str(req["status"])}):
             return get_data_error_result(
-                retmsg="Database error (Document update)!")
+                message="Database error (Document update)!")

-        if str(req["status"]) == "0":
+        status = int(req["status"])
-            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
+        settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status},
-                                              scripts="ctx._source.available_int=0;",
+                                     search.index_name(kb.tenant_id), doc.kb_id)
-                                              idxnm=search.index_name(
-                                                  kb.tenant_id)
-                                              )
-        else:
-            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
-                                              scripts="ctx._source.available_int=1;",
-                                              idxnm=search.index_name(
-                                                  kb.tenant_id)
-                                              )
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


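The `change_status` hunk is the clearest example of the storage migration in this changeset: two Elasticsearch-specific `updateScriptByQuery()` branches collapse into one call on the backend-agnostic `settings.docStoreConn`, whose signature (inferred from the diff) is `update(condition, new_values, index_name, knowledgebase_id)`. A toy stand-in showing the call shape:

```python
class FakeDocStoreConn:
    """Toy stand-in for settings.docStoreConn, only to illustrate the call shape."""
    def __init__(self, docs):
        self.docs = docs

    def update(self, condition, new_values, index_name, kb_id):
        # apply new_values to every doc matching all condition fields
        for d in self.docs:
            if all(d.get(k) == v for k, v in condition.items()):
                d.update(new_values)

conn = FakeDocStoreConn([{"doc_id": "d1", "available_int": 0}])
# One call covers both branches the old ES-script code needed:
conn.update({"doc_id": "d1"}, {"available_int": 1}, "ragflow_tenant1", "kb1")
assert conn.docs[0]["available_int"] == 1
```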
-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("doc_id")
 def rm():
     req = request.json
     doc_ids = req["doc_id"]
-    if isinstance(doc_ids, str): doc_ids = [doc_ids]
+    if isinstance(doc_ids, str):
+        doc_ids = [doc_ids]
+
+    for doc_id in doc_ids:
+        if not DocumentService.accessible4deletion(doc_id, current_user.id):
+            return get_json_result(
+                data=False,
+                message='No authorization.',
+                code=settings.RetCode.AUTHENTICATION_ERROR
+            )

     root_folder = FileService.get_root_folder(current_user.id)
     pf_id = root_folder["id"]
     FileService.init_knowledgebase_docs(pf_id, current_user.id)
@@ -294,16 +315,17 @@ def rm():
         try:
             e, doc = DocumentService.get_by_id(doc_id)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")

             b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

+            TaskService.filter_delete([Task.doc_id == doc_id])
             if not DocumentService.remove_document(doc, tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")

             f2d = File2DocumentService.get_by_document_id(doc_id)
             FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
@@ -314,33 +336,43 @@ def rm():
             errors += str(e)

     if errors:
-        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
+        return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)

     return get_json_result(data=True)


-@manager.route('/run', methods=['POST'])
+@manager.route('/run', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("doc_ids", "run")
 def run():
     req = request.json
+    for doc_id in req["doc_ids"]:
+        if not DocumentService.accessible(doc_id, current_user.id):
+            return get_json_result(
+                data=False,
+                message='No authorization.',
+                code=settings.RetCode.AUTHENTICATION_ERROR
+            )
     try:
         for id in req["doc_ids"]:
             info = {"run": str(req["run"]), "progress": 0}
-            if str(req["run"]) == TaskStatus.RUNNING.value:
+            if str(req["run"]) == TaskStatus.RUNNING.value and req.get("delete", False):
                 info["progress_msg"] = ""
                 info["chunk_num"] = 0
                 info["token_num"] = 0
             DocumentService.update_by_id(id, info)
-            # if str(req["run"]) == TaskStatus.CANCEL.value:
             tenant_id = DocumentService.get_tenant_id(id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
-            ELASTICSEARCH.deleteByQuery(
+            e, doc = DocumentService.get_by_id(id)
-                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
+            if not e:
+                return get_data_error_result(message="Document not found!")
+            if req.get("delete", False):
+                TaskService.filter_delete([Task.doc_id == id])
+                if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
+                    settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)

             if str(req["run"]) == TaskStatus.RUNNING.value:
-                TaskService.filter_delete([Task.doc_id == id])
                 e, doc = DocumentService.get_by_id(id)
                 doc = doc.to_dict()
                 doc["tenant_id"] = tenant_id
@@ -352,30 +384,36 @@ def run():
         return server_error_response(e)


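After this rework, `/run` only purges existing chunks and tasks when the request carries a truthy `delete` flag, and it checks `indexExist()` first so a missing per-KB index no longer raises. A hypothetical client call (host, port, and the auth header are placeholders for a local deployment; the `/v1/document` prefix matches the image URLs built above):

```python
import requests  # assumes the requests package is installed

resp = requests.post(
    "http://localhost:9380/v1/document/run",
    json={"doc_ids": ["d1"], "run": "1", "delete": True},  # delete=True purges old chunks/tasks
    headers={"Authorization": "<your-token>"},             # placeholder credential
)
print(resp.json())
```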
-@manager.route('/rename', methods=['POST'])
+@manager.route('/rename', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("doc_id", "name")
 def rename():
     req = request.json
+    if not DocumentService.accessible(req["doc_id"], current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                 doc.name.lower()).suffix:
             return get_json_result(
                 data=False,
-                retmsg="The extension of file can't be changed",
+                message="The extension of file can't be changed",
-                retcode=RetCode.ARGUMENT_ERROR)
+                code=settings.RetCode.ARGUMENT_ERROR)
         for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
             if d.name == req["name"]:
                 return get_data_error_result(
-                    retmsg="Duplicated document name in the same knowledgebase.")
+                    message="Duplicated document name in the same knowledgebase.")

         if not DocumentService.update_by_id(
                 req["doc_id"], {"name": req["name"]}):
             return get_data_error_result(
-                retmsg="Database error (Document rename)!")
+                message="Database error (Document rename)!")

         informs = File2DocumentService.get_by_document_id(req["doc_id"])
         if informs:
@@ -387,13 +425,13 @@ def rename():
         return server_error_response(e)


-@manager.route('/get/<doc_id>', methods=['GET'])
+@manager.route('/get/<doc_id>', methods=['GET']) # noqa: F821
 # @login_required
 def get(doc_id):
     try:
         e, doc = DocumentService.get_by_id(doc_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")

         b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
         response = flask.make_response(STORAGE_IMPL.get(b, n))
@@ -412,15 +450,22 @@ def get(doc_id):
         return server_error_response(e)


-@manager.route('/change_parser', methods=['POST'])
+@manager.route('/change_parser', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("doc_id", "parser_id")
 def change_parser():
     req = request.json

+    if not DocumentService.accessible(req["doc_id"], current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         if doc.parser_id.lower() == req["parser_id"].lower():
             if "parser_config" in req:
                 if req["parser_config"] == doc.parser_config:
@@ -428,37 +473,41 @@ def change_parser():
             else:
                 return get_json_result(data=True)

-        if doc.type == FileType.VISUAL or re.search(
+        if ((doc.type == FileType.VISUAL and req["parser_id"] != "picture")
-                r"\.(ppt|pptx|pages)$", doc.name):
+                or (re.search(
-            return get_data_error_result(retmsg="Not supported yet!")
+                    r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation")):
+            return get_data_error_result(message="Not supported yet!")

         e = DocumentService.update_by_id(doc.id,
                                          {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                           "run": TaskStatus.UNSTART.value})
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         if "parser_config" in req:
             DocumentService.update_parser_config(doc.id, req["parser_config"])
         if doc.token_num > 0:
             e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                     doc.process_duation * -1)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(req["doc_id"])
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
-            ELASTICSEARCH.deleteByQuery(
+            if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
-                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
+                settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)

         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/image/<image_id>', methods=['GET'])
+@manager.route('/image/<image_id>', methods=['GET']) # noqa: F821
 # @login_required
 def get_image(image_id):
     try:
+        arr = image_id.split("-")
+        if len(arr) != 2:
+            return get_data_error_result(message="Image not found.")
         bkt, nm = image_id.split("-")
         response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
         response.headers.set('Content-Type', 'image/JPEG')
@@ -467,20 +516,80 @@ def get_image(image_id):
         return server_error_response(e)


-@manager.route('/upload_and_parse', methods=['POST'])
+@manager.route('/upload_and_parse', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("conversation_id")
 def upload_and_parse():
     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

     file_objs = request.files.getlist('file')
     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

     doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)

     return get_json_result(data=doc_ids)


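The `change_parser` hunk above relaxes a blanket rejection: visual files may now switch to the `picture` parser, and slide decks (`.ppt/.pptx/.pages`) to `presentation`; everything else that used to be blocked stays blocked. A small reconstruction of the new guard, with `"visual"` standing in for `FileType.VISUAL`:

```python
import re

def parser_switch_allowed(doc_type, doc_name, new_parser_id):
    # visual files: only the 'picture' parser is accepted
    if doc_type == "visual" and new_parser_id != "picture":
        return False
    # slide decks: only the 'presentation' parser is accepted
    if re.search(r"\.(ppt|pptx|pages)$", doc_name) and new_parser_id != "presentation":
        return False
    return True

print(parser_switch_allowed("visual", "photo.jpg", "picture"))  # True
print(parser_switch_allowed("pdf", "slides.pptx", "naive"))     # False
```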
+@manager.route('/parse', methods=['POST']) # noqa: F821
+@login_required
+def parse():
+    url = request.json.get("url") if request.json else ""
+    if url:
+        if not is_valid_url(url):
+            return get_json_result(
+                data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
+        download_path = os.path.join(get_project_base_directory(), "logs/downloads")
+        os.makedirs(download_path, exist_ok=True)
+        from seleniumwire.webdriver import Chrome, ChromeOptions
+        options = ChromeOptions()
+        options.add_argument('--headless')
+        options.add_argument('--disable-gpu')
+        options.add_argument('--no-sandbox')
+        options.add_argument('--disable-dev-shm-usage')
+        options.add_experimental_option('prefs', {
+            'download.default_directory': download_path,
+            'download.prompt_for_download': False,
+            'download.directory_upgrade': True,
+            'safebrowsing.enabled': True
+        })
+        driver = Chrome(options=options)
+        driver.get(url)
+        res_headers = [r.response.headers for r in driver.requests if r and r.response]
+        if len(res_headers) > 1:
+            sections = RAGFlowHtmlParser().parser_txt(driver.page_source)
+            driver.quit()
+            return get_json_result(data="\n".join(sections))
+
+        class File:
+            filename: str
+            filepath: str
+
+            def __init__(self, filename, filepath):
+                self.filename = filename
+                self.filepath = filepath
+
+            def read(self):
+                with open(self.filepath, "rb") as f:
+                    return f.read()
+
+        r = re.search(r"filename=\"([^\"]+)\"", str(res_headers))
+        if not r or not r.group(1):
+            return get_json_result(
+                data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
+        f = File(r.group(1), os.path.join(download_path, r.group(1)))
+        txt = FileService.parse_docs([f], current_user.id)
+        return get_json_result(data=txt)
+
+    if 'file' not in request.files:
+        return get_json_result(
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
+
+    file_objs = request.files.getlist('file')
+    txt = FileService.parse_docs(file_objs, current_user.id)
+
+    return get_json_result(data=txt)

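The new `/parse` endpoint accepts either a URL or uploaded files. In URL mode it renders the page in headless Chrome via selenium-wire; if navigation triggered a file download it parses the file from `logs/downloads`, otherwise it parses the page HTML. A hypothetical client usage sketch (host, port, and file names are placeholders):

```python
import requests

# URL mode: the server fetches and parses the page (or the file it downloads).
r = requests.post("http://localhost:9380/v1/document/parse",
                  json={"url": "https://example.com/report.pdf"})
print(r.json().get("data"))

# File mode: upload a document directly for parsing.
with open("report.pdf", "rb") as f:
    r = requests.post("http://localhost:9380/v1/document/parse",
                      files={"file": f})
print(r.json().get("data"))
```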
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #
-from elasticsearch_dsl import Q

-from api.db.db_models import File2Document
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService

@@ -26,13 +24,11 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
 from api.utils import get_uuid
 from api.db import FileType
 from api.db.services.document_service import DocumentService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import get_json_result
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH


-@manager.route('/convert', methods=['POST'])
+@manager.route('/convert', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("file_ids", "kb_ids")
 def convert():
@@ -54,13 +50,13 @@ def convert():
             doc_id = inform.document_id
             e, doc = DocumentService.get_by_id(doc_id)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
             if not DocumentService.remove_document(doc, tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")
             File2DocumentService.delete_by_file_id(id)

         # insert
@@ -68,11 +64,11 @@ def convert():
         e, kb = KnowledgebaseService.get_by_id(kb_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         e, file = FileService.get_by_id(id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this file!")
+                message="Can't find this file!")

         doc = DocumentService.insert({
             "id": get_uuid(),
@@ -96,7 +92,7 @@ def convert():
         return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("file_ids")
 def rm():
@@ -104,26 +100,26 @@ def rm():
     file_ids = req["file_ids"]
     if not file_ids:
         return get_json_result(
-            data=False, retmsg='Lack of "Files ID"', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='Lack of "Files ID"', code=settings.RetCode.ARGUMENT_ERROR)
     try:
         for file_id in file_ids:
             informs = File2DocumentService.get_by_file_id(file_id)
             if not informs:
-                return get_data_error_result(retmsg="Inform not found!")
+                return get_data_error_result(message="Inform not found!")
             for inform in informs:
                 if not inform:
-                    return get_data_error_result(retmsg="Inform not found!")
+                    return get_data_error_result(message="Inform not found!")
                 File2DocumentService.delete_by_file_id(file_id)
                 doc_id = inform.document_id
                 e, doc = DocumentService.get_by_id(doc_id)
                 if not e:
-                    return get_data_error_result(retmsg="Document not found!")
+                    return get_data_error_result(message="Document not found!")
                 tenant_id = DocumentService.get_tenant_id(doc_id)
                 if not tenant_id:
-                    return get_data_error_result(retmsg="Tenant not found!")
+                    return get_data_error_result(message="Tenant not found!")
                 if not DocumentService.remove_document(doc, tenant_id):
                     return get_data_error_result(
-                        retmsg="Database error (Document removal)!")
+                        message="Database error (Document removal)!")
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

@@ -18,7 +18,6 @@ import pathlib
 import re

 import flask
-from elasticsearch_dsl import Q
 from flask import request
 from flask_login import login_required, current_user

@@ -29,15 +28,13 @@ from api.utils import get_uuid
 from api.db import FileType, FileSource
 from api.db.services import duplicate_name
 from api.db.services.file_service import FileService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import get_json_result
 from api.utils.file_utils import filename_type
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils.storage_factory import STORAGE_IMPL


-@manager.route('/upload', methods=['POST'])
+@manager.route('/upload', methods=['POST']) # noqa: F821
 @login_required
 # @validate_request("parent_id")
 def upload():
@@ -49,24 +46,24 @@ def upload():

     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
     file_objs = request.files.getlist('file')

     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
     file_res = []
     try:
         for file_obj in file_objs:
             e, file = FileService.get_by_id(pf_id)
             if not e:
                 return get_data_error_result(
-                    retmsg="Can't find this folder!")
+                    message="Can't find this folder!")
             MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
             if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
                 return get_data_error_result(
-                    retmsg="Exceed the maximum file number of a free user!")
+                    message="Exceed the maximum file number of a free user!")

             # split file name path
             if not file_obj.filename:
@@ -85,13 +82,13 @@ def upload():
             if file_len != len_id_list:
                 e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
                 if not e:
-                    return get_data_error_result(retmsg="Folder not found!")
+                    return get_data_error_result(message="Folder not found!")
                 last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names,
                                                         len_id_list)
             else:
                 e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
                 if not e:
-                    return get_data_error_result(retmsg="Folder not found!")
+                    return get_data_error_result(message="Folder not found!")
                 last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names,
                                                         len_id_list)

@@ -123,7 +120,7 @@ def upload():
         return server_error_response(e)


-@manager.route('/create', methods=['POST'])
+@manager.route('/create', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("name")
 def create():
@@ -137,10 +134,10 @@ def create():
     try:
         if not FileService.is_parent_folder_exist(pf_id):
             return get_json_result(
-                data=False, retmsg="Parent Folder Doesn't Exist!", retcode=RetCode.OPERATING_ERROR)
+                data=False, message="Parent Folder Doesn't Exist!", code=settings.RetCode.OPERATING_ERROR)
         if FileService.query(name=req["name"], parent_id=pf_id):
             return get_data_error_result(
-                retmsg="Duplicated folder name in the same folder.")
+                message="Duplicated folder name in the same folder.")

         if input_file_type == FileType.FOLDER.value:
             file_type = FileType.FOLDER.value
@@ -163,7 +160,7 @@ def create():
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET']) # noqa: F821
 @login_required
 def list_files():
     pf_id = request.args.get("parent_id")
@@ -181,21 +178,21 @@ def list_files():
     try:
         e, file = FileService.get_by_id(pf_id)
         if not e:
-            return get_data_error_result(retmsg="Folder not found!")
+            return get_data_error_result(message="Folder not found!")

         files, total = FileService.get_by_pf_id(
             current_user.id, pf_id, page_number, items_per_page, orderby, desc, keywords)

         parent_folder = FileService.get_parent_folder(pf_id)
         if not FileService.get_parent_folder(pf_id):
-            return get_json_result(retmsg="File not found!")
+            return get_json_result(message="File not found!")

         return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
     except Exception as e:
         return server_error_response(e)


-@manager.route('/root_folder', methods=['GET'])
+@manager.route('/root_folder', methods=['GET']) # noqa: F821
 @login_required
 def get_root_folder():
     try:
@@ -205,14 +202,14 @@ def get_root_folder():
         return server_error_response(e)


-@manager.route('/parent_folder', methods=['GET'])
+@manager.route('/parent_folder', methods=['GET']) # noqa: F821
 @login_required
 def get_parent_folder():
     file_id = request.args.get("file_id")
     try:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="Folder not found!")
+            return get_data_error_result(message="Folder not found!")

         parent_folder = FileService.get_parent_folder(file_id)
         return get_json_result(data={"parent_folder": parent_folder.to_json()})
@@ -220,14 +217,14 @@ def get_parent_folder():
         return server_error_response(e)


-@manager.route('/all_parent_folder', methods=['GET'])
+@manager.route('/all_parent_folder', methods=['GET']) # noqa: F821
 @login_required
 def get_all_parent_folders():
     file_id = request.args.get("file_id")
     try:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="Folder not found!")
+            return get_data_error_result(message="Folder not found!")

         parent_folders = FileService.get_all_parent_folders(file_id)
         parent_folders_res = []
@@ -238,7 +235,7 @@ def get_all_parent_folders():
         return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("file_ids")
 def rm():
@@ -248,9 +245,9 @@ def rm():
         for file_id in file_ids:
             e, file = FileService.get_by_id(file_id)
             if not e:
-                return get_data_error_result(retmsg="File or Folder not found!")
+                return get_data_error_result(message="File or Folder not found!")
             if not file.tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
             if file.source_type == FileSource.KNOWLEDGEBASE:
                 continue

@@ -259,13 +256,13 @@ def rm():
                 for inner_file_id in file_id_list:
                     e, file = FileService.get_by_id(inner_file_id)
                     if not e:
-                        return get_data_error_result(retmsg="File not found!")
+                        return get_data_error_result(message="File not found!")
                     STORAGE_IMPL.rm(file.parent_id, file.location)
                 FileService.delete_folder_by_pf_id(current_user.id, file_id)
             else:
                 if not FileService.delete(file):
                     return get_data_error_result(
-                        retmsg="Database error (File removal)!")
+                        message="Database error (File removal)!")

             # delete file2document
             informs = File2DocumentService.get_by_file_id(file_id)
@@ -273,13 +270,13 @@ def rm():
                 doc_id = inform.document_id
                 e, doc = DocumentService.get_by_id(doc_id)
                 if not e:
-                    return get_data_error_result(retmsg="Document not found!")
+                    return get_data_error_result(message="Document not found!")
                 tenant_id = DocumentService.get_tenant_id(doc_id)
                 if not tenant_id:
-                    return get_data_error_result(retmsg="Tenant not found!")
+                    return get_data_error_result(message="Tenant not found!")
                 if not DocumentService.remove_document(doc, tenant_id):
                     return get_data_error_result(
-                        retmsg="Database error (Document removal)!")
+                        message="Database error (Document removal)!")
                 File2DocumentService.delete_by_file_id(file_id)

         return get_json_result(data=True)
@@ -287,7 +284,7 @@ def rm():
         return server_error_response(e)


-@manager.route('/rename', methods=['POST'])
+@manager.route('/rename', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("file_id", "name")
 def rename():
@@ -295,45 +292,50 @@ def rename():
     try:
         e, file = FileService.get_by_id(req["file_id"])
         if not e:
-            return get_data_error_result(retmsg="File not found!")
+            return get_data_error_result(message="File not found!")
         if file.type != FileType.FOLDER.value \
                 and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                     file.name.lower()).suffix:
             return get_json_result(
                 data=False,
-                retmsg="The extension of file can't be changed",
+                message="The extension of file can't be changed",
-                retcode=RetCode.ARGUMENT_ERROR)
+                code=settings.RetCode.ARGUMENT_ERROR)
         for file in FileService.query(name=req["name"], pf_id=file.parent_id):
             if file.name == req["name"]:
                 return get_data_error_result(
-                    retmsg="Duplicated file name in the same folder.")
+                    message="Duplicated file name in the same folder.")

         if not FileService.update_by_id(
                 req["file_id"], {"name": req["name"]}):
             return get_data_error_result(
-                retmsg="Database error (File rename)!")
+                message="Database error (File rename)!")

         informs = File2DocumentService.get_by_file_id(req["file_id"])
         if informs:
             if not DocumentService.update_by_id(
                     informs[0].document_id, {"name": req["name"]}):
                 return get_data_error_result(
-                    retmsg="Database error (Document rename)!")
+                    message="Database error (Document rename)!")

         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/get/<file_id>', methods=['GET'])
+@manager.route('/get/<file_id>', methods=['GET']) # noqa: F821
-# @login_required
+@login_required
 def get(file_id):
     try:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
-        b, n = File2DocumentService.get_storage_address(file_id=file_id)
-        response = flask.make_response(STORAGE_IMPL.get(b, n))
+        blob = STORAGE_IMPL.get(file.parent_id, file.location)
+        if not blob:
+            b, n = File2DocumentService.get_storage_address(file_id=file_id)
+            blob = STORAGE_IMPL.get(b, n)
+
+        response = flask.make_response(blob)
         ext = re.search(r"\.([^.]+)$", file.name)
         if ext:
             if file.type == FileType.VISUAL.value:
@@ -348,7 +350,7 @@ def get(file_id):
         return server_error_response(e)


-@manager.route('/mv', methods=['POST'])
+@manager.route('/mv', methods=['POST']) # noqa: F821
 @login_required
 @validate_request("src_file_ids", "dest_file_id")
 def move():
@@ -359,12 +361,12 @@ def move():
         for file_id in file_ids:
             e, file = FileService.get_by_id(file_id)
             if not e:
-                return get_data_error_result(retmsg="File or Folder not found!")
+                return get_data_error_result(message="File or Folder not found!")
             if not file.tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
             fe, _ = FileService.get_by_id(parent_id)
             if not fe:
-                return get_data_error_result(retmsg="Parent Folder not found!")
+                return get_data_error_result(message="Parent Folder not found!")
         FileService.move_file(file_ids, parent_id)
         return get_json_result(data=True)
     except Exception as e:

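The reworked file `get()` now reads the blob from the file's own `(parent_id, location)` address first and only falls back to the address recorded in the file-to-document mapping, instead of always going through that mapping. A runnable sketch of the two-step lookup with toy stand-ins for the real storage and mapping services:

```python
class MemoryStorage:
    """Toy stand-in for STORAGE_IMPL."""
    def __init__(self, blobs):
        self.blobs = blobs

    def get(self, bucket, name):
        return self.blobs.get((bucket, name))

def fetch_blob(storage, own_addr, f2d_addr):
    blob = storage.get(*own_addr)   # the file's own parent_id/location
    if not blob:                    # fall back to the file→document storage address
        blob = storage.get(*f2d_addr)
    return blob

storage = MemoryStorage({("doc-bucket", "report.pdf"): b"%PDF..."})
print(fetch_blob(storage, ("folder-1", "report.pdf"), ("doc-bucket", "report.pdf")))
```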
@ -13,7 +13,6 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
from elasticsearch_dsl import Q
|
|
||||||
from flask import request
|
from flask import request
|
||||||
from flask_login import login_required, current_user
|
from flask_login import login_required, current_user
|
||||||
|
|
||||||
@ -22,26 +21,35 @@ from api.db.services.document_service import DocumentService
|
|||||||
from api.db.services.file2document_service import File2DocumentService
|
from api.db.services.file2document_service import File2DocumentService
|
||||||
from api.db.services.file_service import FileService
|
from api.db.services.file_service import FileService
|
||||||
from api.db.services.user_service import TenantService, UserTenantService
|
from api.db.services.user_service import TenantService, UserTenantService
|
||||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request, not_allowed_parameters
|
||||||
from api.utils import get_uuid, get_format_time
|
from api.utils import get_uuid
|
||||||
from api.db import StatusEnum, UserTenantRole, FileSource
|
from api.db import StatusEnum, FileSource
|
||||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||||
from api.db.db_models import Knowledgebase, File
|
from api.db.db_models import File
|
||||||
from api.settings import stat_logger, RetCode
|
|
||||||
from api.utils.api_utils import get_json_result
|
from api.utils.api_utils import get_json_result
|
||||||
|
from api import settings
|
||||||
from rag.nlp import search
|
from rag.nlp import search
|
||||||
from rag.utils.es_conn import ELASTICSEARCH
|
+from api.constants import DATASET_NAME_LIMIT


-@manager.route('/create', methods=['post'])
+@manager.route('/create', methods=['post'])  # noqa: F821
 @login_required
 @validate_request("name")
 def create():
     req = request.json
-    req["name"] = req["name"].strip()
-    req["name"] = duplicate_name(
+    dataset_name = req["name"]
+    if not isinstance(dataset_name, str):
+        return get_data_error_result(message="Dataset name must be string.")
+    if dataset_name == "":
+        return get_data_error_result(message="Dataset name can't be empty.")
+    if len(dataset_name) >= DATASET_NAME_LIMIT:
+        return get_data_error_result(
+            message=f"Dataset name length is {len(dataset_name)} which is large than {DATASET_NAME_LIMIT}")
+
+    dataset_name = dataset_name.strip()
+    dataset_name = duplicate_name(
         KnowledgebaseService.query,
-        name=req["name"],
+        name=dataset_name,
         tenant_id=current_user.id,
         status=StatusEnum.VALID.value)
     try:
@@ -50,7 +58,7 @@ def create():
         req["created_by"] = current_user.id
         e, t = TenantService.get_by_id(current_user.id)
         if not e:
-            return get_data_error_result(retmsg="Tenant not found.")
+            return get_data_error_result(message="Tenant not found.")
         req["embd_id"] = t.embd_id
         if not KnowledgebaseService.save(**req):
             return get_data_error_result()
@@ -59,43 +67,61 @@ def create():
         return server_error_response(e)


-@manager.route('/update', methods=['post'])
+@manager.route('/update', methods=['post'])  # noqa: F821
 @login_required
 @validate_request("kb_id", "name", "description", "permission", "parser_id")
+@not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
 def update():
     req = request.json
     req["name"] = req["name"].strip()
+    if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
     try:
         if not KnowledgebaseService.query(
                 created_by=current_user.id, id=req["kb_id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of knowledgebase authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)

         e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")

         if req["name"].lower() != kb.name.lower() \
-                and len(KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
+                and len(
+            KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
             return get_data_error_result(
-                retmsg="Duplicated knowledgebase name.")
+                message="Duplicated knowledgebase name.")

         del req["kb_id"]
         if not KnowledgebaseService.update_by_id(kb.id, req):
             return get_data_error_result()

+        if kb.pagerank != req.get("pagerank", 0):
+            if req.get("pagerank", 0) > 0:
+                settings.docStoreConn.update({"kb_id": kb.id}, {"pagerank_fea": req["pagerank"]},
+                                             search.index_name(kb.tenant_id), kb.id)
+            else:
+                # Elasticsearch requires pagerank_fea be non-zero!
+                settings.docStoreConn.update({"exist": "pagerank_fea"}, {"remove": "pagerank_fea"},
+                                             search.index_name(kb.tenant_id), kb.id)
+
         e, kb = KnowledgebaseService.get_by_id(kb.id)
         if not e:
             return get_data_error_result(
-                retmsg="Database error (Knowledgebase rename)!")
+                message="Database error (Knowledgebase rename)!")

         return get_json_result(data=kb.to_json())
     except Exception as e:
         return server_error_response(e)

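The pagerank branch added to update() writes a pagerank_fea field into the document store when the new value is positive and strips the field otherwise, since Elasticsearch rejects a zero value there. A minimal client-side sketch of driving it; the host, the /v1/kb prefix, and cookie-based auth are assumptions, only the JSON fields come from the handler above:

import requests

# Hypothetical base URL and session; route prefix and auth are assumptions.
BASE = "http://localhost:9380/v1/kb"
session = requests.Session()

payload = {
    "kb_id": "<knowledgebase id>",
    "name": "my-kb",
    "description": "",
    "permission": "me",
    "parser_id": "naive",
    "pagerank": 10,  # > 0 writes pagerank_fea into the doc store; 0 removes it
}
resp = session.post(f"{BASE}/update", json=payload)
print(resp.json())
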
-@manager.route('/detail', methods=['GET'])
+@manager.route('/detail', methods=['GET'])  # noqa: F821
 @login_required
 def detail():
     kb_id = request.args["kb_id"]
@@ -107,56 +133,68 @@ def detail():
                 break
         else:
             return get_json_result(
-                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of knowledgebase authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         kb = KnowledgebaseService.get_detail(kb_id)
         if not kb:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         return get_json_result(data=kb)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_kbs():
-    page_number = request.args.get("page", 1)
-    items_per_page = request.args.get("page_size", 150)
+    keywords = request.args.get("keywords", "")
+    page_number = int(request.args.get("page", 1))
+    items_per_page = int(request.args.get("page_size", 150))
     orderby = request.args.get("orderby", "create_time")
     desc = request.args.get("desc", True)
     try:
         tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
-        kbs = KnowledgebaseService.get_by_tenant_ids(
-            [m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc)
-        return get_json_result(data=kbs)
+        kbs, total = KnowledgebaseService.get_by_tenant_ids(
+            [m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc, keywords)
+        return get_json_result(data={"kbs": kbs, "total": total})
     except Exception as e:
         return server_error_response(e)


-@manager.route('/rm', methods=['post'])
+@manager.route('/rm', methods=['post'])  # noqa: F821
 @login_required
 @validate_request("kb_id")
 def rm():
     req = request.json
+    if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
     try:
         kbs = KnowledgebaseService.query(
             created_by=current_user.id, id=req["kb_id"])
         if not kbs:
             return get_json_result(
-                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of knowledgebase authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)

         for doc in DocumentService.query(kb_id=req["kb_id"]):
             if not DocumentService.remove_document(doc, kbs[0].tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")
             f2d = File2DocumentService.get_by_document_id(doc.id)
             FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
             File2DocumentService.delete_by_document_id(doc.id)
+        FileService.filter_delete(
+            [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
         if not KnowledgebaseService.delete_by_id(req["kb_id"]):
             return get_data_error_result(
-                retmsg="Database error (Knowledgebase removal)!")
+                message="Database error (Knowledgebase removal)!")
+        for kb in kbs:
+            settings.docStoreConn.delete({"kb_id": kb.id}, search.index_name(kb.tenant_id), kb.id)
+            settings.docStoreConn.deleteIdx(search.index_name(kb.tenant_id), kb.id)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

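With this change /list accepts a keywords filter, coerces page and page_size to integers, and returns a paged envelope instead of a bare list. A hedged client sketch; the host, route prefix, and session auth are assumptions, while the parameters and the {"kbs": ..., "total": ...} shape follow the diff:

import requests

# Assumed host and cookie; only the query parameters and response envelope
# are taken from the handler above.
resp = requests.get(
    "http://localhost:9380/v1/kb/list",
    params={"keywords": "report", "page": 1, "page_size": 20,
            "orderby": "create_time", "desc": True},
    cookies={"session": "<session cookie>"},
)
data = resp.json()["data"]
print(data["total"], [kb["name"] for kb in data["kbs"]])
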
@@ -13,12 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import json

 from flask import request
 from flask_login import login_required, current_user
 from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
-from api.settings import LIGHTEN
+from api import settings
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.db import StatusEnum, LLMType
 from api.db.db_models import TenantLLM
@@ -27,7 +28,7 @@ from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
 import requests


-@manager.route('/factories', methods=['GET'])
+@manager.route('/factories', methods=['GET'])  # noqa: F821
 @login_required
 def factories():
     try:
@@ -49,7 +50,7 @@ def factories():
         return server_error_response(e)


-@manager.route('/set_api_key', methods=['POST'])
+@manager.route('/set_api_key', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory", "api_key")
 def set_api_key():
@@ -58,7 +59,7 @@ def set_api_key():
     chat_passed, embd_passed, rerank_passed = False, False, False
     factory = req["llm_factory"]
     msg = ""
-    for llm in LLMService.query(fid=factory)[:3]:
+    for llm in LLMService.query(fid=factory):
         if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
             mdl = EmbeddingModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
@@ -73,14 +74,14 @@ def set_api_key():
             mdl = ChatModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
                 m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
-                                 {"temperature": 0.9,'max_tokens':50})
-                if m.find("**ERROR**") >=0:
+                                 {"temperature": 0.9, 'max_tokens': 50})
+                if m.find("**ERROR**") >= 0:
                     raise Exception(m)
+                chat_passed = True
             except Exception as e:
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
-            chat_passed = True
         elif not rerank_passed and llm.model_type == LLMType.RERANK:
             mdl = RerankModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
@@ -88,13 +89,17 @@ def set_api_key():
                 arr, tc = mdl.similarity("What's the weather?", ["Is it sunny today?"])
                 if len(arr) == 0 or tc == 0:
                     raise Exception("Fail")
+                rerank_passed = True
+                logging.debug(f'passed model rerank {llm.llm_name}')
             except Exception as e:
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
-            rerank_passed = True
+        if any([embd_passed, chat_passed, rerank_passed]):
+            msg = ''
+            break

     if msg:
-        return get_data_error_result(retmsg=msg)
+        return get_data_error_result(message=msg)

     llm_config = {
         "api_key": req["api_key"],
@@ -105,6 +110,7 @@ def set_api_key():
             llm_config[n] = req[n]

     for llm in LLMService.query(fid=factory):
+        llm_config["max_tokens"] = llm.max_tokens
         if not TenantLLMService.filter_update(
                 [TenantLLM.tenant_id == current_user.id,
                  TenantLLM.llm_factory == factory,
@@ -116,13 +122,14 @@ def set_api_key():
                 llm_name=llm.llm_name,
                 model_type=llm.model_type,
                 api_key=llm_config["api_key"],
-                api_base=llm_config["api_base"]
+                api_base=llm_config["api_base"],
+                max_tokens=llm_config["max_tokens"]
             )

     return get_json_result(data=True)

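set_api_key now probes every model of the factory instead of only the first three, records a per-capability pass flag, and clears the accumulated error text and stops as soon as any probe succeeds; each saved credential row also carries the model's own max_tokens. A hedged sketch of exercising it; the URL and auth mechanism are assumptions, the body fields mirror the handler above:

import requests

# Assumed host and route prefix; "OpenAI" is one factory name from the diff.
resp = requests.post(
    "http://localhost:9380/v1/llm/set_api_key",
    json={"llm_factory": "OpenAI", "api_key": "<api key>", "base_url": ""},
    cookies={"session": "<session cookie>"},
)
# On success every model of the factory is saved with its own max_tokens.
print(resp.json())
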
-@manager.route('/add_llm', methods=['POST'])
+@manager.route('/add_llm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory")
 def add_llm():
@@ -153,23 +160,23 @@ def add_llm():
         api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])

     elif factory == "LocalAI":
-        llm_name = req["llm_name"]+"___LocalAI"
+        llm_name = req["llm_name"] + "___LocalAI"
         api_key = "xxxxxxxxxxxxxxx"

     elif factory == "HuggingFace":
-        llm_name = req["llm_name"]+"___HuggingFace"
+        llm_name = req["llm_name"] + "___HuggingFace"
         api_key = "xxxxxxxxxxxxxxx"

     elif factory == "OpenAI-API-Compatible":
-        llm_name = req["llm_name"]+"___OpenAI-API"
-        api_key = req.get("api_key","xxxxxxxxxxxxxxx")
+        llm_name = req["llm_name"] + "___OpenAI-API"
+        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

-    elif factory =="XunFei Spark":
+    elif factory == "XunFei Spark":
         llm_name = req["llm_name"]
         if req["model_type"] == "chat":
             api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
         elif req["model_type"] == "tts":
-            api_key = apikey_json(["spark_app_id", "spark_api_secret","spark_api_key"])
+            api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])

     elif factory == "BaiduYiyan":
         llm_name = req["llm_name"]
@@ -183,6 +190,10 @@ def add_llm():
         llm_name = req["llm_name"]
         api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])

+    elif factory == "Azure-OpenAI":
+        llm_name = req["llm_name"]
+        api_key = apikey_json(["api_key", "api_version"])
+
     else:
         llm_name = req["llm_name"]
         api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
@@ -193,18 +204,19 @@ def add_llm():
         "model_type": req["model_type"],
         "llm_name": llm_name,
         "api_base": req.get("api_base", ""),
-        "api_key": api_key
+        "api_key": api_key,
+        "max_tokens": req.get("max_tokens")
     }

     msg = ""
     if llm["model_type"] == LLMType.EMBEDDING.value:
         mdl = EmbeddingModel[factory](
             key=llm['api_key'],
             model_name=llm["llm_name"],
             base_url=llm["api_base"])
         try:
             arr, tc = mdl.encode(["Test if the api key is available"])
-            if len(arr[0]) == 0 or tc == 0:
+            if len(arr[0]) == 0:
                 raise Exception("Fail")
         except Exception as e:
             msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
@@ -216,7 +228,7 @@ def add_llm():
         )
         try:
             m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                 "temperature": 0.9})
             if not tc:
                 raise Exception(m)
         except Exception as e:
@@ -224,28 +236,26 @@ def add_llm():
                 e)
     elif llm["model_type"] == LLMType.RERANK:
         mdl = RerankModel[factory](
             key=llm["api_key"],
             model_name=llm["llm_name"],
             base_url=llm["api_base"]
         )
         try:
-            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!"])
-            if len(arr) == 0 or tc == 0:
+            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
+            if len(arr) == 0:
                 raise Exception("Not known.")
         except Exception as e:
             msg += f"\nFail to access model({llm['llm_name']})." + str(
                 e)
     elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
         mdl = CvModel[factory](
             key=llm["api_key"],
             model_name=llm["llm_name"],
             base_url=llm["api_base"]
         )
         try:
             img_url = (
-                "https://upload.wikimedia.org/wikipedia/comm"
-                "ons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/256"
-                "0px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+                "https://www.8848seo.cn/zb_users/upload/2022/07/20220705101240_99378.jpg"
             )
             res = requests.get(img_url)
             if res.status_code == 200:
@@ -270,36 +280,38 @@ def add_llm():
                 pass

     if msg:
-        return get_data_error_result(retmsg=msg)
+        return get_data_error_result(message=msg)

     if not TenantLLMService.filter_update(
-            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
+            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory,
+             TenantLLM.llm_name == llm["llm_name"]], llm):
         TenantLLMService.save(**llm)

     return get_json_result(data=True)

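The new Azure-OpenAI branch folds api_key and api_version into a single JSON credential via apikey_json, and the stored llm dict now records max_tokens. A hypothetical request body for that branch; the endpoint path, host, and concrete values are assumptions, only the field names follow the handler above:

import requests

# Placeholder values throughout; the field names come from the diff.
payload = {
    "llm_factory": "Azure-OpenAI",
    "llm_name": "<deployment name>",
    "model_type": "chat",
    "api_key": "<azure key>",        # folded into the api_key JSON by apikey_json
    "api_version": "2024-02-01",
    "api_base": "https://<resource>.openai.azure.com",
    "max_tokens": 8192,
}
resp = requests.post("http://localhost:9380/v1/llm/add_llm",
                     json=payload, cookies={"session": "<session cookie>"})
print(resp.json())
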
-@manager.route('/delete_llm', methods=['POST'])
+@manager.route('/delete_llm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory", "llm_name")
 def delete_llm():
     req = request.json
     TenantLLMService.filter_delete(
-        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
+        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"],
+         TenantLLM.llm_name == req["llm_name"]])
     return get_json_result(data=True)


-@manager.route('/delete_factory', methods=['POST'])
+@manager.route('/delete_factory', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory")
 def delete_factory():
     req = request.json
     TenantLLMService.filter_delete(
         [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
     return get_json_result(data=True)


-@manager.route('/my_llms', methods=['GET'])
+@manager.route('/my_llms', methods=['GET'])  # noqa: F821
 @login_required
 def my_llms():
     try:
@@ -320,11 +332,11 @@ def my_llms():
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_app():
-    self_deploied = ["Youdao","FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
-    weighted = ["Youdao","FastEmbed", "BAAI"] if LIGHTEN else []
+    self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
+    weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
     model_type = request.args.get("model_type")
     try:
         objs = TenantLLMService.query(tenant_id=current_user.id)
@@ -335,15 +347,17 @@ def list_app():
         for m in llms:
             m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied

-        llm_set = set([m["llm_name"] for m in llms])
+        llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
         for o in objs:
-            if not o.api_key:continue
-            if o.llm_name in llm_set:continue
+            if not o.api_key:
+                continue
+            if o.llm_name + "@" + o.llm_factory in llm_set:
+                continue
             llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

         res = {}
         for m in llms:
-            if model_type and m["model_type"].find(model_type)<0:
+            if model_type and m["model_type"].find(model_type) < 0:
                 continue
             if m["fid"] not in res:
                 res[m["fid"]] = []
@@ -351,4 +365,4 @@ def list_app():

         return get_json_result(data=res)
     except Exception as e:
         return server_error_response(e)

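list_app now keys its dedup set on "name@factory" instead of the bare model name, so a tenant-configured model is only suppressed when the same factory already lists it. A toy reproduction of that logic, with hypothetical model and factory names:

# Two factories can expose a model with the same name; the factory-qualified
# key keeps both visible.
llms = [{"llm_name": "bge-m3", "fid": "Ollama"}]
llm_set = {m["llm_name"] + "@" + m["fid"] for m in llms}

tenant_model = {"llm_name": "bge-m3", "llm_factory": "Xinference"}
key = tenant_model["llm_name"] + "@" + tenant_model["llm_factory"]
assert key not in llm_set  # with the old name-only key this entry would be skipped
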
api/apps/sdk/agent.py (new file)
@@ -0,0 +1,39 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from api.db.services.canvas_service import UserCanvasService
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result
from flask import request


@manager.route('/agents', methods=['GET'])  # noqa: F821
@token_required
def list_agents(tenant_id):
    id = request.args.get("id")
    title = request.args.get("title")
    if id or title:
        canvas = UserCanvasService.query(id=id, title=title, user_id=tenant_id)
        if not canvas:
            return get_error_data_result("The agent doesn't exist.")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "update_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    canvas = UserCanvasService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, title)
    return get_result(data=canvas)
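A usage sketch for the new /agents route; the host and the /api/v1 mount point are assumptions, while the query parameters mirror list_agents above:

import requests

resp = requests.get(
    "http://localhost:9380/api/v1/agents",
    headers={"Authorization": "Bearer <api key>"},
    params={"page": 1, "page_size": 30, "orderby": "update_time", "desc": "true"},
)
print(resp.json())
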
(deleted file)
@@ -1,304 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request

from api.db import StatusEnum
from api.db.db_models import TenantLLM
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService
from api.db.services.user_service import TenantService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_data_error_result, token_required
from api.utils.api_utils import get_json_result


@manager.route('/save', methods=['POST'])
@token_required
def save(tenant_id):
    req = request.json
    # dataset
    if req.get("knowledgebases") == []:
        return get_data_error_result(retmsg="knowledgebases can not be empty list")
    kb_list = []
    if req.get("knowledgebases"):
        for kb in req.get("knowledgebases"):
            if not kb["id"]:
                return get_data_error_result(retmsg="knowledgebase needs id")
            if not KnowledgebaseService.query(id=kb["id"], tenant_id=tenant_id):
                return get_data_error_result(retmsg="you do not own the knowledgebase")
            # if not DocumentService.query(kb_id=kb["id"]):
            #     return get_data_error_result(retmsg="There is a invalid knowledgebase")
            kb_list.append(kb["id"])
    req["kb_ids"] = kb_list
    # llm
    llm = req.get("llm")
    if llm:
        if "model_name" in llm:
            req["llm_id"] = llm.pop("model_name")
        req["llm_setting"] = req.pop("llm")
    e, tenant = TenantService.get_by_id(tenant_id)
    if not e:
        return get_data_error_result(retmsg="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
                prompt[new_key] = prompt.pop(old_key)
        for key in key_list:
            if key in prompt:
                req[key] = prompt.pop(key)
        req["prompt_config"] = req.pop("prompt")
    # create
    if "id" not in req:
        # dataset
        if not kb_list:
            return get_data_error_result(retmsg="knowledgebases are required!")
        # init
        req["id"] = get_uuid()
        req["description"] = req.get("description", "A helpful Assistant")
        req["icon"] = req.get("avatar", "")
        req["top_n"] = req.get("top_n", 6)
        req["top_k"] = req.get("top_k", 1024)
        req["rerank_id"] = req.get("rerank_id", "")
        if req.get("llm_id"):
            if not TenantLLMService.query(llm_name=req["llm_id"]):
                return get_data_error_result(retmsg="the model_name does not exist.")
        else:
            req["llm_id"] = tenant.llm_id
        if not req.get("name"):
            return get_data_error_result(retmsg="name is required.")
        if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_data_error_result(retmsg="Duplicated assistant name in creating dataset.")
        # tenant_id
        if req.get("tenant_id"):
            return get_data_error_result(retmsg="tenant_id must not be provided.")
        req["tenant_id"] = tenant_id
        # prompt more parameter
        default_prompt = {
            "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
      以下是知识库:
      {knowledge}
      以上是知识库。""",
            "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
            "parameters": [
                {"key": "knowledge", "optional": False}
            ],
            "empty_response": "Sorry! 知识库中未找到相关内容!"
        }
        key_list_2 = ["system", "prologue", "parameters", "empty_response"]
        if "prompt_config" not in req:
            req['prompt_config'] = {}
        for key in key_list_2:
            temp = req['prompt_config'].get(key)
            if not temp:
                req['prompt_config'][key] = default_prompt[key]
        for p in req['prompt_config']["parameters"]:
            if p["optional"]:
                continue
            if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
                return get_data_error_result(
                    retmsg="Parameter '{}' is not used".format(p["key"]))
        # save
        if not DialogService.save(**req):
            return get_data_error_result(retmsg="Fail to new an assistant!")
        # response
        e, res = DialogService.get_by_id(req["id"])
        if not e:
            return get_data_error_result(retmsg="Fail to new an assistant!")
        res = res.to_json()
        renamed_dict = {}
        for key, value in res["prompt_config"].items():
            new_key = key_mapping.get(key, key)
            renamed_dict[new_key] = value
        res["prompt"] = renamed_dict
        del res["prompt_config"]
        new_dict = {"similarity_threshold": res["similarity_threshold"],
                    "keywords_similarity_weight": res["vector_similarity_weight"],
                    "top_n": res["top_n"],
                    "rerank_model": res['rerank_id']}
        res["prompt"].update(new_dict)
        for key in key_list:
            del res[key]
        res["llm"] = res.pop("llm_setting")
        res["llm"]["model_name"] = res.pop("llm_id")
        del res["kb_ids"]
        res["knowledgebases"] = req["knowledgebases"]
        res["avatar"] = res.pop("icon")
        return get_json_result(data=res)
    else:
        # authorization
        if not DialogService.query(tenant_id=tenant_id, id=req["id"], status=StatusEnum.VALID.value):
            return get_json_result(data=False, retmsg='You do not own the assistant', retcode=RetCode.OPERATING_ERROR)
        # prompt
        if not req["id"]:
            return get_data_error_result(retmsg="id can not be empty")
        e, res = DialogService.get_by_id(req["id"])
        res = res.to_json()
        if "llm_id" in req:
            if not TenantLLMService.query(llm_name=req["llm_id"]):
                return get_data_error_result(retmsg="the model_name does not exist.")
        if "name" in req:
            if not req.get("name"):
                return get_data_error_result(retmsg="name is not empty.")
            if req["name"].lower() != res["name"].lower() \
                    and len(
                DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
                return get_data_error_result(retmsg="Duplicated assistant name in updating dataset.")
        if "prompt_config" in req:
            res["prompt_config"].update(req["prompt_config"])
            for p in res["prompt_config"]["parameters"]:
                if p["optional"]:
                    continue
                if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
                    return get_data_error_result(retmsg="Parameter '{}' is not used".format(p["key"]))
        if "llm_setting" in req:
            res["llm_setting"].update(req["llm_setting"])
        req["prompt_config"] = res["prompt_config"]
        req["llm_setting"] = res["llm_setting"]
        # avatar
        if "avatar" in req:
            req["icon"] = req.pop("avatar")
        assistant_id = req.pop("id")
        if "knowledgebases" in req:
            req.pop("knowledgebases")
        if not DialogService.update_by_id(assistant_id, req):
            return get_data_error_result(retmsg="Assistant not found!")
        return get_json_result(data=True)


@manager.route('/delete', methods=['DELETE'])
@token_required
def delete(tenant_id):
    req = request.args
    if "id" not in req:
        return get_data_error_result(retmsg="id is required")
    id = req['id']
    if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
        return get_json_result(data=False, retmsg='you do not own the assistant.', retcode=RetCode.OPERATING_ERROR)

    temp_dict = {"status": StatusEnum.INVALID.value}
    DialogService.update_by_id(req["id"], temp_dict)
    return get_json_result(data=True)


@manager.route('/get', methods=['GET'])
@token_required
def get(tenant_id):
    req = request.args
    if "id" in req:
        id = req["id"]
        ass = DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value)
        if not ass:
            return get_json_result(data=False, retmsg='You do not own the assistant.', retcode=RetCode.OPERATING_ERROR)
        if "name" in req:
            name = req["name"]
            if ass[0].name != name:
                return get_json_result(data=False, retmsg='name does not match id.', retcode=RetCode.OPERATING_ERROR)
        res = ass[0].to_json()
    else:
        if "name" in req:
            name = req["name"]
            ass = DialogService.query(name=name, tenant_id=tenant_id, status=StatusEnum.VALID.value)
            if not ass:
                return get_json_result(data=False, retmsg='You do not own the assistant.',
                                       retcode=RetCode.OPERATING_ERROR)
            res = ass[0].to_json()
        else:
            return get_data_error_result(retmsg="At least one of `id` or `name` must be provided.")
    renamed_dict = {}
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    for key, value in res["prompt_config"].items():
        new_key = key_mapping.get(key, key)
        renamed_dict[new_key] = value
    res["prompt"] = renamed_dict
    del res["prompt_config"]
    new_dict = {"similarity_threshold": res["similarity_threshold"],
                "keywords_similarity_weight": res["vector_similarity_weight"],
                "top_n": res["top_n"],
                "rerank_model": res['rerank_id']}
    res["prompt"].update(new_dict)
    for key in key_list:
        del res[key]
    res["llm"] = res.pop("llm_setting")
    res["llm"]["model_name"] = res.pop("llm_id")
    kb_list = []
    for kb_id in res["kb_ids"]:
        kb = KnowledgebaseService.query(id=kb_id)
        kb_list.append(kb[0].to_json())
    del res["kb_ids"]
    res["knowledgebases"] = kb_list
    res["avatar"] = res.pop("icon")
    return get_json_result(data=res)


@manager.route('/list', methods=['GET'])
@token_required
def list_assistants(tenant_id):
    assts = DialogService.query(
        tenant_id=tenant_id,
        status=StatusEnum.VALID.value,
        reverse=True,
        order_by=DialogService.model.create_time)
    assts = [d.to_dict() for d in assts]
    list_assts = []
    renamed_dict = {}
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    for res in assts:
        for key, value in res["prompt_config"].items():
            new_key = key_mapping.get(key, key)
            renamed_dict[new_key] = value
        res["prompt"] = renamed_dict
        del res["prompt_config"]
        new_dict = {"similarity_threshold": res["similarity_threshold"],
                    "keywords_similarity_weight": res["vector_similarity_weight"],
                    "top_n": res["top_n"],
                    "rerank_model": res['rerank_id']}
        res["prompt"].update(new_dict)
        for key in key_list:
            del res[key]
        res["llm"] = res.pop("llm_setting")
        res["llm"]["model_name"] = res.pop("llm_id")
        kb_list = []
        for kb_id in res["kb_ids"]:
            kb = KnowledgebaseService.query(id=kb_id)
            kb_list.append(kb[0].to_json())
        del res["kb_ids"]
        res["knowledgebases"] = kb_list
        res["avatar"] = res.pop("icon")
        list_assts.append(res)
    return get_json_result(data=list_assts)
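Both the removed handler above and its replacement in api/apps/sdk/chat.py below translate internal prompt_config keys to public names with the same key_mapping table; a toy run of that renaming step, using only the mapping shown in the source:

key_mapping = {"parameters": "variables", "prologue": "opener",
               "quote": "show_quote", "system": "prompt",
               "rerank_id": "rerank_model",
               "vector_similarity_weight": "keywords_similarity_weight"}

# Hypothetical stored prompt_config; keys are internal names.
prompt_config = {"system": "...{knowledge}...", "prologue": "Hi!",
                 "parameters": [{"key": "knowledge", "optional": False}]}
public = {key_mapping.get(k, k): v for k, v in prompt_config.items()}
assert set(public) == {"prompt", "opener", "variables"}
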
api/apps/sdk/chat.py (new file)
@@ -0,0 +1,318 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from api import settings
from api.db import StatusEnum
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import TenantService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result


@manager.route('/chats', methods=['POST'])  # noqa: F821
@token_required
def create(tenant_id):
    req = request.json
    ids = req.get("dataset_ids")
    if not ids:
        return get_error_data_result(message="`dataset_ids` is required")
    for kb_id in ids:
        kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {kb_id}")
        kbs = KnowledgebaseService.query(id=kb_id)
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    kbs = KnowledgebaseService.get_by_ids(ids)
    embd_count = list(set([kb.embd_id for kb in kbs]))
    if len(embd_count) != 1:
        return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
    req["kb_ids"] = ids
    # llm
    llm = req.get("llm")
    if llm:
        if "model_name" in llm:
            req["llm_id"] = llm.pop("model_name")
            if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
        req["llm_setting"] = req.pop("llm")
    e, tenant = TenantService.get_by_id(tenant_id)
    if not e:
        return get_error_data_result(message="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
                prompt[new_key] = prompt.pop(old_key)
        for key in key_list:
            if key in prompt:
                req[key] = prompt.pop(key)
        req["prompt_config"] = req.pop("prompt")
    # init
    req["id"] = get_uuid()
    req["description"] = req.get("description", "A helpful Assistant")
    req["icon"] = req.get("avatar", "")
    req["top_n"] = req.get("top_n", 6)
    req["top_k"] = req.get("top_k", 1024)
    req["rerank_id"] = req.get("rerank_id", "")
    if req.get("rerank_id"):
        value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
    if not req.get("llm_id"):
        req["llm_id"] = tenant.llm_id
    if not req.get("name"):
        return get_error_data_result(message="`name` is required.")
    if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="Duplicated chat name in creating chat.")
    # tenant_id
    if req.get("tenant_id"):
        return get_error_data_result(message="`tenant_id` must not be provided.")
    req["tenant_id"] = tenant_id
    # prompt more parameter
    default_prompt = {
        "system": """You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the knowledge base!" Answers need to consider chat history.
      Here is the knowledge base:
      {knowledge}
      The above is the knowledge base.""",
        "prologue": "Hi! I'm your assistant, what can I do for you?",
        "parameters": [
            {"key": "knowledge", "optional": False}
        ],
        "empty_response": "Sorry! No relevant content was found in the knowledge base!",
        "quote": True,
        "tts": False,
        "refine_multiturn": True
    }
    key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
    if "prompt_config" not in req:
        req['prompt_config'] = {}
    for key in key_list_2:
        temp = req['prompt_config'].get(key)
        if (not temp and key == 'system') or (key not in req["prompt_config"]):
            req['prompt_config'][key] = default_prompt[key]
    for p in req['prompt_config']["parameters"]:
        if p["optional"]:
            continue
        if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
            return get_error_data_result(
                message="Parameter '{}' is not used".format(p["key"]))
    # save
    if not DialogService.save(**req):
        return get_error_data_result(message="Fail to new a chat!")
    # response
    e, res = DialogService.get_by_id(req["id"])
    if not e:
        return get_error_data_result(message="Fail to new a chat!")
    res = res.to_json()
    renamed_dict = {}
    for key, value in res["prompt_config"].items():
        new_key = key_mapping.get(key, key)
        renamed_dict[new_key] = value
    res["prompt"] = renamed_dict
    del res["prompt_config"]
    new_dict = {"similarity_threshold": res["similarity_threshold"],
                "keywords_similarity_weight": res["vector_similarity_weight"],
                "top_n": res["top_n"],
                "rerank_model": res['rerank_id']}
    res["prompt"].update(new_dict)
    for key in key_list:
        del res[key]
    res["llm"] = res.pop("llm_setting")
    res["llm"]["model_name"] = res.pop("llm_id")
    del res["kb_ids"]
    res["dataset_ids"] = req["dataset_ids"]
    res["avatar"] = res.pop("icon")
    return get_result(data=res)


@manager.route('/chats/<chat_id>', methods=['PUT'])  # noqa: F821
@token_required
def update(tenant_id, chat_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message='You do not own the chat')
    req = request.json
    ids = req.get("dataset_ids")
    if "show_quotation" in req:
        req["do_refer"] = req.pop("show_quotation")
    if "dataset_ids" in req:
        if not ids:
            return get_error_data_result("`dataset_ids` can't be empty")
    if ids:
        for kb_id in ids:
            kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
            if not kbs:
                return get_error_data_result(f"You don't own the dataset {kb_id}")
            kbs = KnowledgebaseService.query(id=kb_id)
            kb = kbs[0]
            if kb.chunk_num == 0:
                return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
        kbs = KnowledgebaseService.get_by_ids(ids)
        embd_count = list(set([kb.embd_id for kb in kbs]))
        if len(embd_count) != 1:
            return get_result(
                message='Datasets use different embedding models."',
                code=settings.RetCode.AUTHENTICATION_ERROR)
        req["kb_ids"] = ids
    llm = req.get("llm")
    if llm:
        if "model_name" in llm:
            req["llm_id"] = llm.pop("model_name")
            if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
        req["llm_setting"] = req.pop("llm")
    e, tenant = TenantService.get_by_id(tenant_id)
    if not e:
        return get_error_data_result(message="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
                prompt[new_key] = prompt.pop(old_key)
        for key in key_list:
            if key in prompt:
                req[key] = prompt.pop(key)
        req["prompt_config"] = req.pop("prompt")
    e, res = DialogService.get_by_id(chat_id)
    res = res.to_json()
    if req.get("rerank_id"):
        value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
    if "name" in req:
        if not req.get("name"):
            return get_error_data_result(message="`name` is not empty.")
        if req["name"].lower() != res["name"].lower() \
                and len(
            DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
            return get_error_data_result(message="Duplicated chat name in updating dataset.")
    if "prompt_config" in req:
        res["prompt_config"].update(req["prompt_config"])
        for p in res["prompt_config"]["parameters"]:
            if p["optional"]:
                continue
            if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
                return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
    if "llm_setting" in req:
        res["llm_setting"].update(req["llm_setting"])
    req["prompt_config"] = res["prompt_config"]
    req["llm_setting"] = res["llm_setting"]
    # avatar
    if "avatar" in req:
        req["icon"] = req.pop("avatar")
    if "dataset_ids" in req:
        req.pop("dataset_ids")
    if not DialogService.update_by_id(chat_id, req):
        return get_error_data_result(message="Chat not found!")
    return get_result()


@manager.route('/chats', methods=['DELETE'])  # noqa: F821
@token_required
def delete(tenant_id):
    req = request.json
    if not req:
        ids = None
    else:
        ids = req.get("ids")
    if not ids:
        id_list = []
        dias = DialogService.query(tenant_id=tenant_id, status=StatusEnum.VALID.value)
        for dia in dias:
            id_list.append(dia.id)
    else:
        id_list = ids
    for id in id_list:
        if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
            return get_error_data_result(message=f"You don't own the chat {id}")
        temp_dict = {"status": StatusEnum.INVALID.value}
        DialogService.update_by_id(id, temp_dict)
    return get_result()


@manager.route('/chats', methods=['GET'])  # noqa: F821
@token_required
def list_chat(tenant_id):
    id = request.args.get("id")
    name = request.args.get("name")
    chat = DialogService.query(id=id, name=name, status=StatusEnum.VALID.value, tenant_id=tenant_id)
    if not chat:
        return get_error_data_result(message="The chat doesn't exist")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    chats = DialogService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, name)
    if not chats:
        return get_result(data=[])
    list_assts = []
    renamed_dict = {}
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight",
                   "do_refer": "show_quotation"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    for res in chats:
        for key, value in res["prompt_config"].items():
            new_key = key_mapping.get(key, key)
            renamed_dict[new_key] = value
        res["prompt"] = renamed_dict
        del res["prompt_config"]
        new_dict = {"similarity_threshold": res["similarity_threshold"],
                    "keywords_similarity_weight": res["vector_similarity_weight"],
                    "top_n": res["top_n"],
                    "rerank_model": res['rerank_id']}
        res["prompt"].update(new_dict)
        for key in key_list:
            del res[key]
        res["llm"] = res.pop("llm_setting")
        res["llm"]["model_name"] = res.pop("llm_id")
        kb_list = []
        for kb_id in res["kb_ids"]:
            kb = KnowledgebaseService.query(id=kb_id)
            if not kb:
                return get_error_data_result(message=f"Don't exist the kb {kb_id}")
            kb_list.append(kb[0].to_json())
        del res["kb_ids"]
        res["datasets"] = kb_list
        res["avatar"] = res.pop("icon")
        list_assts.append(res)
    return get_result(data=list_assts)
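A hedged sketch of creating a chat over the new SDK surface; host and mount point are assumptions, and the model and dataset values are placeholders. The body fields mirror create() above: dataset_ids replaces the old knowledgebases list, llm.model_name maps to llm_id, and public prompt keys such as opener are renamed back to their internal names on the server:

import requests

payload = {
    "name": "kb-assistant",
    "dataset_ids": ["<dataset id>"],          # all datasets must share one embedding model
    "llm": {"model_name": "<chat model>", "temperature": 0.1},
    "prompt": {"opener": "Hi! How can I help?", "top_n": 6},
}
resp = requests.post("http://localhost:9380/api/v1/chats",
                     headers={"Authorization": "Bearer <api key>"},
                     json=payload)
print(resp.json())
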
@ -1,224 +1,531 @@
|
|||||||
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from flask import request

from api.db import StatusEnum, FileSource
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService, LLMService
from api.db.services.user_service import TenantService
from api import settings
from api.utils import get_uuid
from api.utils.api_utils import (
    get_result,
    token_required,
    get_error_data_result,
    valid,
    get_parser_config,
)


@manager.route("/datasets", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id):
    """
    Create a new dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset creation parameters.
        required: true
        schema:
          type: object
          required:
            - name
          properties:
            name:
              type: string
              description: Name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Dataset permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Language of the dataset.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email"]
              description: Chunking method.
            parser_config:
              type: object
              description: Parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
          properties:
            data:
              type: object
    """
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive", "manual", "qa", "table", "paper", "book", "laws",
        "presentation", "picture", "one", "knowledge_graph", "email",
    ]
    check_validation = valid(
        permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method
    )
    if check_validation:
        return check_validation
    req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "tenant_id" in req:
        return get_error_data_result(message="`tenant_id` must not be provided")
    if "chunk_count" in req or "document_count" in req:
        return get_error_data_result(
            message="`chunk_count` or `document_count` must not be provided"
        )
    if "name" not in req:
        return get_error_data_result(message="`name` can not be empty!")
    req["id"] = get_uuid()
    req["name"] = req["name"].strip()
    if req["name"] == "":
        return get_error_data_result(message="`name` can not be an empty string!")
    if KnowledgebaseService.query(
        name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
    ):
        return get_error_data_result(
            message="Duplicated dataset name in creating dataset."
        )
    req["tenant_id"] = req["created_by"] = tenant_id
    if not req.get("embedding_model"):
        req["embedding_model"] = t.embd_id
    else:
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5", "BAAI/bge-base-en-v1.5", "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5", "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en", "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5", "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2", "text-embedding-v3", "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            # A built-in model name must be on the allow list unless the tenant
            # has registered it as its own embedding model.
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(
                tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model")
            )
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    mapped_keys = {
        new_key: req[old_key]
        for new_key, old_key in key_mapping.items()
        if old_key in req
    }
    req.update(mapped_keys)
    if not KnowledgebaseService.save(**req):
        return get_error_data_result(message="Create dataset error.(Database error)")
    renamed_data = {}
    e, k = KnowledgebaseService.get_by_id(req["id"])
    for key, value in k.to_dict().items():
        new_key = key_mapping.get(key, key)
        renamed_data[new_key] = value
    return get_result(data=renamed_data)


@manager.route("/datasets", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
    """
    Delete datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of dataset IDs to delete.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    req = request.json
    if not req:
        ids = None
    else:
        ids = req.get("ids")
    if not ids:
        id_list = []
        kbs = KnowledgebaseService.query(tenant_id=tenant_id)
        for kb in kbs:
            id_list.append(kb.id)
    else:
        id_list = ids
    for id in id_list:
        kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
        if not kbs:
            return get_error_data_result(message=f"You don't own the dataset {id}")
        for doc in DocumentService.query(kb_id=id):
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    message="Remove document error.(Database error)"
                )
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            File2DocumentService.delete_by_document_id(doc.id)
        FileService.filter_delete(
            [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
        if not KnowledgebaseService.delete_by_id(id):
            return get_error_data_result(message="Delete dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)


@manager.route("/datasets/<dataset_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, dataset_id):
    """
    Update a dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Updated permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Updated language.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email"]
              description: Updated chunking method.
            parser_config:
              type: object
              description: Updated parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset")
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
    if any(key in req for key in invalid_keys):
        return get_error_data_result(message="The input parameters are invalid.")
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive", "manual", "qa", "table", "paper", "book", "laws",
        "presentation", "picture", "one", "knowledge_graph", "email",
    ]
    check_validation = valid(
        permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method
    )
    if check_validation:
        return check_validation
    if "tenant_id" in req:
        if req["tenant_id"] != tenant_id:
            return get_error_data_result(message="Can't change `tenant_id`.")
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if "parser_config" in req:
        temp_dict = kb.parser_config
        temp_dict.update(req["parser_config"])
        req["parser_config"] = temp_dict
    if "chunk_count" in req:
        if req["chunk_count"] != kb.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
        req.pop("chunk_count")
    if "document_count" in req:
        if req["document_count"] != kb.doc_num:
            return get_error_data_result(message="Can't change `document_count`.")
        req.pop("document_count")
    if "chunk_method" in req:
        if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `chunk_method` is not changeable."
            )
        req["parser_id"] = req.pop("chunk_method")
        if req["parser_id"] != kb.parser_id:
            if not req.get("parser_config"):
                req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "embedding_model" in req:
        if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `embedding_model` is not changeable."
            )
        if not req.get("embedding_model"):
            return get_error_data_result("`embedding_model` can't be empty")
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5", "BAAI/bge-base-en-v1.5", "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5", "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en", "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5", "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2", "text-embedding-v3", "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(
                tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model")
            )
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
        req["embd_id"] = req.pop("embedding_model")
    if "name" in req:
        req["name"] = req["name"].strip()
        if (
            req["name"].lower() != kb.name.lower()
            and len(
                KnowledgebaseService.query(
                    name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
                )
            )
            > 0
        ):
            return get_error_data_result(
                message="Duplicated dataset name in updating dataset."
            )
    if not KnowledgebaseService.update_by_id(kb.id, req):
        return get_error_data_result(message="Update dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)


@manager.route("/datasets", methods=["GET"])  # noqa: F821
@token_required
def list(tenant_id):
    """
    List datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: id
        type: string
        required: false
        description: Dataset ID to filter.
      - in: query
        name: name
        type: string
        required: false
        description: Dataset name to filter.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 1024
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Order in descending order.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Successful operation.
        schema:
          type: array
          items:
            type: object
    """
    id = request.args.get("id")
    name = request.args.get("name")
    if id:
        kbs = KnowledgebaseService.get_kb_by_id(id, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {id}")
    if name:
        kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {name}")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_list(
        [m["tenant_id"] for m in tenants],
        tenant_id,
        page_number,
        items_per_page,
        orderby,
        desc,
        id,
        name,
    )
    renamed_list = []
    for kb in kbs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "chunk_method",
            "embd_id": "embedding_model",
        }
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_result(data=renamed_list)
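A short sketch of exercising the dataset endpoints above with `requests`. The URL and key are placeholders, and the chosen `chunk_method` value is just one example from the validation list in the code.

import requests

BASE_URL = "http://127.0.0.1:9380/api/v1"  # assumed deployment address
HEADERS = {"Authorization": "Bearer ragflow-xxxxxx"}  # placeholder API key

# Create a dataset; omitted fields fall back to tenant defaults as in create().
created = requests.post(
    f"{BASE_URL}/datasets",
    headers=HEADERS,
    json={"name": "demo_ds", "permission": "me", "chunk_method": "naive"},
).json()
print(created["data"]["id"], created["data"]["chunk_method"])

# Page through the datasets visible to the tenant.
listed = requests.get(
    f"{BASE_URL}/datasets",
    headers=HEADERS,
    params={"page": 1, "page_size": 30, "desc": "true"},
).json()
for ds in listed["data"]:
    # "document_count" and "embedding_model" are the renamed fields from list().
    print(ds["name"], ds["document_count"], ds["embedding_model"])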
api/apps/sdk/dify_retrieval.py (76 lines, new file)
@@ -0,0 +1,76 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request, jsonify

from api.db import LLMType, ParserType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api import settings
from api.utils.api_utils import validate_request, build_error_result, apikey_required


@manager.route('/dify/retrieval', methods=['POST'])  # noqa: F821
@apikey_required
@validate_request("knowledge_id", "query")
def retrieval(tenant_id):
    req = request.json
    question = req["query"]
    kb_id = req["knowledge_id"]
    retrieval_setting = req.get("retrieval_setting", {})
    similarity_threshold = float(retrieval_setting.get("score_threshold", 0.0))
    top = int(retrieval_setting.get("top_k", 1024))

    try:
        e, kb = KnowledgebaseService.get_by_id(kb_id)
        if not e:
            return build_error_result(message="Knowledgebase not found!", code=settings.RetCode.NOT_FOUND)

        if kb.tenant_id != tenant_id:
            return build_error_result(message="Knowledgebase not found!", code=settings.RetCode.NOT_FOUND)

        embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

        retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
        ranks = retr.retrieval(
            question,
            embd_mdl,
            kb.tenant_id,
            [kb_id],
            page=1,
            page_size=top,
            similarity_threshold=similarity_threshold,
            vector_similarity_weight=0.3,
            top=top
        )
        records = []
        for c in ranks["chunks"]:
            c.pop("vector", None)
            records.append({
                "content": c["content_ltks"],
                "score": c["similarity"],
                "title": c["docnm_kwd"],
                "metadata": {}
            })

        return jsonify({"records": records})
    except Exception as e:
        # Use >= 0 so a "not_found" match at index 0 is not missed.
        if str(e).find("not_found") >= 0:
            return build_error_result(
                message='No chunk found! Check the chunk status please!',
                code=settings.RetCode.NOT_FOUND
            )
        return build_error_result(message=str(e), code=settings.RetCode.SERVER_ERROR)
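The endpoint above follows the request shape of Dify's external-knowledge retrieval API. A sketch of what a caller might send; the address, key, and dataset ID are placeholders.

import requests

# Placeholder address and key; the route is registered under the SDK blueprint.
resp = requests.post(
    "http://127.0.0.1:9380/api/v1/dify/retrieval",
    headers={"Authorization": "Bearer ragflow-xxxxxx"},
    json={
        "knowledge_id": "<dataset_id>",  # maps to kb_id in the handler
        "query": "What is RAGFlow?",
        "retrieval_setting": {  # optional; defaults are in the handler
            "score_threshold": 0.2,
            "top_k": 8,
        },
    },
)
for rec in resp.json()["records"]:
    print(rec["score"], rec["title"])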
api/apps/sdk/doc.py (1797 lines)
File diff suppressed because it is too large.

api/apps/sdk/session.py
@@ -1,266 +1,433 @@
|
|||||||
#
|
#
|
||||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
# You may obtain a copy of the License at
|
# You may obtain a copy of the License at
|
||||||
#
|
#
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
#
|
#
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
import json
|
import re
|
||||||
from uuid import uuid4
|
import json
|
||||||
|
from api.db import LLMType
|
||||||
from flask import request, Response
|
from flask import request, Response
|
||||||
|
|
||||||
from api.db import StatusEnum
|
from api.db.services.conversation_service import ConversationService, iframe_completion
|
||||||
from api.db.services.dialog_service import DialogService, ConversationService, chat
|
from api.db.services.conversation_service import completion as rag_completion
|
||||||
from api.settings import RetCode
|
from api.db.services.canvas_service import completion as agent_completion
|
||||||
from api.utils import get_uuid
|
from api.db.services.dialog_service import ask
|
||||||
from api.utils.api_utils import get_data_error_result
|
from agent.canvas import Canvas
|
||||||
from api.utils.api_utils import get_json_result, token_required
|
from api.db import StatusEnum
|
||||||
|
from api.db.db_models import APIToken
|
||||||
|
from api.db.services.api_service import API4ConversationService
|
||||||
@manager.route('/save', methods=['POST'])
|
from api.db.services.canvas_service import UserCanvasService
|
||||||
@token_required
|
from api.db.services.dialog_service import DialogService
|
||||||
def set_conversation(tenant_id):
|
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||||
req = request.json
|
from api.utils import get_uuid
|
||||||
conv_id = req.get("id")
|
from api.utils.api_utils import get_error_data_result
|
||||||
if "assistant_id" in req:
|
from api.utils.api_utils import get_result, token_required
|
||||||
req["dialog_id"] = req.pop("assistant_id")
|
from api.db.services.llm_service import LLMBundle
|
||||||
if "id" in req:
|
|
||||||
del req["id"]
|
|
||||||
conv = ConversationService.query(id=conv_id)
|
@manager.route('/chats/<chat_id>/sessions', methods=['POST']) # noqa: F821
|
||||||
if not conv:
|
@token_required
|
||||||
return get_data_error_result(retmsg="Session does not exist")
|
def create(tenant_id, chat_id):
|
||||||
if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
req = request.json
|
||||||
return get_data_error_result(retmsg="You do not own the session")
|
req["dialog_id"] = chat_id
|
||||||
if req.get("dialog_id"):
|
dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
|
||||||
dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
|
if not dia:
|
||||||
if not dia:
|
return get_error_data_result(message="You do not own the assistant.")
|
||||||
return get_data_error_result(retmsg="You do not own the assistant")
|
conv = {
|
||||||
if "dialog_id" in req and not req.get("dialog_id"):
|
"id": get_uuid(),
|
||||||
return get_data_error_result(retmsg="assistant_id can not be empty.")
|
"dialog_id": req["dialog_id"],
|
||||||
if "message" in req:
|
"name": req.get("name", "New session"),
|
||||||
return get_data_error_result(retmsg="message can not be change")
|
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
|
||||||
if "reference" in req:
|
}
|
||||||
return get_data_error_result(retmsg="reference can not be change")
|
if not conv.get("name"):
|
||||||
if "name" in req and not req.get("name"):
|
return get_error_data_result(message="`name` can not be empty.")
|
||||||
return get_data_error_result(retmsg="name can not be empty.")
|
ConversationService.save(**conv)
|
||||||
if not ConversationService.update_by_id(conv_id, req):
|
e, conv = ConversationService.get_by_id(conv["id"])
|
||||||
return get_data_error_result(retmsg="Session updates error")
|
if not e:
|
||||||
return get_json_result(data=True)
|
return get_error_data_result(message="Fail to create a session!")
|
||||||
|
conv = conv.to_dict()
|
||||||
if not req.get("dialog_id"):
|
conv['messages'] = conv.pop("message")
|
||||||
return get_data_error_result(retmsg="assistant_id is required.")
|
conv["chat_id"] = conv.pop("dialog_id")
|
||||||
dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
|
del conv["reference"]
|
||||||
if not dia:
|
return get_result(data=conv)
|
||||||
return get_data_error_result(retmsg="You do not own the assistant")
|
|
||||||
conv = {
|
|
||||||
"id": get_uuid(),
|
@manager.route('/agents/<agent_id>/sessions', methods=['POST']) # noqa: F821
|
||||||
"dialog_id": req["dialog_id"],
|
@token_required
|
||||||
"name": req.get("name", "New session"),
|
def create_agent_session(tenant_id, agent_id):
|
||||||
"message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
|
e, cvs = UserCanvasService.get_by_id(agent_id)
|
||||||
}
|
if not e:
|
||||||
if not conv.get("name"):
|
return get_error_data_result("Agent not found.")
|
||||||
return get_data_error_result(retmsg="name can not be empty.")
|
|
||||||
ConversationService.save(**conv)
|
if not isinstance(cvs.dsl, str):
|
||||||
e, conv = ConversationService.get_by_id(conv["id"])
|
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||||
if not e:
|
|
||||||
return get_data_error_result(retmsg="Fail to new session!")
|
canvas = Canvas(cvs.dsl, tenant_id)
|
||||||
conv = conv.to_dict()
|
if canvas.get_preset_param():
|
||||||
conv['messages'] = conv.pop("message")
|
return get_error_data_result("The agent can't create a session directly")
|
||||||
conv["assistant_id"] = conv.pop("dialog_id")
|
conv = {
|
||||||
del conv["reference"]
|
"id": get_uuid(),
|
||||||
return get_json_result(data=conv)
|
"dialog_id": cvs.id,
|
||||||
|
"user_id": tenant_id,
|
||||||
|
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
|
||||||
@manager.route('/completion', methods=['POST'])
|
"source": "agent",
|
||||||
@token_required
|
"dsl": json.loads(cvs.dsl)
|
||||||
def completion(tenant_id):
|
}
|
||||||
req = request.json
|
API4ConversationService.save(**conv)
|
||||||
# req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
|
conv["agent_id"] = conv.pop("dialog_id")
|
||||||
# {"role": "user", "content": "上海有吗?"}
|
return get_result(data=conv)
|
||||||
# ]}
|
|
||||||
if "session_id" not in req:
|
|
||||||
return get_data_error_result(retmsg="session_id is required")
|
@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT']) # noqa: F821
|
||||||
conv = ConversationService.query(id=req["session_id"])
|
@token_required
|
||||||
if not conv:
|
def update(tenant_id, chat_id, session_id):
|
||||||
return get_data_error_result(retmsg="Session does not exist")
|
req = request.json
|
||||||
conv = conv[0]
|
req["dialog_id"] = chat_id
|
||||||
if not DialogService.query(id=conv.dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
conv_id = session_id
|
||||||
return get_data_error_result(retmsg="You do not own the session")
|
conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
|
||||||
msg = []
|
if not conv:
|
||||||
question = {
|
return get_error_data_result(message="Session does not exist")
|
||||||
"content": req.get("question"),
|
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||||
"role": "user",
|
return get_error_data_result(message="You do not own the session")
|
||||||
"id": str(uuid4())
|
if "message" in req or "messages" in req:
|
||||||
}
|
return get_error_data_result(message="`message` can not be change")
|
||||||
conv.message.append(question)
|
if "reference" in req:
|
||||||
for m in conv.message:
|
return get_error_data_result(message="`reference` can not be change")
|
||||||
if m["role"] == "system": continue
|
if "name" in req and not req.get("name"):
|
||||||
if m["role"] == "assistant" and not msg: continue
|
return get_error_data_result(message="`name` can not be empty.")
|
||||||
msg.append(m)
|
if not ConversationService.update_by_id(conv_id, req):
|
||||||
message_id = msg[-1].get("id")
|
return get_error_data_result(message="Session updates error")
|
||||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
return get_result()
|
||||||
del req["session_id"]
|
|
||||||
|
|
||||||
if not conv.reference:
|
@manager.route('/chats/<chat_id>/completions', methods=['POST']) # noqa: F821
|
||||||
conv.reference = []
|
@token_required
|
||||||
conv.message.append({"role": "assistant", "content": "", "id": message_id})
|
def chat_completion(tenant_id, chat_id):
|
||||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
req = request.json
|
||||||
|
if not req or not req.get("session_id"):
|
||||||
def fillin_conv(ans):
|
req = {"question":""}
|
||||||
nonlocal conv, message_id
|
if not DialogService.query(tenant_id=tenant_id,id=chat_id,status=StatusEnum.VALID.value):
|
||||||
if not conv.reference:
|
return get_error_data_result(f"You don't own the chat {chat_id}")
|
||||||
conv.reference.append(ans["reference"])
|
if req.get("session_id"):
|
||||||
else:
|
if not ConversationService.query(id=req["session_id"],dialog_id=chat_id):
|
||||||
conv.reference[-1] = ans["reference"]
|
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
||||||
conv.message[-1] = {"role": "assistant", "content": ans["answer"],
|
if req.get("stream", True):
|
||||||
"id": message_id, "prompt": ans.get("prompt", "")}
|
resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
|
||||||
ans["id"] = message_id
|
resp.headers.add_header("Cache-control", "no-cache")
|
||||||
|
resp.headers.add_header("Connection", "keep-alive")
|
||||||
def stream():
|
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||||
nonlocal dia, msg, req, conv
|
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||||
try:
|
|
||||||
for ans in chat(dia, msg, **req):
|
return resp
|
||||||
fillin_conv(ans)
|
else:
|
||||||
yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
answer = None
|
||||||
ConversationService.update_by_id(conv.id, conv.to_dict())
|
for ans in rag_completion(tenant_id, chat_id, **req):
|
||||||
except Exception as e:
|
answer = ans
|
||||||
yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
|
break
|
||||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
return get_result(data=answer)
|
||||||
ensure_ascii=False) + "\n\n"
|
|
||||||
yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
|
|
||||||
|
@manager.route('/agents/<agent_id>/completions', methods=['POST']) # noqa: F821
|
||||||
if req.get("stream", True):
|
@token_required
|
||||||
resp = Response(stream(), mimetype="text/event-stream")
|
def agent_completions(tenant_id, agent_id):
|
||||||
resp.headers.add_header("Cache-control", "no-cache")
|
req = request.json
|
||||||
resp.headers.add_header("Connection", "keep-alive")
|
cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
|
||||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
if not cvs:
|
||||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
return get_error_data_result(f"You don't own the agent {agent_id}")
|
||||||
return resp
|
if req.get("session_id"):
|
||||||
|
conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
|
||||||
else:
|
if not conv:
|
||||||
answer = None
|
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
||||||
for ans in chat(dia, msg, **req):
|
else:
|
||||||
answer = ans
|
req["question"]=""
|
||||||
fillin_conv(ans)
|
if req.get("stream", True):
|
||||||
ConversationService.update_by_id(conv.id, conv.to_dict())
|
resp = Response(agent_completion(tenant_id, agent_id, **req), mimetype="text/event-stream")
|
||||||
break
|
resp.headers.add_header("Cache-control", "no-cache")
|
||||||
return get_json_result(data=answer)
|
resp.headers.add_header("Connection", "keep-alive")
|
||||||
|
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||||
|
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||||
@manager.route('/get', methods=['GET'])
|
return resp
|
||||||
@token_required
|
try:
|
||||||
def get(tenant_id):
|
for answer in agent_completion(tenant_id, agent_id, **req):
|
||||||
req = request.args
|
return get_result(data=answer)
|
||||||
if "id" not in req:
|
except Exception as e:
|
||||||
return get_data_error_result(retmsg="id is required")
|
return get_error_data_result(str(e))
|
||||||
conv_id = req["id"]
|
|
||||||
conv = ConversationService.query(id=conv_id)
|
|
||||||
if not conv:
|
@manager.route('/chats/<chat_id>/sessions', methods=['GET']) # noqa: F821
|
||||||
return get_data_error_result(retmsg="Session does not exist")
|
@token_required
|
||||||
if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
def list_session(tenant_id, chat_id):
|
||||||
return get_data_error_result(retmsg="You do not own the session")
|
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||||
if "assistant_id" in req:
|
return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
|
||||||
if req["assistant_id"] != conv[0].dialog_id:
|
id = request.args.get("id")
|
||||||
return get_data_error_result(retmsg="The session doesn't belong to the assistant")
|
name = request.args.get("name")
|
||||||
conv = conv[0].to_dict()
|
page_number = int(request.args.get("page", 1))
|
||||||
conv['messages'] = conv.pop("message")
|
items_per_page = int(request.args.get("page_size", 30))
|
||||||
conv["assistant_id"] = conv.pop("dialog_id")
|
orderby = request.args.get("orderby", "create_time")
|
||||||
if conv["reference"]:
|
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||||
messages = conv["messages"]
|
desc = False
|
||||||
message_num = 0
|
else:
|
||||||
chunk_num = 0
|
desc = True
|
||||||
while message_num < len(messages):
|
convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name)
|
||||||
if message_num != 0 and messages[message_num]["role"] != "user":
|
if not convs:
|
||||||
chunk_list = []
|
return get_result(data=[])
|
||||||
if "chunks" in conv["reference"][chunk_num]:
|
for conv in convs:
|
||||||
chunks = conv["reference"][chunk_num]["chunks"]
|
conv['messages'] = conv.pop("message")
|
||||||
for chunk in chunks:
|
infos = conv["messages"]
|
||||||
new_chunk = {
|
for info in infos:
|
||||||
"id": chunk["chunk_id"],
|
if "prompt" in info:
|
||||||
"content": chunk["content_with_weight"],
|
info.pop("prompt")
|
||||||
"document_id": chunk["doc_id"],
|
conv["chat_id"] = conv.pop("dialog_id")
|
||||||
"document_name": chunk["docnm_kwd"],
|
if conv["reference"]:
|
||||||
"knowledgebase_id": chunk["kb_id"],
|
messages = conv["messages"]
|
||||||
"image_id": chunk["img_id"],
|
message_num = 0
|
||||||
"similarity": chunk["similarity"],
|
chunk_num = 0
|
||||||
"vector_similarity": chunk["vector_similarity"],
|
while message_num < len(messages):
|
||||||
"term_similarity": chunk["term_similarity"],
|
if message_num != 0 and messages[message_num]["role"] != "user":
|
||||||
"positions": chunk["positions"],
|
chunk_list = []
|
||||||
}
|
if "chunks" in conv["reference"][chunk_num]:
|
||||||
chunk_list.append(new_chunk)
|
chunks = conv["reference"][chunk_num]["chunks"]
|
||||||
chunk_num += 1
|
for chunk in chunks:
|
||||||
messages[message_num]["reference"] = chunk_list
|
new_chunk = {
|
||||||
message_num += 1
|
"id": chunk["chunk_id"],
|
||||||
del conv["reference"]
|
"content": chunk["content_with_weight"],
|
||||||
return get_json_result(data=conv)
|
"document_id": chunk["doc_id"],
|
||||||
|
"document_name": chunk["docnm_kwd"],
|
||||||
|
"dataset_id": chunk["kb_id"],
|
||||||
@manager.route('/list', methods=["GET"])
|
"image_id": chunk.get("image_id", ""),
|
||||||
@token_required
|
"similarity": chunk["similarity"],
|
||||||
def list(tenant_id):
|
"vector_similarity": chunk["vector_similarity"],
|
||||||
assistant_id = request.args["assistant_id"]
|
"term_similarity": chunk["term_similarity"],
|
||||||
if not DialogService.query(tenant_id=tenant_id, id=assistant_id, status=StatusEnum.VALID.value):
|
"positions": chunk["positions"],
|
||||||
return get_json_result(
|
}
|
||||||
data=False, retmsg=f"You don't own the assistant.",
|
chunk_list.append(new_chunk)
|
||||||
retcode=RetCode.OPERATING_ERROR)
|
chunk_num += 1
|
||||||
convs = ConversationService.query(
|
messages[message_num]["reference"] = chunk_list
|
||||||
dialog_id=assistant_id,
|
message_num += 1
|
||||||
order_by=ConversationService.model.create_time,
|
del conv["reference"]
|
||||||
reverse=True)
|
return get_result(data=convs)
|
||||||
convs = [d.to_dict() for d in convs]
|
|
||||||
for conv in convs:
|
|
||||||
conv['messages'] = conv.pop("message")
|
@manager.route('/agents/<agent_id>/sessions', methods=['GET']) # noqa: F821
|
||||||
conv["assistant_id"] = conv.pop("dialog_id")
|
@token_required
|
||||||
if conv["reference"]:
|
def list_agent_session(tenant_id, agent_id):
|
||||||
messages = conv["messages"]
|
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||||
message_num = 0
|
return get_error_data_result(message=f"You don't own the agent {agent_id}.")
|
||||||
chunk_num = 0
|
id = request.args.get("id")
|
||||||
while message_num < len(messages):
|
if not API4ConversationService.query(id=id, user_id=tenant_id):
|
||||||
if message_num != 0 and messages[message_num]["role"] != "user":
|
return get_error_data_result(f"You don't own the session {id}")
|
||||||
chunk_list = []
|
page_number = int(request.args.get("page", 1))
|
||||||
if "chunks" in conv["reference"][chunk_num]:
|
items_per_page = int(request.args.get("page_size", 30))
|
||||||
chunks = conv["reference"][chunk_num]["chunks"]
|
orderby = request.args.get("orderby", "update_time")
|
||||||
for chunk in chunks:
|
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||||
new_chunk = {
|
desc = False
|
||||||
"id": chunk["chunk_id"],
|
else:
|
||||||
"content": chunk["content_with_weight"],
|
desc = True
|
||||||
"document_id": chunk["doc_id"],
|
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id)
|
||||||
"document_name": chunk["docnm_kwd"],
|
if not convs:
|
||||||
"knowledgebase_id": chunk["kb_id"],
|
return get_result(data=[])
|
||||||
"image_id": chunk["img_id"],
|
for conv in convs:
|
||||||
"similarity": chunk["similarity"],
|
conv['messages'] = conv.pop("message")
|
||||||
"vector_similarity": chunk["vector_similarity"],
|
infos = conv["messages"]
|
||||||
"term_similarity": chunk["term_similarity"],
|
for info in infos:
|
||||||
"positions": chunk["positions"],
|
if "prompt" in info:
|
||||||
}
|
info.pop("prompt")
|
||||||
chunk_list.append(new_chunk)
|
conv["agent_id"] = conv.pop("dialog_id")
|
||||||
chunk_num += 1
|
if conv["reference"]:
|
||||||
messages[message_num]["reference"] = chunk_list
|
messages = conv["messages"]
|
||||||
message_num += 1
|
message_num = 0
|
||||||
del conv["reference"]
|
chunk_num = 0
|
||||||
return get_json_result(data=convs)
|
while message_num < len(messages):
|
||||||
|
if message_num != 0 and messages[message_num]["role"] != "user":
|
||||||
|
chunk_list = []
|
||||||
@manager.route('/delete', methods=["DELETE"])
|
if "chunks" in conv["reference"][chunk_num]:
|
||||||
@token_required
|
chunks = conv["reference"][chunk_num]["chunks"]
|
||||||
def delete(tenant_id):
|
for chunk in chunks:
|
||||||
id = request.args.get("id")
|
new_chunk = {
|
||||||
if not id:
|
"id": chunk["chunk_id"],
|
||||||
return get_data_error_result(retmsg="`id` is required in deleting operation")
|
"content": chunk["content"],
|
||||||
conv = ConversationService.query(id=id)
|
"document_id": chunk["doc_id"],
|
||||||
if not conv:
|
"document_name": chunk["docnm_kwd"],
|
||||||
return get_data_error_result(retmsg="Session doesn't exist")
|
"dataset_id": chunk["kb_id"],
|
||||||
conv = conv[0]
|
"image_id": chunk.get("image_id", ""),
|
||||||
if not DialogService.query(id=conv.dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
"similarity": chunk["similarity"],
|
||||||
return get_data_error_result(retmsg="You don't own the session")
|
"vector_similarity": chunk["vector_similarity"],
|
||||||
ConversationService.delete_by_id(id)
|
"term_similarity": chunk["term_similarity"],
|
||||||
return get_json_result(data=True)
|
"positions": chunk["positions"],
|
||||||
|
}
|
||||||
|
chunk_list.append(new_chunk)
|
||||||
|
chunk_num += 1
|
||||||
|
messages[message_num]["reference"] = chunk_list
|
||||||
|
message_num += 1
|
||||||
|
del conv["reference"]
|
||||||
|
return get_result(data=convs)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"]) # noqa: F821
|
||||||
|
@token_required
|
||||||
|
def delete(tenant_id, chat_id):
|
||||||
|
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||||
|
return get_error_data_result(message="You don't own the chat")
|
||||||
|
req = request.json
|
||||||
|
convs = ConversationService.query(dialog_id=chat_id)
|
||||||
|
if not req:
|
||||||
|
ids = None
|
||||||
|
else:
|
||||||
|
ids = req.get("ids")
|
||||||
|
|
||||||
|
if not ids:
|
||||||
|
conv_list = []
|
||||||
|
for conv in convs:
|
||||||
|
conv_list.append(conv.id)
|
||||||
|
else:
|
||||||
|
conv_list = ids
|
||||||
|
for id in conv_list:
|
||||||
|
conv = ConversationService.query(id=id, dialog_id=chat_id)
|
||||||
|
if not conv:
|
||||||
|
return get_error_data_result(message="The chat doesn't own the session")
|
||||||
|
ConversationService.delete_by_id(id)
|
||||||
|
return get_result()
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/sessions/ask', methods=['POST']) # noqa: F821
|
||||||
|
@token_required
|
||||||
|
def ask_about(tenant_id):
|
||||||
|
req = request.json
|
||||||
|
if not req.get("question"):
|
||||||
|
return get_error_data_result("`question` is required.")
|
||||||
|
if not req.get("dataset_ids"):
|
||||||
|
return get_error_data_result("`dataset_ids` is required.")
|
||||||
|
if not isinstance(req.get("dataset_ids"), list):
|
||||||
|
return get_error_data_result("`dataset_ids` should be a list.")
|
||||||
|
req["kb_ids"] = req.pop("dataset_ids")
|
||||||
|
for kb_id in req["kb_ids"]:
|
||||||
|
if not KnowledgebaseService.accessible(kb_id, tenant_id):
|
||||||
|
return get_error_data_result(f"You don't own the dataset {kb_id}.")
|
||||||
|
kbs = KnowledgebaseService.query(id=kb_id)
|
||||||
|
kb = kbs[0]
|
||||||
|
if kb.chunk_num == 0:
|
||||||
|
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||||
|
uid = tenant_id
|
||||||
|
|
||||||
|
def stream():
|
||||||
|
nonlocal req, uid
|
||||||
|
try:
|
||||||
|
for ans in ask(req["question"], req["kb_ids"], uid):
|
||||||
|
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
||||||
|
except Exception as e:
|
||||||
|
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
||||||
|
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
||||||
|
ensure_ascii=False) + "\n\n"
|
||||||
|
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||||
|
|
||||||
|
resp = Response(stream(), mimetype="text/event-stream")
|
||||||
|
resp.headers.add_header("Cache-control", "no-cache")
|
||||||
|
resp.headers.add_header("Connection", "keep-alive")
|
||||||
|
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||||
|
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||||
|
return resp
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/sessions/related_questions', methods=['POST'])  # noqa: F821
@token_required
def related_questions(tenant_id):
    req = request.json
    if not req.get("question"):
        return get_error_data_result("`question` is required.")
    question = req["question"]
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
- Based on the keywords provided by the user, generate 5-10 related search terms.
- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- Keep the term length between 2-4 words, concise and clear.
- DO NOT translate, use the language of the original keywords.

### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup

Reason:
- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
    return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])

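# A self-contained illustration of the post-processing on the return line
# above: keep only the lines the model numbered and strip the "N. " prefix.
# Note that the single-digit character class means items numbered 10 and
# above would be dropped. The sample text is fabricated for the demo.
import re

ans = "Related search terms:\n1. Current status of Chinese football\n2. Reform of Chinese football\nfooter text"
terms = [re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)]
print(terms)  # ['Current status of Chinese football', 'Reform of Chinese football']
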
@manager.route('/chatbots/<dialog_id>/completions', methods=['POST'])  # noqa: F821
def chatbot_completions(dialog_id):
    req = request.json

    token = request.headers.get('Authorization', '').split()
    if len(token) != 2:
        return get_error_data_result(message='Authorization is not valid!')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message='Token is not valid!')

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(iframe_completion(dialog_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in iframe_completion(dialog_id, **req):
        return get_result(data=answer)

@manager.route('/agentbots/<agent_id>/completions', methods=['POST'])  # noqa: F821
def agent_bot_completions(agent_id):
    req = request.json

    token = request.headers.get('Authorization', '').split()
    if len(token) != 2:
        return get_error_data_result(message='Authorization is not valid!')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message='Token is not valid!')

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(agent_completion(objs[0].tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
        return get_result(data=answer)

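# A caller sketch shared by the two embed endpoints above (the URL prefix is
# an assumption). The Bearer value must be the *beta* token, since both
# routes look it up via APIToken.query(beta=token); with "stream": False the
# first complete answer is returned as one JSON result instead of SSE frames.
import requests

resp = requests.post(
    "http://localhost:9380/v1/agentbots/<agent_id>/completions",  # hypothetical prefix
    headers={"Authorization": "Bearer <beta-token>"},
    json={"question": "hi", "stream": False},
)
print(resp.json())
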
@@ -13,78 +13,288 @@
# See the License for the specific language governing permissions and
# limitations under the License
#
import logging
from datetime import datetime
import json

from flask_login import login_required, current_user

from api.db.db_models import APIToken
from api.db.services.api_service import APITokenService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import UserTenantService
from api import settings
from api.utils import current_timestamp, datetime_format
from api.utils.api_utils import (
    get_json_result,
    get_data_error_result,
    server_error_response,
    generate_confirmation_token,
)
from api.versions import get_ragflow_version
from rag.utils.storage_factory import STORAGE_IMPL, STORAGE_IMPL_TYPE
from timeit import default_timer as timer

from rag.utils.redis_conn import REDIS_CONN

@manager.route("/version", methods=["GET"])  # noqa: F821
@login_required
def version():
    """
    Get the current version of the application.
    ---
    tags:
      - System
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: Version retrieved successfully.
        schema:
          type: object
          properties:
            version:
              type: string
              description: Version number.
    """
    return get_json_result(data=get_ragflow_version())

@manager.route("/status", methods=["GET"])  # noqa: F821
@login_required
def status():
    """
    Get the system status.
    ---
    tags:
      - System
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: System is operational.
        schema:
          type: object
          properties:
            es:
              type: object
              description: Elasticsearch status.
            storage:
              type: object
              description: Storage status.
            database:
              type: object
              description: Database status.
      503:
        description: Service unavailable.
        schema:
          type: object
          properties:
            error:
              type: string
              description: Error message.
    """
    res = {}
    st = timer()
    try:
        res["doc_engine"] = settings.docStoreConn.health()
        res["doc_engine"]["elapsed"] = "{:.1f}".format((timer() - st) * 1000.0)
    except Exception as e:
        res["doc_engine"] = {
            "type": "unknown",
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    st = timer()
    try:
        STORAGE_IMPL.health()
        res["storage"] = {
            "storage": STORAGE_IMPL_TYPE.lower(),
            "status": "green",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
        }
    except Exception as e:
        res["storage"] = {
            "storage": STORAGE_IMPL_TYPE.lower(),
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    st = timer()
    try:
        KnowledgebaseService.get_by_id("x")
        res["database"] = {
            "database": settings.DATABASE_TYPE.lower(),
            "status": "green",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
        }
    except Exception as e:
        res["database"] = {
            "database": settings.DATABASE_TYPE.lower(),
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    st = timer()
    try:
        if not REDIS_CONN.health():
            raise Exception("Lost connection!")
        res["redis"] = {
            "status": "green",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
        }
    except Exception as e:
        res["redis"] = {
            "status": "red",
            "elapsed": "{:.1f}".format((timer() - st) * 1000.0),
            "error": str(e),
        }

    task_executor_heartbeats = {}
    try:
        task_executors = REDIS_CONN.smembers("TASKEXE")
        now = datetime.now().timestamp()
        for task_executor_id in task_executors:
            heartbeats = REDIS_CONN.zrangebyscore(task_executor_id, now - 60 * 30, now)
            heartbeats = [json.loads(heartbeat) for heartbeat in heartbeats]
            task_executor_heartbeats[task_executor_id] = heartbeats
    except Exception:
        logging.exception("get task executor heartbeats failed!")
    res["task_executor_heartbeats"] = task_executor_heartbeats

    return get_json_result(data=res)

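# A sketch of the producer side the heartbeat check above assumes: each task
# executor registers its id in the "TASKEXE" set and appends a JSON heartbeat
# to a per-executor sorted set scored by timestamp, which /status reads back
# with zrangebyscore over the last 30 minutes. Plain redis-py stands in for
# REDIS_CONN here, and the executor id and payload fields are illustrative.
import json
import time

import redis

r = redis.Redis()
executor_id = "task_executor_0"  # hypothetical executor id
r.sadd("TASKEXE", executor_id)   # advertise this executor to /status
now = time.time()
r.zadd(executor_id, {json.dumps({"ts": now, "pending": 0}): now})
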
@manager.route("/new_token", methods=["POST"]) # noqa: F821
|
||||||
|
@login_required
|
||||||
|
def new_token():
|
||||||
|
"""
|
||||||
|
Generate a new API token.
|
||||||
|
---
|
||||||
|
tags:
|
||||||
|
- API Tokens
|
||||||
|
security:
|
||||||
|
- ApiKeyAuth: []
|
||||||
|
parameters:
|
||||||
|
- in: query
|
||||||
|
name: name
|
||||||
|
type: string
|
||||||
|
required: false
|
||||||
|
description: Name of the token.
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Token generated successfully.
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
token:
|
||||||
|
type: string
|
||||||
|
description: The generated API token.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
tenants = UserTenantService.query(user_id=current_user.id)
|
||||||
|
if not tenants:
|
||||||
|
return get_data_error_result(message="Tenant not found!")
|
||||||
|
|
||||||
|
tenant_id = tenants[0].tenant_id
|
||||||
|
obj = {
|
||||||
|
"tenant_id": tenant_id,
|
||||||
|
"token": generate_confirmation_token(tenant_id),
|
||||||
|
"beta": generate_confirmation_token(generate_confirmation_token(tenant_id)).replace("ragflow-", "")[:32],
|
||||||
|
"create_time": current_timestamp(),
|
||||||
|
"create_date": datetime_format(datetime.now()),
|
||||||
|
"update_time": None,
|
||||||
|
"update_date": None,
|
||||||
|
}
|
||||||
|
|
||||||
|
if not APITokenService.save(**obj):
|
||||||
|
return get_data_error_result(message="Fail to new a dialog!")
|
||||||
|
|
||||||
|
return get_json_result(data=obj)
|
||||||
|
except Exception as e:
|
||||||
|
return server_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route("/token_list", methods=["GET"]) # noqa: F821
|
||||||
|
@login_required
|
||||||
|
def token_list():
|
||||||
|
"""
|
||||||
|
List all API tokens for the current user.
|
||||||
|
---
|
||||||
|
tags:
|
||||||
|
- API Tokens
|
||||||
|
security:
|
||||||
|
- ApiKeyAuth: []
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: List of API tokens.
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
tokens:
|
||||||
|
type: array
|
||||||
|
items:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
token:
|
||||||
|
type: string
|
||||||
|
description: The API token.
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
description: Name of the token.
|
||||||
|
create_time:
|
||||||
|
type: string
|
||||||
|
description: Token creation time.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
tenants = UserTenantService.query(user_id=current_user.id)
|
||||||
|
if not tenants:
|
||||||
|
return get_data_error_result(message="Tenant not found!")
|
||||||
|
|
||||||
|
tenant_id = tenants[0].tenant_id
|
||||||
|
objs = APITokenService.query(tenant_id=tenant_id)
|
||||||
|
objs = [o.to_dict() for o in objs]
|
||||||
|
for o in objs:
|
||||||
|
if not o["beta"]:
|
||||||
|
o["beta"] = generate_confirmation_token(generate_confirmation_token(tenants[0].tenant_id)).replace("ragflow-", "")[:32]
|
||||||
|
APITokenService.filter_update([APIToken.tenant_id == tenant_id, APIToken.token == o["token"]], o)
|
||||||
|
return get_json_result(data=objs)
|
||||||
|
except Exception as e:
|
||||||
|
return server_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route("/token/<token>", methods=["DELETE"]) # noqa: F821
|
||||||
|
@login_required
|
||||||
|
def rm(token):
|
||||||
|
"""
|
||||||
|
Remove an API token.
|
||||||
|
---
|
||||||
|
tags:
|
||||||
|
- API Tokens
|
||||||
|
security:
|
||||||
|
- ApiKeyAuth: []
|
||||||
|
parameters:
|
||||||
|
- in: path
|
||||||
|
name: token
|
||||||
|
type: string
|
||||||
|
required: true
|
||||||
|
description: The API token to remove.
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Token removed successfully.
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
success:
|
||||||
|
type: boolean
|
||||||
|
description: Deletion status.
|
||||||
|
"""
|
||||||
|
APITokenService.filter_delete(
|
||||||
|
[APIToken.tenant_id == current_user.id, APIToken.token == token]
|
||||||
|
)
|
||||||
|
return get_json_result(data=True)
|
||||||
|
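# A lifecycle sketch for the three token endpoints above, assuming a
# requests.Session already authenticated through the login endpoint (all
# three routes are @login_required) and a hypothetical "/v1/system" prefix.
import requests

s = requests.Session()
# ... authenticate s via the login endpoint first ...
base = "http://localhost:9380/v1/system"

obj = s.post(f"{base}/new_token").json()["data"]   # create a token pair
print(obj["token"], obj["beta"])
print(s.get(f"{base}/token_list").json()["data"])  # list; backfills empty beta tokens
s.delete(f"{base}/token/{obj['token']}")           # remove by full token value
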
@@ -15,71 +15,108 @@
#

from flask import request
from flask_login import login_required, current_user

from api import settings
from api.db import UserTenantRole, StatusEnum
from api.db.db_models import UserTenant
from api.db.services.user_service import UserTenantService, UserService

from api.utils import get_uuid, delta_seconds
from api.utils.api_utils import get_json_result, validate_request, server_error_response, get_data_error_result


@manager.route("/<tenant_id>/user/list", methods=["GET"])  # noqa: F821
@login_required
def user_list(tenant_id):
    if current_user.id != tenant_id:
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR)

    try:
        users = UserTenantService.get_by_tenant_id(tenant_id)
        for u in users:
            u["delta_seconds"] = delta_seconds(str(u["update_date"]))
        return get_json_result(data=users)
    except Exception as e:
        return server_error_response(e)


@manager.route('/<tenant_id>/user', methods=['POST'])  # noqa: F821
@login_required
@validate_request("email")
def create(tenant_id):
    if current_user.id != tenant_id:
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json
    invite_user_email = req["email"]
    invite_users = UserService.query(email=invite_user_email)
    if not invite_users:
        return get_data_error_result(message="User not found.")

    user_id_to_invite = invite_users[0].id
    user_tenants = UserTenantService.query(user_id=user_id_to_invite, tenant_id=tenant_id)
    if user_tenants:
        user_tenant_role = user_tenants[0].role
        if user_tenant_role == UserTenantRole.NORMAL:
            return get_data_error_result(message=f"{invite_user_email} is already in the team.")
        if user_tenant_role == UserTenantRole.OWNER:
            return get_data_error_result(message=f"{invite_user_email} is the owner of the team.")
        return get_data_error_result(message=f"{invite_user_email} is in the team, but the role: {user_tenant_role} is invalid.")

    UserTenantService.save(
        id=get_uuid(),
        user_id=user_id_to_invite,
        tenant_id=tenant_id,
        invited_by=current_user.id,
        role=UserTenantRole.INVITE,
        status=StatusEnum.VALID.value)

    usr = invite_users[0].to_dict()
    usr = {k: v for k, v in usr.items() if k in ["id", "avatar", "email", "nickname"]}

    return get_json_result(data=usr)


@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE'])  # noqa: F821
@login_required
def rm(tenant_id, user_id):
    if current_user.id != tenant_id and current_user.id != user_id:
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR)

    try:
        UserTenantService.filter_delete([UserTenant.tenant_id == tenant_id, UserTenant.user_id == user_id])
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route("/list", methods=["GET"])  # noqa: F821
@login_required
def tenant_list():
    try:
        users = UserTenantService.get_tenants_by_user_id(current_user.id)
        for u in users:
            u["delta_seconds"] = delta_seconds(str(u["update_date"]))
        return get_json_result(data=users)
    except Exception as e:
        return server_error_response(e)


@manager.route("/agree/<tenant_id>", methods=["PUT"])  # noqa: F821
@login_required
def agree(tenant_id):
    try:
        UserTenantService.filter_update([UserTenant.tenant_id == tenant_id, UserTenant.user_id == current_user.id], {"role": UserTenantRole.NORMAL})
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

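# An end-to-end sketch of the invite flow defined above (the prefix and the
# authenticated sessions are assumptions): the owner posts an email, which
# stores a membership row with role INVITE; the invitee then calls
# /agree/<tenant_id>, whose filter_update flips the role to NORMAL, after
# which both appear in the member list.
import requests

owner = requests.Session()    # session authenticated as the tenant owner
invitee = requests.Session()  # session authenticated as the invited user
base = "http://localhost:9380/v1/tenant"  # hypothetical prefix
tenant_id = "<tenant_id>"

owner.post(f"{base}/{tenant_id}/user", json={"email": "new.member@example.com"})
invitee.put(f"{base}/agree/{tenant_id}")
print(owner.get(f"{base}/{tenant_id}/user/list").json()["data"])
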
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import re
from datetime import datetime
@@ -23,65 +24,127 @@ from flask_login import login_required, current_user, login_user, logout_user

from api.db.db_models import TenantLLM
from api.db.services.llm_service import TenantLLMService, LLMService
from api.utils.api_utils import (
    server_error_response,
    validate_request,
    get_data_error_result,
)
from api.utils import (
    get_uuid,
    get_format_time,
    decrypt,
    download_img,
    current_timestamp,
    datetime_format,
)
from api.db import UserTenantRole, FileType
from api import settings
from api.db.services.user_service import UserService, TenantService, UserTenantService
from api.db.services.file_service import FileService
from api.utils.api_utils import get_json_result, construct_response


@manager.route("/login", methods=["POST", "GET"])  # noqa: F821
def login():
    """
    User login endpoint.
    ---
    tags:
      - User
    parameters:
      - in: body
        name: body
        description: Login credentials.
        required: true
        schema:
          type: object
          properties:
            email:
              type: string
              description: User email.
            password:
              type: string
              description: User password.
    responses:
      200:
        description: Login successful.
        schema:
          type: object
      401:
        description: Authentication failed.
        schema:
          type: object
    """
    if not request.json:
        return get_json_result(
            data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!"
        )

    email = request.json.get("email", "")
    users = UserService.query(email=email)
    if not users:
        return get_json_result(
            data=False,
            code=settings.RetCode.AUTHENTICATION_ERROR,
            message=f"Email: {email} is not registered!",
        )

    password = request.json.get("password")
    try:
        password = decrypt(password)
    except BaseException:
        return get_json_result(
            data=False, code=settings.RetCode.SERVER_ERROR, message="Failed to decrypt password"
        )

    user = UserService.query_user(email, password)
    if user:
        response_data = user.to_json()
        user.access_token = get_uuid()
        login_user(user)
        user.update_time = current_timestamp()
        user.update_date = datetime_format(datetime.now())
        user.save()
        msg = "Welcome back!"
        return construct_response(data=response_data, auth=user.get_id(), message=msg)
    else:
        return get_json_result(
            data=False,
            code=settings.RetCode.AUTHENTICATION_ERROR,
            message="Email and password do not match!",
        )

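# A login sketch matching the endpoint above (the URL is an assumption). Note
# that the handler calls decrypt() on the submitted password, so the client is
# expected to send it already encrypted, as the web frontend does; a raw
# plaintext password would fail the decrypt step. The helper below is a
# hypothetical placeholder for that client-side encryption.
import requests

def encrypt_for_ragflow(plaintext: str) -> str:
    # Placeholder for the frontend's public-key password encryption (assumption).
    raise NotImplementedError("supply the deployment's password encryption here")

payload = {"email": "user@example.com", "password": encrypt_for_ragflow("secret")}
resp = requests.post("http://localhost:9380/v1/user/login", json=payload)  # hypothetical URL
print(resp.json())
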
@manager.route("/github_callback", methods=["GET"])  # noqa: F821
def github_callback():
    """
    GitHub OAuth callback endpoint.
    ---
    tags:
      - OAuth
    parameters:
      - in: query
        name: code
        type: string
        required: true
        description: Authorization code from GitHub.
    responses:
      200:
        description: Authentication successful.
        schema:
          type: object
    """
    import requests

    res = requests.post(
        settings.GITHUB_OAUTH.get("url"),
        data={
            "client_id": settings.GITHUB_OAUTH.get("client_id"),
            "client_secret": settings.GITHUB_OAUTH.get("secret_key"),
            "code": request.args.get("code"),
        },
        headers={"Accept": "application/json"},
    )
    res = res.json()
    if "error" in res:
        return redirect("/?error=%s" % res["error_description"])
@@ -101,21 +164,24 @@ def github_callback():
    try:
        avatar = download_img(user_info["avatar_url"])
    except Exception as e:
        logging.exception(e)
        avatar = ""
    users = user_register(
        user_id,
        {
            "access_token": session["access_token"],
            "email": email_address,
            "avatar": avatar,
            "nickname": user_info["login"],
            "login_channel": "github",
            "last_login_time": get_format_time(),
            "is_superuser": False,
        },
    )
    if not users:
        raise Exception(f"Failed to register {email_address}.")
    if len(users) > 1:
        raise Exception(f"Same email: {email_address} exists!")

    # Try to log in
    user = users[0]
@@ -123,7 +189,7 @@ def github_callback():
        return redirect("/?auth=%s" % user.get_id())
    except Exception as e:
        rollback_user_registration(user_id)
        logging.exception(e)
        return redirect("/?error=%s" % str(e))

    # User has already registered, try to log in
@@ -134,33 +200,59 @@ def github_callback():
    return redirect("/?auth=%s" % user.get_id())


@manager.route("/feishu_callback", methods=["GET"])  # noqa: F821
def feishu_callback():
    """
    Feishu OAuth callback endpoint.
    ---
    tags:
      - OAuth
    parameters:
      - in: query
        name: code
        type: string
        required: true
        description: Authorization code from Feishu.
    responses:
      200:
        description: Authentication successful.
        schema:
          type: object
    """
    import requests

    app_access_token_res = requests.post(
        settings.FEISHU_OAUTH.get("app_access_token_url"),
        data=json.dumps(
            {
                "app_id": settings.FEISHU_OAUTH.get("app_id"),
                "app_secret": settings.FEISHU_OAUTH.get("app_secret"),
            }
        ),
        headers={"Content-Type": "application/json; charset=utf-8"},
    )
    app_access_token_res = app_access_token_res.json()
    if app_access_token_res["code"] != 0:
        return redirect("/?error=%s" % app_access_token_res)

    res = requests.post(
        settings.FEISHU_OAUTH.get("user_access_token_url"),
        data=json.dumps(
            {
                "grant_type": settings.FEISHU_OAUTH.get("grant_type"),
                "code": request.args.get("code"),
            }
        ),
        headers={
            "Content-Type": "application/json; charset=utf-8",
            "Authorization": f"Bearer {app_access_token_res['app_access_token']}",
        },
    )
    res = res.json()
    if res["code"] != 0:
        return redirect("/?error=%s" % res["message"])

    if "contact:user.email:readonly" not in res["data"]["scope"].split():
        return redirect("/?error=contact:user.email:readonly not in scope")
    session["access_token"] = res["data"]["access_token"]
    session["access_token_from"] = "feishu"
@@ -174,21 +266,24 @@ def feishu_callback():
    try:
        avatar = download_img(user_info["avatar_url"])
    except Exception as e:
        logging.exception(e)
        avatar = ""
    users = user_register(
        user_id,
        {
            "access_token": session["access_token"],
            "email": email_address,
            "avatar": avatar,
            "nickname": user_info["en_name"],
            "login_channel": "feishu",
            "last_login_time": get_format_time(),
            "is_superuser": False,
        },
    )
    if not users:
        raise Exception(f"Failed to register {email_address}.")
    if len(users) > 1:
        raise Exception(f"Same email: {email_address} exists!")

    # Try to log in
    user = users[0]
@@ -196,7 +291,7 @@ def feishu_callback():
        return redirect("/?auth=%s" % user.get_id())
    except Exception as e:
        rollback_user_registration(user_id)
        logging.exception(e)
        return redirect("/?error=%s" % str(e))

    # User has already registered, try to log in
@@ -209,11 +304,14 @@ def feishu_callback():

def user_info_from_feishu(access_token):
    import requests

    headers = {
        "Content-Type": "application/json; charset=utf-8",
        "Authorization": f"Bearer {access_token}",
    }
    res = requests.get(
        "https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers
    )
    user_info = res.json()["data"]
    user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
    return user_info
@@ -221,46 +319,103 @@ def user_info_from_feishu(access_token):

def user_info_from_github(access_token):
    import requests

    headers = {"Accept": "application/json", "Authorization": f"token {access_token}"}
    res = requests.get(
        f"https://api.github.com/user?access_token={access_token}", headers=headers
    )
    user_info = res.json()
    email_info = requests.get(
        f"https://api.github.com/user/emails?access_token={access_token}",
        headers=headers,
    ).json()
    # Guard against accounts without a primary email address.
    primary_email = next((email for email in email_info if email["primary"]), None)
    user_info["email"] = primary_email["email"] if primary_email else None
    return user_info


@manager.route("/logout", methods=["GET"])  # noqa: F821
@login_required
def log_out():
    """
    User logout endpoint.
    ---
    tags:
      - User
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: Logout successful.
        schema:
          type: object
    """
    current_user.access_token = ""
    current_user.save()
    logout_user()
    return get_json_result(data=True)


@manager.route("/setting", methods=["POST"])  # noqa: F821
@login_required
def setting_user():
    """
    Update user settings.
    ---
    tags:
      - User
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: User settings to update.
        required: true
        schema:
          type: object
          properties:
            nickname:
              type: string
              description: New nickname.
            email:
              type: string
              description: New email.
    responses:
      200:
        description: Settings updated successfully.
        schema:
          type: object
    """
    update_dict = {}
    request_data = request.json
    if request_data.get("password"):
        new_password = request_data.get("new_password")
        if not check_password_hash(
            current_user.password, decrypt(request_data["password"])
        ):
            return get_json_result(
                data=False,
                code=settings.RetCode.AUTHENTICATION_ERROR,
                message="Password error!",
            )

        if new_password:
            update_dict["password"] = generate_password_hash(decrypt(new_password))

    for k in request_data.keys():
        if k in [
            "password",
            "new_password",
            "email",
            "status",
            "is_superuser",
            "login_channel",
            "is_anonymous",
            "is_active",
            "is_authenticated",
            "last_login_time",
        ]:
            continue
        update_dict[k] = request_data[k]
@@ -268,34 +423,59 @@ def setting_user():
        UserService.update_by_id(current_user.id, update_dict)
        return get_json_result(data=True)
    except Exception as e:
        logging.exception(e)
        return get_json_result(
            data=False, message="Update failed!", code=settings.RetCode.EXCEPTION_ERROR
        )


@manager.route("/info", methods=["GET"])  # noqa: F821
@login_required
def user_profile():
    """
    Get user profile information.
    ---
    tags:
      - User
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: User profile retrieved successfully.
        schema:
          type: object
          properties:
            id:
              type: string
              description: User ID.
            nickname:
              type: string
              description: User nickname.
            email:
              type: string
              description: User email.
    """
    return get_json_result(data=current_user.to_dict())


def rollback_user_registration(user_id):
    try:
        UserService.delete_by_id(user_id)
    except Exception:
        pass
    try:
        TenantService.delete_by_id(user_id)
    except Exception:
        pass
    try:
        u = UserTenantService.query(tenant_id=user_id)
        if u:
            UserTenantService.delete_by_id(u[0].id)
    except Exception:
        pass
    try:
        TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
    except Exception:
        pass
@@ -304,18 +484,18 @@ def user_register(user_id, user):
    tenant = {
        "id": user_id,
        "name": user["nickname"] + "‘s Kingdom",
        "llm_id": settings.CHAT_MDL,
        "embd_id": settings.EMBEDDING_MDL,
        "asr_id": settings.ASR_MDL,
        "parser_ids": settings.PARSERS,
        "img2txt_id": settings.IMAGE2TEXT_MDL,
        "rerank_id": settings.RERANK_MDL,
    }
    usr_tenant = {
        "tenant_id": user_id,
        "user_id": user_id,
        "invited_by": user_id,
        "role": UserTenantRole.OWNER,
    }
    file_id = get_uuid()
    file = {
@@ -329,14 +509,18 @@
        "location": "",
    }
    tenant_llm = []
    for llm in LLMService.query(fid=settings.LLM_FACTORY):
        tenant_llm.append(
            {
                "tenant_id": user_id,
                "llm_factory": settings.LLM_FACTORY,
                "llm_name": llm.llm_name,
                "model_type": llm.model_type,
                "api_key": settings.API_KEY,
                "api_base": settings.LLM_BASE_URL,
                "max_tokens": llm.max_tokens if llm.max_tokens else 8192
            }
        )

    if not UserService.save(**user):
        return
@@ -347,24 +531,55 @@
    return UserService.query(email=user["email"])


@manager.route("/register", methods=["POST"])  # noqa: F821
@validate_request("nickname", "email", "password")
def user_add():
    """
    Register a new user.
    ---
    tags:
      - User
    parameters:
      - in: body
        name: body
        description: Registration details.
        required: true
        schema:
          type: object
          properties:
            nickname:
              type: string
              description: User nickname.
            email:
              type: string
              description: User email.
            password:
              type: string
              description: User password.
    responses:
      200:
        description: Registration successful.
        schema:
          type: object
    """
    req = request.json
    email_address = req["email"]

    # Validate the email address
    if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$", email_address):
        return get_json_result(
            data=False,
            message=f"Invalid email address: {email_address}!",
            code=settings.RetCode.OPERATING_ERROR,
        )

    # Check if the email address is already used
    if UserService.query(email=email_address):
        return get_json_result(
            data=False,
            message=f"Email: {email_address} is already registered!",
            code=settings.RetCode.OPERATING_ERROR,
        )

    # Construct user info data
    nickname = req["nickname"]
@@ -382,40 +597,107 @@ def user_add():
    try:
        users = user_register(user_id, user_dict)
        if not users:
            raise Exception(f"Failed to register {email_address}.")
        if len(users) > 1:
            raise Exception(f"Same email: {email_address} exists!")
        user = users[0]
        login_user(user)
        return construct_response(
            data=user.to_json(),
            auth=user.get_id(),
            message=f"{nickname}, welcome aboard!",
        )
    except Exception as e:
        rollback_user_registration(user_id)
        logging.exception(e)
        return get_json_result(
            data=False,
            message=f"User registration failed, error: {str(e)}",
            code=settings.RetCode.EXCEPTION_ERROR,
        )


@manager.route("/tenant_info", methods=["GET"])  # noqa: F821
@login_required
def tenant_info():
    """
    Get tenant information.
    ---
    tags:
      - Tenant
    security:
      - ApiKeyAuth: []
    responses:
      200:
        description: Tenant information retrieved successfully.
        schema:
          type: object
          properties:
            tenant_id:
              type: string
              description: Tenant ID.
            name:
              type: string
              description: Tenant name.
            llm_id:
              type: string
              description: LLM ID.
            embd_id:
              type: string
              description: Embedding model ID.
    """
    try:
        tenants = TenantService.get_info_by(current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")
        return get_json_result(data=tenants[0])
    except Exception as e:
        return server_error_response(e)


@manager.route("/set_tenant_info", methods=["POST"])  # noqa: F821
@login_required
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
def set_tenant_info():
    """
    Update tenant information.
    ---
    tags:
      - Tenant
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: Tenant information to update.
        required: true
        schema:
          type: object
          properties:
            tenant_id:
              type: string
              description: Tenant ID.
            llm_id:
              type: string
              description: LLM ID.
            embd_id:
              type: string
              description: Embedding model ID.
            asr_id:
              type: string
              description: ASR model ID.
            img2txt_id:
              type: string
              description: Image to Text model ID.
    responses:
      200:
        description: Tenant information updated successfully.
        schema:
          type: object
    """
    req = request.json
    try:
        tid = req.pop("tenant_id")
        TenantService.update_by_id(tid, req)
        return get_json_result(data=True)
    except Exception as e:
@@ -13,4 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

NAME_LENGTH_LIMIT = 2 ** 10

IMG_BASE64_PREFIX = 'data:image/png;base64,'

SERVICE_CONF = "service_conf.yaml"

API_VERSION = "v1"
RAG_FLOW_SERVICE_NAME = "ragflow"
REQUEST_WAIT_SEC = 2
REQUEST_MAX_WAIT_SEC = 300

DATASET_NAME_LIMIT = 128
@@ -27,6 +27,7 @@ class UserTenantRole(StrEnum):
    OWNER = 'owner'
    ADMIN = 'admin'
    NORMAL = 'normal'
    INVITE = 'invite'


class TenantPermission(StrEnum):
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import inspect
import os
import sys
@@ -29,12 +30,11 @@ from peewee import (
    Field, Model, Metadata
)
from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase

from api.db import SerializedType, ParserType
from api import settings
from api import utils


def singleton(cls, *args, **kw):
@@ -65,7 +65,7 @@ class TextFieldType(Enum):


class LongTextField(TextField):
    field_type = TextFieldType[settings.DATABASE_TYPE.upper()].value


class JSONField(LongTextField):
@@ -130,7 +130,7 @@ def is_continuous_field(cls: typing.Type) -> bool:
    for p in cls.__bases__:
        if p in CONTINUOUS_FIELD_TYPE:
            return True
        elif p is not Field and p is not object:
            if is_continuous_field(p):
                return True
    else:
@@ -272,6 +272,7 @@ class JsonSerializedField(SerializedField):
        super(JsonSerializedField, self).__init__(serialized_type=SerializedType.JSON, object_hook=object_hook,
                                                  object_pairs_hook=object_pairs_hook, **kwargs)


class PooledDatabase(Enum):
    MYSQL = PooledMySQLDatabase
    POSTGRES = PooledPostgresqlDatabase
@ -285,10 +286,11 @@ class DatabaseMigrator(Enum):
|
|||||||
@singleton
|
@singleton
|
||||||
class BaseDataBase:
|
class BaseDataBase:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
database_config = DATABASE.copy()
|
database_config = settings.DATABASE.copy()
|
||||||
db_name = database_config.pop("name")
|
db_name = database_config.pop("name")
|
||||||
self.database_connection = PooledDatabase[DATABASE_TYPE.upper()].value(db_name, **database_config)
|
self.database_connection = PooledDatabase[settings.DATABASE_TYPE.upper()].value(db_name, **database_config)
|
||||||
stat_logger.info('init database on cluster mode successfully')
|
logging.info('init database on cluster mode successfully')
|
||||||
|
|
||||||
|
|
||||||
class PostgresDatabaseLock:
|
class PostgresDatabaseLock:
|
||||||
def __init__(self, lock_name, timeout=10, db=None):
|
def __init__(self, lock_name, timeout=10, db=None):
|
||||||
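The hunk above pairs a `@singleton` decorator with a `PooledDatabase` enum so that one pooled connection is created per process and the pool class is picked by name. A minimal, self-contained sketch of that pattern (the lock and the config values here are illustrative assumptions; the real values come from `api.settings`):

    import threading
    from enum import Enum
    from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase

    def singleton(cls, *args, **kw):
        # Cache at most one instance per class; repeat calls return it.
        instances = {}
        lock = threading.Lock()

        def _singleton():
            with lock:
                if cls not in instances:
                    instances[cls] = cls(*args, **kw)
            return instances[cls]
        return _singleton

    class PooledDatabase(Enum):
        MYSQL = PooledMySQLDatabase
        POSTGRES = PooledPostgresqlDatabase

    # Enum lookup by upper-cased type name selects the pool class.
    DATABASE_TYPE = "mysql"  # assumption: normally settings.DATABASE_TYPE
    config = {"user": "root", "password": "", "host": "localhost",
              "port": 3306, "max_connections": 100, "stale_timeout": 30}
    db = PooledDatabase[DATABASE_TYPE.upper()].value("rag_flow", **config)

Construction is lazy in peewee: no connection is opened until the pool is first used, which is why configuring `settings.DATABASE` before first use works.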
@@ -334,6 +336,7 @@ class PostgresDatabaseLock:

         return magic


 class MysqlDatabaseLock:
     def __init__(self, lock_name, timeout=10, db=None):
         self.lock_name = lock_name
@@ -388,7 +391,7 @@ class DatabaseLock(Enum):


 DB = BaseDataBase().database_connection
-DB.lock = DatabaseLock[DATABASE_TYPE.upper()].value
+DB.lock = DatabaseLock[settings.DATABASE_TYPE.upper()].value


 def close_connection():
@@ -396,7 +399,7 @@ def close_connection():
         if DB:
             DB.close_stale(age=30)
     except Exception as e:
-        LOGGER.exception(e)
+        logging.exception(e)


 class DataBaseModel(BaseModel):
@@ -412,15 +415,15 @@ def init_database_tables(alter_fields=[]):
     for name, obj in members:
         if obj != DataBaseModel and issubclass(obj, DataBaseModel):
             table_objs.append(obj)
-            LOGGER.info(f"start create table {obj.__name__}")
+            logging.debug(f"start create table {obj.__name__}")
             try:
                 obj.create_table()
-                LOGGER.info(f"create table success: {obj.__name__}")
+                logging.debug(f"create table success: {obj.__name__}")
             except Exception as e:
-                LOGGER.exception(e)
+                logging.exception(e)
                 create_failed_list.append(obj.__name__)
     if create_failed_list:
-        LOGGER.info(f"create tables failed: {create_failed_list}")
+        logging.error(f"create tables failed: {create_failed_list}")
         raise Exception(f"create tables failed: {create_failed_list}")
     migrate_db()

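`init_database_tables` builds its table list by reflection instead of a hand-maintained registry: it scans the module for subclasses of the model base and calls `create_table()` on each, which peewee makes a no-op when the table already exists. A minimal sketch of the same technique (assumes a `DataBaseModel` base class and configured logging, as in the file above):

    import inspect
    import logging
    import sys

    def init_database_tables():
        # Collect every class defined in this module that subclasses DataBaseModel.
        members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
        create_failed_list = []
        for name, obj in members:
            if obj is not DataBaseModel and issubclass(obj, DataBaseModel):
                logging.debug("start create table %s", name)
                try:
                    obj.create_table()  # peewee defaults to safe=True (CREATE TABLE IF NOT EXISTS)
                except Exception:
                    logging.exception("create table failed: %s", name)
                    create_failed_list.append(name)
        if create_failed_list:
            raise Exception(f"create tables failed: {create_failed_list}")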
@@ -470,7 +473,7 @@ class User(DataBaseModel, UserMixin):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)
     is_superuser = BooleanField(null=True, help_text="is root", default=False, index=True)
@@ -479,7 +482,7 @@ class User(DataBaseModel, UserMixin):
         return self.email

     def get_id(self):
-        jwt = Serializer(secret_key=SECRET_KEY)
+        jwt = Serializer(secret_key=settings.SECRET_KEY)
         return jwt.dumps(str(self.access_token))

     class Meta:
@@ -525,7 +528,7 @@ class Tenant(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -542,7 +545,7 @@ class UserTenant(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -559,7 +562,7 @@ class InvitationCode(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -582,7 +585,7 @@ class LLMFactories(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -616,7 +619,7 @@ class LLM(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -648,7 +651,7 @@ class TenantLLM(DataBaseModel):
         index=True)
     api_key = CharField(max_length=1024, null=True, help_text="API KEY", index=True)
     api_base = CharField(max_length=255, null=True, help_text="API Base")
+    max_tokens = IntegerField(default=8192, index=True)
     used_tokens = IntegerField(default=0, index=True)

     def __str__(self):
@@ -700,10 +703,11 @@ class Knowledgebase(DataBaseModel):
         default=ParserType.NAIVE.value,
         index=True)
     parser_config = JSONField(null=False, default={"pages": [[1, 1000000]]})
+    pagerank = IntegerField(default=0, index=False)
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -767,7 +771,7 @@ class Document(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -840,7 +844,7 @@ class Task(DataBaseModel):
     doc_id = CharField(max_length=32, null=False, index=True)
     from_page = IntegerField(default=0)

-    to_page = IntegerField(default=-1)
+    to_page = IntegerField(default=100000000)

     begin_at = DateTimeField(null=True, index=True)
     process_duation = FloatField(default=0)
@@ -851,6 +855,8 @@ class Task(DataBaseModel):
                           help_text="process message",
                           default="")
     retry_count = IntegerField(default=0)
+    digest = TextField(null=True, help_text="task digest", default="")
+    chunk_ids = LongTextField(null=True, help_text="chunk ids", default="")


 class Dialog(DataBaseModel):
@@ -879,8 +885,10 @@ class Dialog(DataBaseModel):
         default="simple",
         help_text="simple|advanced",
         index=True)
-    prompt_config = JSONField(null=False, default={"system": "", "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
-                                                   "parameters": [], "empty_response": "Sorry! 知识库中未找到相关内容!"})
+    prompt_config = JSONField(null=False,
+                              default={"system": "", "prologue": "Hi! I'm your assistant, what can I do for you?",
+                                       "parameters": [],
+                                       "empty_response": "Sorry! No relevant content was found in the knowledge base!"})
     similarity_threshold = FloatField(default=0.2)
     vector_similarity_weight = FloatField(default=0.3)
@@ -894,7 +902,7 @@ class Dialog(DataBaseModel):
         null=False,
         default="1",
         help_text="it needs to insert reference index into answer or not")

     rerank_id = CharField(
         max_length=128,
         null=False,
@@ -904,7 +912,7 @@ class Dialog(DataBaseModel):
     status = CharField(
         max_length=1,
         null=True,
-        help_text="is it validate(0: wasted,1: validate)",
+        help_text="is it validate(0: wasted, 1: validate)",
         default="1",
         index=True)

@@ -928,6 +936,7 @@ class APIToken(DataBaseModel):
     token = CharField(max_length=255, null=False, index=True)
     dialog_id = CharField(max_length=32, null=False, index=True)
     source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)
+    beta = CharField(max_length=255, null=True, index=True)

     class Meta:
         db_table = "api_token"
@@ -942,7 +951,7 @@ class API4Conversation(DataBaseModel):
     reference = JSONField(null=True, default=[])
     tokens = IntegerField(default=0)
     source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)
+    dsl = JSONField(null=True, default={})
     duration = FloatField(default=0, index=True)
     round = IntegerField(default=0, index=True)
     thumb_up = IntegerField(default=0, index=True)
@@ -980,14 +989,14 @@ class CanvasTemplate(DataBaseModel):

 def migrate_db():
     with DB.transaction():
-        migrator = DatabaseMigrator[DATABASE_TYPE.upper()].value(DB)
+        migrator = DatabaseMigrator[settings.DATABASE_TYPE.upper()].value(DB)
         try:
             migrate(
                 migrator.add_column('file', 'source_type', CharField(max_length=128, null=False, default="",
                                                                      help_text="where dose this document come from",
                                                                      index=True))
             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
@@ -996,7 +1005,7 @@ def migrate_db():
                                     help_text="default rerank model ID"))

             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
@@ -1004,52 +1013,95 @@ def migrate_db():
                                     help_text="default rerank model ID"))

             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
                 migrator.add_column('dialog', 'top_k', IntegerField(default=1024))

             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
                 migrator.alter_column_type('tenant_llm', 'api_key',
                                            CharField(max_length=1024, null=True, help_text="API KEY", index=True))
             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
                 migrator.add_column('api_token', 'source',
                                     CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True))
             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
-                migrator.add_column("tenant","tts_id",
-                                    CharField(max_length=256,null=True,help_text="default tts model ID",index=True))
+                migrator.add_column("tenant", "tts_id",
+                                    CharField(max_length=256, null=True, help_text="default tts model ID", index=True))
             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
                 migrator.add_column('api_4_conversation', 'source',
                                     CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True))
             )
-        except Exception as e:
+        except Exception:
             pass
         try:
             DB.execute_sql('ALTER TABLE llm DROP PRIMARY KEY;')
             DB.execute_sql('ALTER TABLE llm ADD PRIMARY KEY (llm_name,fid);')
-        except Exception as e:
+        except Exception:
             pass
         try:
             migrate(
                 migrator.add_column('task', 'retry_count', IntegerField(default=0))
             )
-        except Exception as e:
+        except Exception:
+            pass
+        try:
+            migrate(
+                migrator.alter_column_type('api_token', 'dialog_id',
+                                           CharField(max_length=32, null=True, index=True))
+            )
+        except Exception:
+            pass
+        try:
+            migrate(
+                migrator.add_column("tenant_llm", "max_tokens", IntegerField(default=8192, index=True))
+            )
+        except Exception:
+            pass
+        try:
+            migrate(
+                migrator.add_column("api_4_conversation", "dsl", JSONField(null=True, default={}))
+            )
+        except Exception:
+            pass
+        try:
+            migrate(
+                migrator.add_column("knowledgebase", "pagerank", IntegerField(default=0, index=False))
+            )
+        except Exception:
+            pass
+        try:
+            migrate(
+                migrator.add_column("api_token", "beta", CharField(max_length=255, null=True, index=True))
+            )
+        except Exception:
+            pass
+        try:
+            migrate(
+                migrator.add_column("task", "digest", TextField(null=True, help_text="task digest", default=""))
+            )
+        except Exception:
             pass

+        try:
+            migrate(
+                migrator.add_column("task", "chunk_ids", LongTextField(null=True, help_text="chunk ids", default=""))
+            )
+        except Exception:
+            pass
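Every step in `migrate_db` follows one idiom: attempt the schema change, and swallow the exception that peewee's migrator raises when the column already exists, so the whole function can be re-run safely against any schema version. A sketch of that idiom in isolation (table and column names are illustrative):

    import logging
    from peewee import CharField
    from playhouse.migrate import MySQLMigrator, migrate

    def add_column_if_missing(migrator, table, column, field):
        # Adding a column that already exists raises; ignoring the
        # error makes each migration step idempotent.
        try:
            migrate(migrator.add_column(table, column, field))
        except Exception:
            logging.debug("%s.%s already present, skipping", table, column)

    # usage sketch:
    # migrator = MySQLMigrator(DB)
    # add_column_if_missing(migrator, "api_token", "beta",
    #                       CharField(max_length=255, null=True, index=True))

The trade-off is that a genuine failure, for example a permissions error, is silenced too; the bare `except Exception: pass` keeps startup resilient at the cost of visibility.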
api/db/db_utils.py
@@ -15,19 +15,12 @@
 #
 import operator
 from functools import reduce
-from typing import Dict, Type, Union

 from playhouse.pool import PooledMySQLDatabase

 from api.utils import current_timestamp, timestamp_to_date

 from api.db.db_models import DB, DataBaseModel
-from api.db.runtime_config import RuntimeConfig
-from api.utils.log_utils import getLogger
-from enum import Enum
-
-
-LOGGER = getLogger()


 @DB.connection_context()
@@ -93,7 +86,7 @@ supported_operators = {


 def query_dict2expression(
-        model: Type[DataBaseModel], query: Dict[str, Union[bool, int, str, list, tuple]]):
+        model: type[DataBaseModel], query: dict[str, bool | int | str | list | tuple]):
     expression = []

     for field, value in query.items():
@@ -111,8 +104,8 @@ def query_dict2expression(
     return reduce(operator.iand, expression)


-def query_db(model: Type[DataBaseModel], limit: int = 0, offset: int = 0,
-             query: dict = None, order_by: Union[str, list, tuple] = None):
+def query_db(model: type[DataBaseModel], limit: int = 0, offset: int = 0,
+             query: dict = None, order_by: str | list | tuple | None = None):
     data = model.select()
     if query:
         data = data.where(query_dict2expression(model, query))
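The annotation rewrites above replace `typing.Type`, `typing.Dict` and `typing.Union` with the built-in generics of PEP 585 and the `|` unions of PEP 604 (Python 3.9/3.10+), which is what lets the `typing` import disappear. Side by side:

    # before: needs `from typing import Dict, Type, Union`
    # def query_db(model: Type[DataBaseModel],
    #              order_by: Union[str, list, tuple] = None): ...

    # after: no typing import on Python 3.10+
    def query_db(model: type["DataBaseModel"],
                 order_by: str | list | tuple | None = None):
        ...

Note that the new spelling also makes the implicit-`None` default explicit (`... | None`), which `Union[str, list, tuple] = None` left unstated.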
api/db/init_data.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import base64
 import json
 import os
@@ -28,7 +29,7 @@ from api.db.services.document_service import DocumentService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
 from api.db.services.user_service import TenantService, UserTenantService
-from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
+from api import settings
 from api.utils.file_utils import get_project_base_directory

@@ -50,11 +51,11 @@ def init_superuser():
     tenant = {
         "id": user_info["id"],
         "name": user_info["nickname"] + "‘s Kingdom",
-        "llm_id": CHAT_MDL,
-        "embd_id": EMBEDDING_MDL,
-        "asr_id": ASR_MDL,
-        "parser_ids": PARSERS,
-        "img2txt_id": IMAGE2TEXT_MDL
+        "llm_id": settings.CHAT_MDL,
+        "embd_id": settings.EMBEDDING_MDL,
+        "asr_id": settings.ASR_MDL,
+        "parser_ids": settings.PARSERS,
+        "img2txt_id": settings.IMAGE2TEXT_MDL
     }
     usr_tenant = {
         "tenant_id": user_info["id"],
@@ -63,42 +64,43 @@ def init_superuser():
         "role": UserTenantRole.OWNER
     }
     tenant_llm = []
-    for llm in LLMService.query(fid=LLM_FACTORY):
+    for llm in LLMService.query(fid=settings.LLM_FACTORY):
         tenant_llm.append(
-            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name, "model_type": llm.model_type,
-             "api_key": API_KEY, "api_base": LLM_BASE_URL})
+            {"tenant_id": user_info["id"], "llm_factory": settings.LLM_FACTORY, "llm_name": llm.llm_name,
+             "model_type": llm.model_type,
+             "api_key": settings.API_KEY, "api_base": settings.LLM_BASE_URL})

     if not UserService.save(**user_info):
-        print("\033[93m【ERROR】\033[0mcan't init admin.")
+        logging.error("can't init admin.")
         return
     TenantService.insert(**tenant)
     UserTenantService.insert(**usr_tenant)
     TenantLLMService.insert_many(tenant_llm)
-    print(
-        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. Changing the password after logining is strongly recomanded.")
+    logging.info(
+        "Super user initialized. email: admin@ragflow.io, password: admin. Changing the password after login is strongly recommended.")

     chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
     msg = chat_mdl.chat(system="", history=[
         {"role": "user", "content": "Hello!"}], gen_conf={})
     if msg.find("ERROR: ") == 0:
-        print(
-            "\33[91m【ERROR】\33[0m: ",
-            "'{}' dosen't work. {}".format(
+        logging.error(
+            "'{}' dosen't work. {}".format(
                 tenant["llm_id"],
                 msg))
     embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
     v, c = embd_mdl.encode(["Hello!"])
     if c == 0:
-        print(
-            "\33[91m【ERROR】\33[0m:",
-            " '{}' dosen't work!".format(
+        logging.error(
+            "'{}' dosen't work!".format(
                 tenant["embd_id"]))


 def init_llm_factory():
     try:
         LLMService.filter_delete([(LLM.fid == "MiniMax" or LLM.fid == "Minimax")])
-    except Exception as e:
+        LLMService.filter_delete([(LLM.fid == "cohere")])
+        LLMFactoriesService.filter_delete([LLMFactories.name == "cohere"])
+    except Exception:
         pass

     factory_llm_infos = json.load(
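A pattern repeated throughout this commit: `from api.settings import CHAT_MDL, ...` becomes `from api import settings` plus attribute access. Importing the module keeps every lookup late-bound, so values that an init routine assigns onto `api.settings` after startup are seen by all callers; importing the names copies them once at import time. A minimal illustration:

    # early-bound: CHAT_MDL is snapshotted here; later updates to
    # api.settings.CHAT_MDL are invisible to this module.
    # from api.settings import CHAT_MDL

    # late-bound: each call reads the current module attribute.
    from api import settings

    def default_chat_model():
        return settings.CHAT_MDL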
@@ -111,14 +113,14 @@ def init_llm_factory():
         llm_infos = factory_llm_info.pop("llm")
         try:
             LLMFactoriesService.save(**factory_llm_info)
-        except Exception as e:
+        except Exception:
             pass
         LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
         for llm_info in llm_infos:
             llm_info["fid"] = factory_llm_info["name"]
             try:
                 LLMService.save(**llm_info)
-            except Exception as e:
+            except Exception:
                 pass

     LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
@@ -129,10 +131,11 @@ def init_llm_factory():
     LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
     LLMService.filter_delete([LLMService.model.fid == "QAnything"])
     TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
+    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "cohere"], {"llm_factory": "Cohere"})
     TenantService.filter_update([1 == 1], {
         "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
     ## insert openai two embedding models to the current openai user.
-    print("Start to insert 2 OpenAI embedding models...")
+    # print("Start to insert 2 OpenAI embedding models...")
     tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
     for tid in tenant_ids:
         for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
@@ -145,7 +148,7 @@ def init_llm_factory():
                 row = deepcopy(row)
                 row["llm_name"] = "text-embedding-3-large"
                 TenantLLMService.save(**row)
-            except Exception as e:
+            except Exception:
                 pass
             break
     for kb_id in KnowledgebaseService.get_all_ids():
@@ -167,22 +170,21 @@ def add_graph_templates():
             cnvs = json.load(open(os.path.join(dir, fnm), "r"))
             try:
                 CanvasTemplateService.save(**cnvs)
-            except:
+            except Exception:
                 CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
-    except Exception as e:
-        print("Add graph templates error: ", e)
-        print("------------", flush=True)
+    except Exception:
+        logging.exception("Add graph templates error: ")


 def init_web_data():
     start_time = time.time()

     init_llm_factory()
-    #if not UserService.get_all().count():
+    # if not UserService.get_all().count():
     #     init_superuser()

     add_graph_templates()
-    print("init web data success:{}".format(time.time() - start_time))
+    logging.info("init web data success:{}".format(time.time() - start_time))


 if __name__ == '__main__':
(deleted file)
@@ -1,21 +0,0 @@
-#
-#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-import operator
-import time
-import typing
-from api.utils.log_utils import sql_logger
-import peewee
api/db/runtime_config.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from api.versions import get_versions
+from api.versions import get_ragflow_version
 from .reload_config_base import ReloadConfigBase

@@ -35,7 +35,7 @@ class RuntimeConfig(ReloadConfigBase):

     @classmethod
     def init_env(cls):
-        cls.ENV.update(get_versions())
+        cls.ENV.update({"version": get_ragflow_version()})

     @classmethod
     def load_config_manager(cls):
api/db/services/__init__.py
@@ -15,13 +15,14 @@
 #
 import pathlib
 import re
-from .user_service import UserService
+from .user_service import UserService as UserService


 def duplicate_name(query_func, **kwargs):
     fnm = kwargs["name"]
     objs = query_func(**kwargs)
-    if not objs: return fnm
+    if not objs:
+        return fnm
     ext = pathlib.Path(fnm).suffix #.jpg
     nm = re.sub(r"%s$"%ext, "", fnm)
     r = re.search(r"\(([0-9]+)\)$", nm)
@@ -31,8 +32,8 @@ def duplicate_name(query_func, **kwargs):
     nm = re.sub(r"\([0-9]+\)$", "", nm)
     c += 1
     nm = f"{nm}({c})"
-    if ext: nm += f"{ext}"
+    if ext:
+        nm += f"{ext}"

     kwargs["name"] = nm
     return duplicate_name(query_func, **kwargs)
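`duplicate_name` probes for a free name recursively, bumping a `(n)` counter before the file extension on every collision. Behaviour sketch with a stand-in query function (`fake_query` is hypothetical; real callers pass something like `DocumentService.query`):

    existing = {"report.pdf", "report(1).pdf"}

    def fake_query(**kwargs):
        # Mimics a service query: returns matches for kwargs["name"].
        return [kwargs["name"]] if kwargs["name"] in existing else []

    # duplicate_name(fake_query, name="report.pdf") tries
    # "report.pdf" -> "report(1).pdf" -> "report(2).pdf"
    # and returns "report(2).pdf".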
api/db/services/api_service.py
@@ -39,6 +39,22 @@ class APITokenService(CommonService):
 class API4ConversationService(CommonService):
     model = API4Conversation

+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, dialog_id, tenant_id,
+                 page_number, items_per_page, orderby, desc, id):
+        sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
+        if id:
+            sessions = sessions.where(cls.model.id == id)
+        if desc:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
+        sessions = sessions.where(cls.model.user_id == tenant_id)
+        sessions = sessions.paginate(page_number, items_per_page)
+
+        return list(sessions.dicts())
+
     @classmethod
     @DB.connection_context()
     def append_message(cls, id, conversation):
@@ -48,7 +64,8 @@ class API4ConversationService(CommonService):
     @classmethod
     @DB.connection_context()
     def stats(cls, tenant_id, from_date, to_date, source=None):
-        if len(to_date) == 10: to_date += " 23:59:59"
+        if len(to_date) == 10:
+            to_date += " 23:59:59"
         return cls.model.select(
             cls.model.create_date.truncate("day").alias("dt"),
             peewee.fn.COUNT(
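The new `get_list` is standard fluent peewee: filters are chained onto a select, the sort column is resolved at runtime (`getter_by` is a helper on this codebase's model base, not stock peewee), and `paginate(page, per_page)` maps to LIMIT/OFFSET. A usage sketch with assumed argument values:

    # page 1, 30 rows, newest first; id=None means "no id filter"
    sessions = API4ConversationService.get_list(
        dialog_id="d0", tenant_id="t0",
        page_number=1, items_per_page=30,
        orderby="create_time", desc=True, id=None)
    for s in sessions:
        print(s["id"])  # .dicts() makes each row a plain dict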
api/db/services/canvas_service.py
@@ -13,14 +13,143 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from datetime import datetime
-import peewee
-from api.db.db_models import DB, API4Conversation, APIToken, Dialog, CanvasTemplate, UserCanvas
+import json
+import traceback
+from uuid import uuid4
+from agent.canvas import Canvas
+from api.db.db_models import DB, CanvasTemplate, UserCanvas, API4Conversation
+from api.db.services.api_service import API4ConversationService
 from api.db.services.common_service import CommonService
+from api.db.services.conversation_service import structure_answer
+from api.utils import get_uuid


 class CanvasTemplateService(CommonService):
     model = CanvasTemplate


 class UserCanvasService(CommonService):
     model = UserCanvas
+
+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, tenant_id,
+                 page_number, items_per_page, orderby, desc, id, title):
+        agents = cls.model.select()
+        if id:
+            agents = agents.where(cls.model.id == id)
+        if title:
+            agents = agents.where(cls.model.title == title)
+        agents = agents.where(cls.model.user_id == tenant_id)
+        if desc:
+            agents = agents.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            agents = agents.order_by(cls.model.getter_by(orderby).asc())
+
+        agents = agents.paginate(page_number, items_per_page)
+
+        return list(agents.dicts())
+
+
+def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
+    e, cvs = UserCanvasService.get_by_id(agent_id)
+    assert e, "Agent not found."
+    assert cvs.user_id == tenant_id, "You do not own the agent."
+    if not isinstance(cvs.dsl, str):
+        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
+    canvas = Canvas(cvs.dsl, tenant_id)
+    canvas.reset()
+    message_id = str(uuid4())
+    if not session_id:
+        query = canvas.get_preset_param()
+        if query:
+            for ele in query:
+                if not ele["optional"]:
+                    if not kwargs.get(ele["key"]):
+                        assert False, f"`{ele['key']}` is required"
+                    ele["value"] = kwargs[ele["key"]]
+                if ele["optional"]:
+                    if kwargs.get(ele["key"]):
+                        ele["value"] = kwargs[ele['key']]
+                    else:
+                        if "value" in ele:
+                            ele.pop("value")
+            cvs.dsl = json.loads(str(canvas))
+            temp_dsl = cvs.dsl
+            UserCanvasService.update_by_id(agent_id, cvs.to_dict())
+        else:
+            temp_dsl = json.loads(cvs.dsl)
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": cvs.id,
+            "user_id": kwargs.get("user_id", ""),
+            "source": "agent",
+            "dsl": temp_dsl
+        }
+        API4ConversationService.save(**conv)
+        conv = API4Conversation(**conv)
+    else:
+        e, conv = API4ConversationService.get_by_id(session_id)
+        assert e, "Session not found!"
+        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
+    canvas.messages.append({"role": "user", "content": question, "id": message_id})
+    canvas.add_user_input(question)
+    if not conv.message:
+        conv.message = []
+    conv.message.append({
+        "role": "user",
+        "content": question,
+        "id": message_id
+    })
+    if not conv.reference:
+        conv.reference = []
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    final_ans = {"reference": [], "content": ""}
+    if stream:
+        try:
+            for ans in canvas.run(stream=stream):
+                if ans.get("running_status"):
+                    yield "data:" + json.dumps({"code": 0, "message": "",
+                                                "data": {"answer": ans["content"],
+                                                         "running_status": True}},
+                                               ensure_ascii=False) + "\n\n"
+                    continue
+                for k in ans.keys():
+                    final_ans[k] = ans[k]
+                ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
+                                           ensure_ascii=False) + "\n\n"
+
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+            canvas.history.append(("assistant", final_ans["content"]))
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+        except Exception as e:
+            traceback.print_exc()
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+
+    else:
+        for answer in canvas.run(stream=False):
+            if answer.get("running_status"):
+                continue
+            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+
+            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
+            result = structure_answer(conv, result, message_id, session_id)
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield result
+            break
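Both branches of `completion` frame their output as Server-Sent Events: each chunk is `data:` plus a JSON envelope (`code`/`message`/`data`) terminated by a blank line, and the stream ends with a `"data": true` sentinel. The framing the code repeats inline could be captured as (hypothetical helper name `sse_event`):

    import json

    def sse_event(payload, code=0, message=""):
        # One SSE message: "data:<json>\n\n"; the blank line ends the event.
        body = {"code": code, "message": message, "data": payload}
        return "data:" + json.dumps(body, ensure_ascii=False) + "\n\n"

    # yield sse_event({"answer": "partial text", "reference": []})
    # yield sse_event(True)  # end-of-stream marker the client waits for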
api/db/services/common_service.py
@@ -115,7 +115,7 @@ class CommonService:
         try:
             obj = cls.model.query(id=pid)[0]
             return True, obj
-        except Exception as e:
+        except Exception:
             return False, None

     @classmethod
api/db/services/conversation_service.py (new file, 229 lines)
@@ -0,0 +1,229 @@
+#
+#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+from uuid import uuid4
+from api.db import StatusEnum
+from api.db.db_models import Conversation, DB
+from api.db.services.api_service import API4ConversationService
+from api.db.services.common_service import CommonService
+from api.db.services.dialog_service import DialogService, chat
+from api.utils import get_uuid
+import json
+
+
+class ConversationService(CommonService):
+    model = Conversation
+
+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, dialog_id, page_number, items_per_page, orderby, desc, id, name):
+        sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
+        if id:
+            sessions = sessions.where(cls.model.id == id)
+        if name:
+            sessions = sessions.where(cls.model.name == name)
+        if desc:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
+
+        sessions = sessions.paginate(page_number, items_per_page)
+
+        return list(sessions.dicts())
+
+
+def structure_answer(conv, ans, message_id, session_id):
+    reference = ans["reference"]
+    if not isinstance(reference, dict):
+        reference = {}
+        ans["reference"] = {}
+
+    def get_value(d, k1, k2):
+        return d.get(k1, d.get(k2))
+
+    chunk_list = [{
+        "id": get_value(chunk, "chunk_id", "id"),
+        "content": get_value(chunk, "content", "content_with_weight"),
+        "document_id": get_value(chunk, "doc_id", "document_id"),
+        "document_name": get_value(chunk, "docnm_kwd", "document_name"),
+        "dataset_id": get_value(chunk, "kb_id", "dataset_id"),
+        "image_id": get_value(chunk, "image_id", "img_id"),
+        "positions": get_value(chunk, "positions", "position_int"),
+    } for chunk in reference.get("chunks", [])]
+
+    reference["chunks"] = chunk_list
+    ans["id"] = message_id
+    ans["session_id"] = session_id
+
+    if not conv:
+        return ans
+
+    if not conv.message:
+        conv.message = []
+    if not conv.message or conv.message[-1].get("role", "") != "assistant":
+        conv.message.append({"role": "assistant", "content": ans["answer"], "id": message_id})
+    else:
+        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
+    if conv.reference:
+        conv.reference[-1] = reference
+    return ans
+
+
+def completion(tenant_id, chat_id, question, name="New session", session_id=None, stream=True, **kwargs):
+    assert name, "`name` can not be empty."
+    dia = DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
+    assert dia, "You do not own the chat."
+
+    if not session_id:
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": chat_id,
+            "name": name,
+            "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
+        }
+        ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0, "message": "",
+                                    "data": {
+                                        "answer": conv["message"][0]["content"],
+                                        "reference": {},
+                                        "audio_binary": None,
+                                        "id": None,
+                                        "session_id": session_id
+                                    }},
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+
+    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
+    if not conv:
+        raise LookupError("Session does not exist")
+
+    conv = conv[0]
+    msg = []
+    question = {
+        "content": question,
+        "role": "user",
+        "id": str(uuid4())
+    }
+    conv.message.append(question)
+    for m in conv.message:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    message_id = msg[-1].get("id")
+    e, dia = DialogService.get_by_id(conv.dialog_id)
+
+    if not conv.reference:
+        conv.reference = []
+    conv.message.append({"role": "assistant", "content": "", "id": message_id})
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    if stream:
+        try:
+            for ans in chat(dia, msg, True, **kwargs):
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
+            ConversationService.update_by_id(conv.id, conv.to_dict())
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"
+
+    else:
+        answer = None
+        for ans in chat(dia, msg, False, **kwargs):
+            answer = structure_answer(conv, ans, message_id, session_id)
+            ConversationService.update_by_id(conv.id, conv.to_dict())
+            break
+        yield answer
+
+
+def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwargs):
+    e, dia = DialogService.get_by_id(dialog_id)
+    assert e, "Dialog not found"
+    if not session_id:
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": dialog_id,
+            "user_id": kwargs.get("user_id", ""),
+            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
+        }
+        API4ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0, "message": "",
+                                    "data": {
+                                        "answer": conv["message"][0]["content"],
+                                        "reference": {},
+                                        "audio_binary": None,
+                                        "id": None,
+                                        "session_id": session_id
+                                    }},
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+    else:
+        session_id = session_id
+        e, conv = API4ConversationService.get_by_id(session_id)
+        assert e, "Session not found!"
+
+    if not conv.message:
+        conv.message = []
+    messages = conv.message
+    question = {
+        "role": "user",
+        "content": question,
+        "id": str(uuid4())
+    }
+    messages.append(question)
+
+    msg = []
+    for m in messages:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
+    message_id = msg[-1]["id"]
+
+    if not conv.reference:
+        conv.reference = []
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    if stream:
+        try:
+            for ans in chat(dia, msg, True, **kwargs):
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
+                                           ensure_ascii=False) + "\n\n"
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+
+    else:
+        answer = None
+        for ans in chat(dia, msg, False, **kwargs):
+            answer = structure_answer(conv, ans, message_id, session_id)
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            break
+        yield answer
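`structure_answer` has to accept chunk dictionaries in two spellings: internal retrieval keys (`chunk_id`, `doc_id`, `docnm_kwd`, ...) and public API keys (`id`, `document_id`, `document_name`, ...), and `get_value` simply tries both. In isolation:

    def get_value(d, k1, k2):
        return d.get(k1, d.get(k2))

    chunk = {"chunk_id": "c1", "docnm_kwd": "manual.pdf"}
    assert get_value(chunk, "chunk_id", "id") == "c1"
    assert get_value(chunk, "docnm_kwd", "document_name") == "manual.pdf"

One subtlety: `d.get(k1, d.get(k2))` evaluates the fallback eagerly, which is harmless here but would matter if the fallback had side effects.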
api/db/services/dialog_service.py
@@ -13,20 +13,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import binascii
 import os
 import json
 import re
+from collections import defaultdict
 from copy import deepcopy
 from timeit import default_timer as timer
-from api.db import LLMType, ParserType
-from api.db.db_models import Dialog, Conversation
+import datetime
+from datetime import timedelta
+from api.db import LLMType, ParserType, StatusEnum
+from api.db.db_models import Dialog, DB
 from api.db.services.common_service import CommonService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
-from api.settings import chat_logger, retrievaler, kg_retrievaler
+from api import settings
 from rag.app.resume import forbidden_select_fields4resume
-from rag.nlp import keyword_extraction
 from rag.nlp.search import index_name
 from rag.utils import rmSpace, num_tokens_from_string, encoder
 from api.utils.file_utils import get_project_base_directory
@@ -35,9 +38,27 @@ from api.utils.file_utils import get_project_base_directory
 class DialogService(CommonService):
     model = Dialog

+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, tenant_id,
+                 page_number, items_per_page, orderby, desc, id, name):
+        chats = cls.model.select()
+        if id:
+            chats = chats.where(cls.model.id == id)
+        if name:
+            chats = chats.where(cls.model.name == name)
+        chats = chats.where(
+            (cls.model.tenant_id == tenant_id)
+            & (cls.model.status == StatusEnum.VALID.value)
+        )
+        if desc:
+            chats = chats.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            chats = chats.order_by(cls.model.getter_by(orderby).asc())

-class ConversationService(CommonService):
-    model = Conversation
+        chats = chats.paginate(page_number, items_per_page)
+
+        return list(chats.dicts())


 def message_fit_in(msg, max_length=4000):
@@ -57,44 +78,67 @@ def message_fit_in(msg, max_length=4000):
         return c, msg

     msg_ = [m for m in msg[:-1] if m["role"] == "system"]
-    msg_.append(msg[-1])
+    if len(msg) > 1:
+        msg_.append(msg[-1])
     msg = msg_
     c = count()
     if c < max_length:
         return c, msg

     ll = num_tokens_from_string(msg_[0]["content"])
-    l = num_tokens_from_string(msg_[-1]["content"])
-    if ll / (ll + l) > 0.8:
+    ll2 = num_tokens_from_string(msg_[-1]["content"])
+    if ll / (ll + ll2) > 0.8:
         m = msg_[0]["content"]
-        m = encoder.decode(encoder.encode(m)[:max_length - l])
+        m = encoder.decode(encoder.encode(m)[:max_length - ll2])
         msg[0]["content"] = m
         return max_length, msg

     m = msg_[1]["content"]
-    m = encoder.decode(encoder.encode(m)[:max_length - l])
+    m = encoder.decode(encoder.encode(m)[:max_length - ll2])
     msg[1]["content"] = m
     return max_length, msg


 def llm_id2llm_type(llm_id):
-    llm_id = llm_id.split("@")[0]
+    llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
     fnm = os.path.join(get_project_base_directory(), "conf")
     llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
     for llm_factory in llm_factories["factory_llm_infos"]:
         for llm in llm_factory["llm"]:
             if llm_id == llm["llm_name"]:
                 return llm["model_type"].strip(",")[-1]


+def kb_prompt(kbinfos, max_tokens):
+    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
+    used_token_count = 0
+    chunks_num = 0
+    for i, c in enumerate(knowledges):
+        used_token_count += num_tokens_from_string(c)
+        chunks_num += 1
+        if max_tokens * 0.97 < used_token_count:
+            knowledges = knowledges[:i]
+            break
+
+    doc2chunks = defaultdict(list)
+    for i, ck in enumerate(kbinfos["chunks"]):
+        if i >= chunks_num:
+            break
+        doc2chunks["docnm_kwd"].append(ck["content_with_weight"])
+
+    knowledges = []
+    for nm, chunks in doc2chunks.items():
+        txt = f"Document: {nm} \nContains the following relevant fragments:\n"
+        for i, chunk in enumerate(chunks, 1):
+            txt += f"{i}. {chunk}\n"
+        knowledges.append(txt)
+    return knowledges
+
+
 def chat(dialog, messages, stream=True, **kwargs):
     assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
     st = timer()
-    tmp = dialog.llm_id.split("@")
-    fid = None
-    llm_id = tmp[0]
-    if len(tmp)>1: fid = tmp[1]
+    llm_id, fid = TenantLLMService.split_model_name_and_factory(dialog.llm_id)

     llm = LLMService.query(llm_name=llm_id) if not fid else LLMService.query(llm_name=llm_id, fid=fid)
     if not llm:
         llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not fid else \
@ -111,7 +155,7 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
|
return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
|
||||||
|
|
||||||
is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
|
is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
|
||||||
retr = retrievaler if not is_kg else kg_retrievaler
|
retr = settings.retrievaler if not is_kg else settings.kg_retrievaler
|
||||||
|
|
||||||
questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
|
questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
|
||||||
attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
|
attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
|
||||||
@ -122,6 +166,9 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
attachments.extend(m["doc_ids"])
|
attachments.extend(m["doc_ids"])
|
||||||
|
|
||||||
embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
|
embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
|
||||||
|
if not embd_mdl:
|
||||||
|
raise LookupError("Embedding model(%s) not found" % embd_nms[0])
|
||||||
|
|
||||||
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
if llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||||
else:
|
else:
|
||||||
@ -134,7 +181,7 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
|
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
|
||||||
# try to use sql if field mapping is good to go
|
# try to use sql if field mapping is good to go
|
||||||
if field_map:
|
if field_map:
|
||||||
chat_logger.info("Use SQL to retrieval:{}".format(questions[-1]))
|
logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
|
||||||
ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
|
ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
|
||||||
if ans:
|
if ans:
|
||||||
yield ans
|
yield ans
|
||||||
@ -153,6 +200,8 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
|
questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
|
||||||
else:
|
else:
|
||||||
questions = questions[-1:]
|
questions = questions[-1:]
|
||||||
|
refineQ_tm = timer()
|
||||||
|
keyword_tm = timer()
|
||||||
|
|
||||||
rerank_mdl = None
|
rerank_mdl = None
|
||||||
if dialog.rerank_id:
|
if dialog.rerank_id:
|
||||||
@ -165,13 +214,16 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
else:
|
else:
|
||||||
if prompt_config.get("keyword", False):
|
if prompt_config.get("keyword", False):
|
||||||
questions[-1] += keyword_extraction(chat_mdl, questions[-1])
|
questions[-1] += keyword_extraction(chat_mdl, questions[-1])
|
||||||
kbinfos = retr.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
|
keyword_tm = timer()
|
||||||
|
|
||||||
|
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||||
|
kbinfos = retr.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
|
||||||
dialog.similarity_threshold,
|
dialog.similarity_threshold,
|
||||||
dialog.vector_similarity_weight,
|
dialog.vector_similarity_weight,
|
||||||
doc_ids=attachments,
|
doc_ids=attachments,
|
||||||
top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
|
top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
|
||||||
knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
|
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||||
chat_logger.info(
|
logging.debug(
|
||||||
"{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
|
"{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
|
||||||
retrieval_tm = timer()
|
retrieval_tm = timer()
|
||||||
|
|
||||||
@ -189,6 +241,7 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
|
used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
|
||||||
assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
|
assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
|
||||||
prompt = msg[0]["content"]
|
prompt = msg[0]["content"]
|
||||||
|
prompt += "\n\n### Query:\n%s" % " ".join(questions)
|
||||||
|
|
||||||
if "max_tokens" in gen_conf:
|
if "max_tokens" in gen_conf:
|
||||||
gen_conf["max_tokens"] = min(
|
gen_conf["max_tokens"] = min(
|
||||||
@ -210,7 +263,8 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
||||||
recall_docs = [
|
recall_docs = [
|
||||||
d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
||||||
if not recall_docs: recall_docs = kbinfos["doc_aggs"]
|
if not recall_docs:
|
||||||
|
recall_docs = kbinfos["doc_aggs"]
|
||||||
kbinfos["doc_aggs"] = recall_docs
|
kbinfos["doc_aggs"] = recall_docs
|
||||||
|
|
||||||
refs = deepcopy(kbinfos)
|
refs = deepcopy(kbinfos)
|
||||||
@ -219,9 +273,11 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
del c["vector"]
|
del c["vector"]
|
||||||
|
|
||||||
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
||||||
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
|
||||||
done_tm = timer()
|
done_tm = timer()
|
||||||
prompt += "\n\n### Elapsed\n - Retrieval: %.1f ms\n - LLM: %.1f ms"%((retrieval_tm-st)*1000, (done_tm-st)*1000)
|
prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
|
||||||
|
(refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
|
||||||
|
(done_tm - retrieval_tm) * 1000)
|
||||||
return {"answer": answer, "reference": refs, "prompt": prompt}
|
return {"answer": answer, "reference": refs, "prompt": prompt}
|
||||||
|
|
||||||
if stream:
|
if stream:
|
||||||
@ -240,7 +296,7 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
yield decorate_answer(answer)
|
yield decorate_answer(answer)
|
||||||
else:
|
else:
|
||||||
answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
|
answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
|
||||||
chat_logger.info("User: {}|Assistant: {}".format(
|
logging.debug("User: {}|Assistant: {}".format(
|
||||||
msg[-1]["content"], answer))
|
msg[-1]["content"], answer))
|
||||||
res = decorate_answer(answer)
|
res = decorate_answer(answer)
|
||||||
res["audio_binary"] = tts(tts_mdl, answer)
|
res["audio_binary"] = tts(tts_mdl, answer)
|
||||||
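The elapsed report in chat() now breaks the old retrieval+LLM figure into four stages via checkpoint timers. The pattern in miniature (stage bodies are stand-ins; `timer` is assumed to wrap a monotonic clock such as time.perf_counter):

    from time import perf_counter as timer, sleep

    def stage(ms):                      # stand-in for real pipeline work
        sleep(ms / 1000)

    st = timer()
    stage(5);  refineQ_tm = timer()     # multi-turn question refinement
    stage(5);  keyword_tm = timer()     # keyword extraction
    stage(20); retrieval_tm = timer()   # hybrid retrieval
    stage(50); done_tm = timer()        # LLM generation
    print("Refine Question: %.1f ms" % ((refineQ_tm - st) * 1000))
    print("Keywords: %.1f ms" % ((keyword_tm - refineQ_tm) * 1000))
    print("Retrieval: %.1f ms" % ((retrieval_tm - keyword_tm) * 1000))
    print("LLM: %.1f ms" % ((done_tm - retrieval_tm) * 1000))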
@@ -268,8 +324,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
         nonlocal sys_prompt, user_promt, question, tried_times
         sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
             "temperature": 0.06})
-        print(user_promt, sql)
-        chat_logger.info(f"“{question}”==>{user_promt} get SQL: {sql}")
+        logging.debug(f"{question} ==> {user_promt} get SQL: {sql}")
         sql = re.sub(r"[\r\n]+", " ", sql.lower())
         sql = re.sub(r".*select ", "select ", sql.lower())
         sql = re.sub(r" +", " ", sql)
@@ -289,11 +344,9 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
                 flds.append(k)
             sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

-        print(f"“{question}” get SQL(refined): {sql}")
-        chat_logger.info(f"“{question}” get SQL(refined): {sql}")
+        logging.debug(f"{question} get SQL(refined): {sql}")

         tried_times += 1
-        return retrievaler.sql_retrieval(sql, format="json"), sql
+        return settings.retrievaler.sql_retrieval(sql, format="json"), sql

     tbl, sql = get_table()
     if tbl is None:
@@ -320,10 +373,9 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
             question, sql, tbl["error"]
         )
         tbl, sql = get_table()
-        chat_logger.info("TRY it again: {}".format(sql))
+        logging.debug("TRY it again: {}".format(sql))

-    chat_logger.info("GET table: {}".format(tbl))
-    print(tbl)
+    logging.debug("GET table: {}".format(tbl))
     if tbl.get("error") or len(tbl["rows"]) == 0:
         return None

@@ -345,6 +397,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
     rows = ["|" +
             "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
             "|" for r in tbl["rows"]]
+    rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
     if quota:
         rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
     else:
@@ -352,7 +405,7 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
     rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

     if not docid_idx or not docnm_idx:
-        chat_logger.warning("SQL missing field: " + sql)
+        logging.warning("SQL missing field: " + sql)
         return {
             "answer": "\n".join([clmns, line, rows]),
             "reference": {"chunks": [], "doc_aggs": []},
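use_sql keeps its regex chain that coerces whatever the LLM emits into a single lowercase "select ..." statement. The same normalization, standalone (the input string is illustrative):

    import re

    raw = "Sure! Here is the query:\nSELECT  doc_id,\n docnm_kwd FROM docs"
    sql = re.sub(r"[\r\n]+", " ", raw.lower())   # flatten newlines
    sql = re.sub(r".*select ", "select ", sql)   # drop chatter before SELECT
    sql = re.sub(r" +", " ", sql)                # collapse repeated spaces
    print(sql)   # select doc_id, docnm_kwd from docs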
@@ -387,13 +440,15 @@ def relevant(tenant_id, llm_id, question, contents: list):
 Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
 No other words needed except 'yes' or 'no'.
 """
-    if not contents:return False
+    if not contents:
+        return False
     contents = "Documents: \n" + " - ".join(contents)
     contents = f"Question: {question}\n" + contents
     if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
         contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
     ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
-    if ans.lower().find("yes") >= 0: return True
+    if ans.lower().find("yes") >= 0:
+        return True
     return False

@@ -415,6 +470,62 @@ def rewrite(tenant_id, llm_id, question):
     return ans
+def keyword_extraction(chat_mdl, content, topn=3):
+    prompt = f"""
+Role: You're a text analyzer.
+Task: extract the most important keywords/phrases of a given piece of text content.
+Requirements:
+- Summarize the text content, and give top {topn} important keywords/phrases.
+- The keywords MUST be in language of the given piece of text content.
+- The keywords are delimited by ENGLISH COMMA.
+- Keywords ONLY in output.
+
+### Text Content
+{content}
+
+"""
+    msg = [
+        {"role": "system", "content": prompt},
+        {"role": "user", "content": "Output: "}
+    ]
+    _, msg = message_fit_in(msg, chat_mdl.max_length)
+    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
+    if isinstance(kwd, tuple):
+        kwd = kwd[0]
+    if kwd.find("**ERROR**") >= 0:
+        return ""
+    return kwd
+
+
+def question_proposal(chat_mdl, content, topn=3):
+    prompt = f"""
+Role: You're a text analyzer.
+Task: propose {topn} questions about a given piece of text content.
+Requirements:
+- Understand and summarize the text content, and propose top {topn} important questions.
+- The questions SHOULD NOT have overlapping meanings.
+- The questions SHOULD cover the main content of the text as much as possible.
+- The questions MUST be in language of the given piece of text content.
+- One question per line.
+- Question ONLY in output.
+
+### Text Content
+{content}
+
+"""
+    msg = [
+        {"role": "system", "content": prompt},
+        {"role": "user", "content": "Output: "}
+    ]
+    _, msg = message_fit_in(msg, chat_mdl.max_length)
+    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
+    if isinstance(kwd, tuple):
+        kwd = kwd[0]
+    if kwd.find("**ERROR**") >= 0:
+        return ""
+    return kwd
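Both new helpers share one calling convention: a system prompt plus a bare "Output: " user turn, fitted to the model window via message_fit_in. An illustrative invocation with a test double (StubLLM is not a ragflow class; the module's own message_fit_in and tokenizer are assumed importable):

    class StubLLM:
        max_length = 4096
        def chat(self, system, history, gen_conf):
            return "retrieval, embeddings, reranking"

    print(keyword_extraction(StubLLM(), "A note on retrieval quality...", topn=3))
    # retrieval, embeddings, reranking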
 def full_question(tenant_id, llm_id, messages):
     if llm_id2llm_type(llm_id) == "image2text":
         chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
@@ -422,12 +533,20 @@ def full_question(tenant_id, llm_id, messages):
         chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
     conv = []
     for m in messages:
-        if m["role"] not in ["user", "assistant"]: continue
+        if m["role"] not in ["user", "assistant"]:
+            continue
         conv.append("{}: {}".format(m["role"].upper(), m["content"]))
     conv = "\n".join(conv)
+    today = datetime.date.today().isoformat()
+    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
+    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
     prompt = f"""
 Role: A helpful assistant
-Task: Generate a full user question that would follow the conversation.
+
+Task and steps:
+    1. Generate a full user question that would follow the conversation.
+    2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.

 Requirements & Restrictions:
 - Text generated MUST be in the same language of the original user's question.
 - If the user's latest question is completely, don't do anything, just return the original question.
@@ -456,6 +575,14 @@ User: What's her full name?
 ###############
 Output: What's the full name of Donald Trump's mother Mary Trump?

+------------
+# Example 3
+## Conversation
+USER: What's the weather today in London?
+ASSISTANT: Cloudy.
+USER: What's about tomorrow in Rochester?
+###############
+Output: What's the weather in Rochester on {tomorrow}?
 ######################

 # Real Data
@@ -468,7 +595,8 @@ Output: What's the full name of Donald Trump's mother Mary Trump?

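The new "Task and steps" block can only resolve relative dates because three date anchors are interpolated into the prompt. They are computed exactly as in the hunk above, standard library only:

    import datetime
    from datetime import timedelta

    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
    print(today, yesterday, tomorrow)   # e.g. 2024-11-12 2024-11-11 2024-11-13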
 def tts(tts_mdl, text):
-    if not tts_mdl or not text: return
+    if not tts_mdl or not text:
+        return
     bin = b""
     for chunk in tts_mdl.tts(text):
         bin += chunk
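tts() simply drains a generator of audio chunks into one bytes object. In miniature, with a stub model (real TTS bundles yield encoded audio, not these placeholder bytes):

    class StubTTS:
        def tts(self, text):
            yield b"RIFF"     # a real model streams audio chunks
            yield b"...."

    bin = b""
    for chunk in StubTTS().tts("hello"):
        bin += chunk
    print(len(bin))   # 8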
@@ -480,22 +608,14 @@ def ask(question, kb_ids, tenant_id):
     embd_nms = list(set([kb.embd_id for kb in kbs]))

     is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
-    retr = retrievaler if not is_kg else kg_retrievaler
+    retr = settings.retrievaler if not is_kg else settings.kg_retrievaler

     embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
     chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
     max_tokens = chat_mdl.max_length
-    kbinfos = retr.retrieval(question, embd_mdl, tenant_id, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
-    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
-
-    used_token_count = 0
-    for i, c in enumerate(knowledges):
-        used_token_count += num_tokens_from_string(c)
-        if max_tokens * 0.97 < used_token_count:
-            knowledges = knowledges[:i]
-            break
+    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
+    kbinfos = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
+    knowledges = kb_prompt(kbinfos, max_tokens)

     prompt = """
 Role: You're a smart assistant. Your name is Miss R.
 Task: Summarize the information from knowledge bases and answer user's question.
@@ -505,29 +625,30 @@ def ask(question, kb_ids, tenant_id):
 - Answer with markdown format text.
 - Answer in language of user's question.
 - DO NOT make things up, especially for numbers.

 ### Information from knowledge bases
 %s

 The above is information from knowledge bases.

-"""%"\n".join(knowledges)
+""" % "\n".join(knowledges)
     msg = [{"role": "user", "content": question}]

     def decorate_answer(answer):
         nonlocal knowledges, kbinfos, prompt
         answer, idx = retr.insert_citations(answer,
                                             [ck["content_ltks"]
                                              for ck in kbinfos["chunks"]],
                                             [ck["vector"]
                                              for ck in kbinfos["chunks"]],
                                             embd_mdl,
                                             tkweight=0.7,
                                             vtweight=0.3)
         idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
         recall_docs = [
             d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
-        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
+        if not recall_docs:
+            recall_docs = kbinfos["doc_aggs"]
         kbinfos["doc_aggs"] = recall_docs
         refs = deepcopy(kbinfos)
         for c in refs["chunks"]:

@@ -13,32 +13,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import hashlib
+import logging
+import xxhash
 import json
-import os
 import random
 import re
-import traceback
 from concurrent.futures import ThreadPoolExecutor
 from copy import deepcopy
 from datetime import datetime
 from io import BytesIO

-from elasticsearch_dsl import Q
 from peewee import fn

 from api.db.db_utils import bulk_insert_into_db
-from api.settings import stat_logger
+from api import settings
 from api.utils import current_timestamp, get_format_time, get_uuid
-from api.utils.file_utils import get_project_base_directory
 from graphrag.mind_map_extractor import MindMapExtractor
 from rag.settings import SVR_QUEUE_NAME
-from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils.storage_factory import STORAGE_IMPL
 from rag.nlp import search, rag_tokenizer

 from api.db import FileType, TaskStatus, ParserType, LLMType
-from api.db.db_models import DB, Knowledgebase, Tenant, Task
+from api.db.db_models import DB, Knowledgebase, Tenant, Task, UserTenant
 from api.db.db_models import Document
 from api.db.services.common_service import CommonService
 from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -49,6 +45,31 @@ from rag.utils.redis_conn import REDIS_CONN
 class DocumentService(CommonService):
     model = Document

+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, kb_id, page_number, items_per_page,
+                 orderby, desc, keywords, id, name):
+        docs = cls.model.select().where(cls.model.kb_id == kb_id)
+        if id:
+            docs = docs.where(
+                cls.model.id == id)
+        if name:
+            docs = docs.where(
+                cls.model.name == name
+            )
+        if keywords:
+            docs = docs.where(
+                fn.LOWER(cls.model.name).contains(keywords.lower())
+            )
+        if desc:
+            docs = docs.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            docs = docs.order_by(cls.model.getter_by(orderby).asc())
+
+        docs = docs.paginate(page_number, items_per_page)
+        count = docs.count()
+        return list(docs.dicts()), count
+
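An illustrative call to the new get_list (argument meanings read off the signature: id and name filter exactly, keywords is a case-insensitive substring match on the name; the values below are made up):

    docs, total = DocumentService.get_list(
        kb_id="kb_42", page_number=1, items_per_page=20,
        orderby="create_time", desc=True,
        keywords="report", id=None, name=None)
    print(total, [d["name"] for d in docs])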
     @classmethod
     @DB.connection_context()
     def get_by_kb_id(cls, kb_id, page_number, items_per_page,
@@ -70,35 +91,6 @@ class DocumentService(CommonService):

         return list(docs.dicts()), count

-    @classmethod
-    @DB.connection_context()
-    def list_documents_in_dataset(cls, dataset_id, offset, count, order_by, descend, keywords):
-        if keywords:
-            docs = cls.model.select().where(
-                (cls.model.kb_id == dataset_id),
-                (fn.LOWER(cls.model.name).contains(keywords.lower()))
-            )
-        else:
-            docs = cls.model.select().where(cls.model.kb_id == dataset_id)
-
-        total = docs.count()
-
-        if descend == 'True':
-            docs = docs.order_by(cls.model.getter_by(order_by).desc())
-        if descend == 'False':
-            docs = docs.order_by(cls.model.getter_by(order_by).asc())
-
-        docs = list(docs.dicts())
-        docs_length = len(docs)
-
-        if offset < 0 or offset > docs_length:
-            raise IndexError("Offset is out of the valid range.")
-
-        if count == -1:
-            return docs[offset:], total
-
-        return docs[offset:offset + count], total
-
     @classmethod
     @DB.connection_context()
     def insert(cls, doc):
@@ -116,8 +108,7 @@ class DocumentService(CommonService):
     @classmethod
     @DB.connection_context()
     def remove_document(cls, doc, tenant_id):
-        ELASTICSEARCH.deleteByQuery(
-            Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
+        settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
         cls.clear_chunk_num(doc.id)
         return cls.delete_by_id(doc.id)

@@ -140,26 +131,27 @@ class DocumentService(CommonService):
                   cls.model.update_time]
         docs = cls.model.select(*fields) \
             .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
-            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))\
+            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
             .where(
                 cls.model.status == StatusEnum.VALID.value,
                 ~(cls.model.type == FileType.VIRTUAL.value),
                 cls.model.progress == 0,
                 cls.model.update_time >= current_timestamp() - 1000 * 600,
-                cls.model.run == TaskStatus.RUNNING.value)\
+                cls.model.run == TaskStatus.RUNNING.value) \
             .order_by(cls.model.update_time.asc())
         return list(docs.dicts())

     @classmethod
     @DB.connection_context()
     def get_unfinished_docs(cls):
-        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg, cls.model.run]
+        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg,
+                  cls.model.run]
         docs = cls.model.select(*fields) \
             .where(
                 cls.model.status == StatusEnum.VALID.value,
                 ~(cls.model.type == FileType.VIRTUAL.value),
                 cls.model.progress < 1,
                 cls.model.progress > 0)
         return list(docs.dicts())

     @classmethod
@@ -174,12 +166,12 @@ class DocumentService(CommonService):
                 "Document not found which is supposed to be there")
         num = Knowledgebase.update(
             token_num=Knowledgebase.token_num +
                       token_num,
             chunk_num=Knowledgebase.chunk_num +
                       chunk_num).where(
             Knowledgebase.id == kb_id).execute()
         return num

     @classmethod
     @DB.connection_context()
     def decrement_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duation):
@@ -192,13 +184,13 @@ class DocumentService(CommonService):
                 "Document not found which is supposed to be there")
         num = Knowledgebase.update(
             token_num=Knowledgebase.token_num -
                       token_num,
             chunk_num=Knowledgebase.chunk_num -
                       chunk_num
         ).where(
             Knowledgebase.id == kb_id).execute()
         return num

     @classmethod
     @DB.connection_context()
     def clear_chunk_num(cls, doc_id):
@@ -207,10 +199,10 @@ class DocumentService(CommonService):

         num = Knowledgebase.update(
             token_num=Knowledgebase.token_num -
                       doc.token_num,
             chunk_num=Knowledgebase.chunk_num -
                       doc.chunk_num,
-            doc_num=Knowledgebase.doc_num-1
+            doc_num=Knowledgebase.doc_num - 1
         ).where(
             Knowledgebase.id == doc.kb_id).execute()
         return num
@@ -221,13 +213,22 @@ class DocumentService(CommonService):
         docs = cls.model.select(
             Knowledgebase.tenant_id).join(
             Knowledgebase, on=(
                 Knowledgebase.id == cls.model.kb_id)).where(
             cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
         docs = docs.dicts()
         if not docs:
             return
         return docs[0]["tenant_id"]

+    @classmethod
+    @DB.connection_context()
+    def get_knowledgebase_id(cls, doc_id):
+        docs = cls.model.select(cls.model.kb_id).where(cls.model.id == doc_id)
+        docs = docs.dicts()
+        if not docs:
+            return
+        return docs[0]["kb_id"]
+
     @classmethod
     @DB.connection_context()
     def get_tenant_id_by_name(cls, name):
@@ -241,19 +242,71 @@ class DocumentService(CommonService):
             return
         return docs[0]["tenant_id"]

+    @classmethod
+    @DB.connection_context()
+    def accessible(cls, doc_id, user_id):
+        docs = cls.model.select(
+            cls.model.id).join(
+            Knowledgebase, on=(
+                Knowledgebase.id == cls.model.kb_id)
+        ).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
+        ).where(cls.model.id == doc_id, UserTenant.user_id == user_id).paginate(0, 1)
+        docs = docs.dicts()
+        if not docs:
+            return False
+        return True
+
+    @classmethod
+    @DB.connection_context()
+    def accessible4deletion(cls, doc_id, user_id):
+        docs = cls.model.select(
+            cls.model.id).join(
+            Knowledgebase, on=(
+                Knowledgebase.id == cls.model.kb_id)
+        ).where(cls.model.id == doc_id, Knowledgebase.created_by == user_id).paginate(0, 1)
+        docs = docs.dicts()
+        if not docs:
+            return False
+        return True
+
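The two new guards answer different questions: accessible asks whether the user's tenant membership (via UserTenant) reaches the document's knowledge base, while accessible4deletion requires that the user created the knowledge base. Illustrative route-layer usage (the ids and the error style are made up):

    if not DocumentService.accessible(doc_id="d1", user_id="u1"):
        raise PermissionError("No access to this document.")
    if not DocumentService.accessible4deletion(doc_id="d1", user_id="u1"):
        raise PermissionError("Only the knowledge base owner may delete it.")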
     @classmethod
     @DB.connection_context()
     def get_embd_id(cls, doc_id):
         docs = cls.model.select(
             Knowledgebase.embd_id).join(
             Knowledgebase, on=(
                 Knowledgebase.id == cls.model.kb_id)).where(
             cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
         docs = docs.dicts()
         if not docs:
             return
         return docs[0]["embd_id"]

+    @classmethod
+    @DB.connection_context()
+    def get_chunking_config(cls, doc_id):
+        configs = (
+            cls.model.select(
+                cls.model.id,
+                cls.model.kb_id,
+                cls.model.parser_id,
+                cls.model.parser_config,
+                Knowledgebase.language,
+                Knowledgebase.embd_id,
+                Tenant.id.alias("tenant_id"),
+                Tenant.img2txt_id,
+                Tenant.asr_id,
+                Tenant.llm_id,
+            )
+            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
+            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
+            .where(cls.model.id == doc_id)
+        )
+        configs = configs.dicts()
+        if not configs:
+            return None
+        return configs[0]
+
     @classmethod
     @DB.connection_context()
     def get_doc_id_by_doc_name(cls, doc_name):
@@ -268,7 +321,7 @@ class DocumentService(CommonService):
     @classmethod
     @DB.connection_context()
     def get_thumbnails(cls, docids):
-        fields = [cls.model.id, cls.model.thumbnail]
+        fields = [cls.model.id, cls.model.kb_id, cls.model.thumbnail]
         return list(cls.model.select(
             *fields).where(cls.model.id.in_(docids)).dicts())

@@ -289,7 +342,10 @@ class DocumentService(CommonService):
                     dfs_update(old[k], v)
                 else:
                     old[k] = v

         dfs_update(d.parser_config, config)
+        if not config.get("raptor") and d.parser_config.get("raptor"):
+            del d.parser_config["raptor"]
         cls.update_by_id(id, {"parser_config": d.parser_config})

     @classmethod
@@ -305,7 +361,7 @@ class DocumentService(CommonService):
     def begin2parse(cls, docid):
         cls.update_by_id(
             docid, {"progress": random.random() * 1 / 100.,
-                    "progress_msg": "Task dispatched...",
+                    "progress_msg": "Task is queued...",
                     "process_begin_at": get_format_time()
                     })

@@ -323,7 +379,7 @@ class DocumentService(CommonService):
                 finished = True
                 bad = 0
                 e, doc = DocumentService.get_by_id(d["id"])
-                status = doc.run#TaskStatus.RUNNING.value
+                status = doc.run  # TaskStatus.RUNNING.value
                 for t in tsks:
                     if 0 <= t.progress < 1:
                         finished = False
@@ -337,9 +393,10 @@ class DocumentService(CommonService):
                     prg = -1
                     status = TaskStatus.FAIL.value
                 elif finished:
-                    if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(" raptor")<0:
+                    if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(
+                            " raptor") < 0:
                         queue_raptor_tasks(d)
-                        prg *= 0.98
+                        prg = 0.98 * len(tsks) / (len(tsks) + 1)
                         msg.append("------ RAPTOR -------")
                     else:
                         status = TaskStatus.DONE.value
@@ -356,7 +413,8 @@ class DocumentService(CommonService):
                 info["progress_msg"] = msg
                 cls.update_by_id(d["id"], info)
             except Exception as e:
-                stat_logger.error("fetch task exception:" + str(e))
+                if str(e).find("'0'") < 0:
+                    logging.exception("fetch task exception")

     @classmethod
     @DB.connection_context()
@@ -364,30 +422,37 @@ class DocumentService(CommonService):
         return len(cls.model.select(cls.model.id).where(
             cls.model.kb_id == kb_id).dicts())

     @classmethod
     @DB.connection_context()
     def do_cancel(cls, doc_id):
         try:
             _, doc = DocumentService.get_by_id(doc_id)
             return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
-        except Exception as e:
+        except Exception:
             pass
         return False


 def queue_raptor_tasks(doc):
+    chunking_config = DocumentService.get_chunking_config(doc["id"])
+    hasher = xxhash.xxh64()
+    for field in sorted(chunking_config.keys()):
+        hasher.update(str(chunking_config[field]).encode("utf-8"))
+
     def new_task():
         nonlocal doc
         return {
             "id": get_uuid(),
             "doc_id": doc["id"],
-            "from_page": 0,
-            "to_page": -1,
-            "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing For Tree-Organized Retrieval)."
+            "from_page": 100000000,
+            "to_page": 100000000,
+            "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval)."
         }

     task = new_task()
+    for field in ["doc_id", "from_page", "to_page"]:
+        hasher.update(str(task.get(field, "")).encode("utf-8"))
+    task["digest"] = hasher.hexdigest()
     bulk_insert_into_db(Task, [task], True)
     task["type"] = "raptor"
     assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
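The digest gives RAPTOR tasks a stable fingerprint: hashing the sorted chunking config plus the page-range fields means an identical re-queue produces an identical digest, which downstream code can use to recognize duplicate work. A self-contained version of the same computation (the sample values are made up):

    import xxhash

    chunking_config = {"kb_id": "kb_42", "parser_id": "naive", "language": "English"}
    task = {"doc_id": "d1", "from_page": 100000000, "to_page": 100000000}

    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):   # sorted keys => stable order
        hasher.update(str(chunking_config[field]).encode("utf-8"))
    for field in ["doc_id", "from_page", "to_page"]:
        hasher.update(str(task.get(field, "")).encode("utf-8"))
    print(hasher.hexdigest())   # deterministic for identical inputs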
@@ -395,11 +460,12 @@ def queue_raptor_tasks(doc):

 def doc_upload_and_parse(conversation_id, file_objs, user_id):
     from rag.app import presentation, picture, naive, audio, email
-    from api.db.services.dialog_service import ConversationService, DialogService
+    from api.db.services.dialog_service import DialogService
     from api.db.services.file_service import FileService
     from api.db.services.llm_service import LLMBundle
     from api.db.services.user_service import TenantService
     from api.db.services.api_service import API4ConversationService
+    from api.db.services.conversation_service import ConversationService

     e, conv = ConversationService.get_by_id(conversation_id)
     if not e:
@@ -412,11 +478,6 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
     if not e:
         raise LookupError("Can't find this knowledgebase!")

-    idxnm = search.index_name(kb.tenant_id)
-    if not ELASTICSEARCH.indexExist(idxnm):
-        ELASTICSEARCH.createIdx(idxnm, json.load(
-            open(os.path.join(get_project_base_directory(), "conf", "mapping.json"), "r")))
-
     embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id, lang=kb.language)

     err, files = FileService.upload_document(kb, file_objs, user_id)
@@ -457,10 +518,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
         for ck in th.result():
             d = deepcopy(doc)
             d.update(ck)
-            md5 = hashlib.md5()
-            md5.update((ck["content_with_weight"] +
-                        str(d["doc_id"])).encode("utf-8"))
-            d["_id"] = md5.hexdigest()
+            d["id"] = xxhash.xxh64((ck["content_with_weight"] + str(d["doc_id"])).encode("utf-8")).hexdigest()
             d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
             d["create_timestamp_flt"] = datetime.now().timestamp()
             if not d.get("image"):
@@ -473,9 +531,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
             else:
                 d["image"].save(output_buffer, format='JPEG')

-            STORAGE_IMPL.put(kb.id, d["_id"], output_buffer.getvalue())
-            d["img_id"] = "{}-{}".format(kb.id, d["_id"])
-            del d["image"]
+            STORAGE_IMPL.put(kb.id, d["id"], output_buffer.getvalue())
+            d["img_id"] = "{}-{}".format(kb.id, d["id"])
+            d.pop("image", None)
             docs.append(d)

     parser_ids = {d["id"]: d["parser_id"] for d, _ in files}
@@ -494,6 +552,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
                 token_counts[doc_id] += c
             return vects

+    idxnm = search.index_name(kb.tenant_id)
+    try_create_idx = True
+
     _, tenant = TenantService.get_by_id(kb.tenant_id)
     llm_bdl = LLMBundle(kb.tenant_id, LLMType.CHAT, tenant.llm_id)
     for doc_id in docids:
@@ -504,7 +565,8 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
         try:
             mind_map = json.dumps(mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
                                   ensure_ascii=False, indent=2)
-            if len(mind_map) < 32: raise Exception("Few content: " + mind_map)
+            if len(mind_map) < 32:
+                raise Exception("Few content: " + mind_map)
             cks.append({
                 "id": get_uuid(),
                 "doc_id": doc_id,
@@ -516,7 +578,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
                 "knowledge_graph_kwd": "mind_map"
             })
         except Exception as e:
-            stat_logger.error("Mind map generation error:", traceback.format_exc())
+            logging.exception("Mind map generation error")

         vects = embedding(doc_id, [c["content_with_weight"] for c in cks])
         assert len(cks) == len(vects)
@@ -524,9 +586,13 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
             v = vects[i]
             d["q_%d_vec" % len(v)] = v
         for b in range(0, len(cks), es_bulk_size):
-            ELASTICSEARCH.bulk(cks[b:b + es_bulk_size], idxnm)
+            if try_create_idx:
+                if not settings.docStoreConn.indexExist(idxnm, kb_id):
+                    settings.docStoreConn.createIdx(idxnm, kb_id, len(vects[0]))
+                try_create_idx = False
+            settings.docStoreConn.insert(cks[b:b + es_bulk_size], idxnm, kb_id)

         DocumentService.increment_chunk_num(
             doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0)

-    return [d["id"] for d,_ in files]
+    return [d["id"] for d, _ in files]
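Chunk ids switch from md5 to content-addressed xxh64: hashing the chunk text plus the parent document id makes re-parsing identical content reproduce the same id, and xxh64 is considerably faster than a cryptographic hash. The derivation, isolated:

    import xxhash

    def chunk_id(content_with_weight: str, doc_id: str) -> str:
        return xxhash.xxh64((content_with_weight + doc_id).encode("utf-8")).hexdigest()

    print(chunk_id("Refunds take 5 days.", "doc-1"))   # stable across re-uploads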
@@ -20,7 +20,7 @@ from api.db.db_models import DB
 from api.db.db_models import File, File2Document
 from api.db.services.common_service import CommonService
 from api.db.services.document_service import DocumentService
-from api.utils import current_timestamp, datetime_format, get_uuid
+from api.utils import current_timestamp, datetime_format


 class File2DocumentService(CommonService):
@@ -63,7 +63,7 @@ class File2DocumentService(CommonService):
     def update_by_file_id(cls, file_id, obj):
         obj["update_time"] = current_timestamp()
         obj["update_date"] = datetime_format(datetime.now())
-        num = cls.model.update(obj).where(cls.model.id == file_id).execute()
+        # num = cls.model.update(obj).where(cls.model.id == file_id).execute()
         e, obj = cls.get_by_id(cls.model.id)
         return obj

@@ -13,8 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import re
 import os
+from concurrent.futures import ThreadPoolExecutor

 from flask_login import current_user
 from peewee import fn

@@ -26,7 +29,7 @@ from api.db.services.common_service import CommonService
 from api.db.services.document_service import DocumentService
 from api.db.services.file2document_service import File2DocumentService
 from api.utils import get_uuid
-from api.utils.file_utils import filename_type, thumbnail
+from api.utils.file_utils import filename_type, thumbnail_img
 from rag.utils.storage_factory import STORAGE_IMPL


@@ -82,7 +85,8 @@ class FileService(CommonService):
                .join(Document, on=(File2Document.document_id == Document.id))
                .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
                .where(cls.model.id == file_id))
-        if not kbs: return []
+        if not kbs:
+            return []
         kbs_info_list = []
         for kb in list(kbs.dicts()):
             kbs_info_list.append({"kb_id": kb['id'], "kb_name": kb['name']})
@@ -272,8 +276,8 @@ class FileService(CommonService):
                 cls.delete_folder_by_pf_id(user_id, file.id)
             return cls.model.delete().where((cls.model.tenant_id == user_id)
                                             & (cls.model.id == folder_id)).execute(),
-        except Exception as e:
-            print(e)
+        except Exception:
+            logging.exception("delete_folder_by_pf_id")
             raise RuntimeError("Database error (File retrieval)!")

     @classmethod
@@ -301,7 +305,8 @@ class FileService(CommonService):
     @classmethod
     @DB.connection_context()
     def add_file_from_kb(cls, doc, kb_folder_id, tenant_id):
-        for _ in File2DocumentService.get_by_document_id(doc["id"]): return
+        for _ in File2DocumentService.get_by_document_id(doc["id"]):
+            return
         file = {
             "id": get_uuid(),
             "parent_id": kb_folder_id,
@@ -321,8 +326,8 @@ class FileService(CommonService):
     def move_file(cls, file_ids, folder_id):
         try:
             cls.filter_update((cls.model.id << file_ids, ), { 'parent_id': folder_id })
-        except Exception as e:
-            print(e)
+        except Exception:
+            logging.exception("move_file")
             raise RuntimeError("Database error (File move)!")

     @classmethod
@@ -354,8 +359,17 @@ class FileService(CommonService):
                 location += "_"
             blob = file.read()
             STORAGE_IMPL.put(kb.id, location, blob)
+
+            doc_id = get_uuid()
+
+            img = thumbnail_img(filename, blob)
+            thumbnail_location = ''
+            if img is not None:
+                thumbnail_location = f'thumbnail_{doc_id}.png'
+                STORAGE_IMPL.put(kb.id, thumbnail_location, img)
+
             doc = {
-                "id": get_uuid(),
+                "id": doc_id,
                 "kb_id": kb.id,
                 "parser_id": self.get_parser(filetype, filename, kb.parser_id),
                 "parser_config": kb.parser_config,
@@ -364,7 +378,7 @@ class FileService(CommonService):
                 "name": filename,
                 "location": location,
                 "size": len(blob),
-                "thumbnail": thumbnail(filename, blob)
+                "thumbnail": thumbnail_location
             }
             DocumentService.insert(doc)

@@ -375,6 +389,41 @@ class FileService(CommonService):

         return err, files

+    @staticmethod
+    def parse_docs(file_objs, user_id):
+        from rag.app import presentation, picture, naive, audio, email
+
+        def dummy(prog=None, msg=""):
+            pass
+
+        FACTORY = {
+            ParserType.PRESENTATION.value: presentation,
+            ParserType.PICTURE.value: picture,
+            ParserType.AUDIO.value: audio,
+            ParserType.EMAIL.value: email
+        }
+        parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
+        exe = ThreadPoolExecutor(max_workers=12)
+        threads = []
+        for file in file_objs:
+            kwargs = {
+                "lang": "English",
+                "callback": dummy,
+                "parser_config": parser_config,
+                "from_page": 0,
+                "to_page": 100000,
+                "tenant_id": user_id
+            }
+            filetype = filename_type(file.filename)
+            blob = file.read()
+            threads.append(exe.submit(FACTORY.get(FileService.get_parser(filetype, file.filename, ""), naive).chunk, file.filename, blob, **kwargs))
+
+        res = []
+        for th in threads:
+            res.append("\n".join([ck["content_with_weight"] for ck in th.result()]))
+
+        return "\n\n".join(res)
+
     @staticmethod
     def get_parser(doc_type, filename, default):
         if doc_type == FileType.VISUAL:
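parse_docs fans parsing out over a 12-worker thread pool and picks the parser module by file type, falling back to naive. The submit/collect pattern in miniature (the stub chunker stands in for the rag.app parsers):

    from concurrent.futures import ThreadPoolExecutor

    def chunk(filename, blob, **kwargs):   # stand-in for rag.app parsers
        return [{"content_with_weight": f"{filename}: {len(blob)} bytes"}]

    exe = ThreadPoolExecutor(max_workers=12)
    threads = [exe.submit(chunk, name, b"data") for name in ["a.pdf", "b.docx"]]
    print("\n\n".join("\n".join(ck["content_with_weight"] for ck in t.result())
                      for t in threads))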
@@ -14,8 +14,9 @@
 # limitations under the License.
 #
 from api.db import StatusEnum, TenantPermission
-from api.db.db_models import Knowledgebase, DB, Tenant
+from api.db.db_models import Knowledgebase, DB, Tenant, User, UserTenant, Document
 from api.db.services.common_service import CommonService
+from peewee import fn


 class KnowledgebaseService(CommonService):
@@ -23,54 +24,76 @@ class KnowledgebaseService(CommonService):

     @classmethod
     @DB.connection_context()
-    def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
-                          page_number, items_per_page, orderby, desc):
-        kbs = cls.model.select().where(
-            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
-                                                            TenantPermission.TEAM.value)) | (
-                cls.model.tenant_id == user_id))
-            & (cls.model.status == StatusEnum.VALID.value)
+    def list_documents_by_ids(cls, kb_ids):
+        doc_ids = cls.model.select(Document.id.alias("document_id")).join(Document, on=(cls.model.id == Document.kb_id)).where(
+            cls.model.id.in_(kb_ids)
         )
-        if desc:
-            kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
-        else:
-            kbs = kbs.order_by(cls.model.getter_by(orderby).asc())
-
-        kbs = kbs.paginate(page_number, items_per_page)
-
-        return list(kbs.dicts())
+        doc_ids = list(doc_ids.dicts())
+        doc_ids = [doc["document_id"] for doc in doc_ids]
+        return doc_ids

     @classmethod
     @DB.connection_context()
-    def get_by_tenant_ids_by_offset(cls, joined_tenant_ids, user_id, offset, count, orderby, desc):
-        kbs = cls.model.select().where(
-            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
-                                                            TenantPermission.TEAM.value)) | (
-                cls.model.tenant_id == user_id))
-            & (cls.model.status == StatusEnum.VALID.value)
-        )
+    def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
+                          page_number, items_per_page, orderby, desc, keywords):
+        fields = [
+            cls.model.id,
+            cls.model.avatar,
+            cls.model.name,
+            cls.model.language,
+            cls.model.description,
+            cls.model.permission,
+            cls.model.doc_num,
+            cls.model.token_num,
+            cls.model.chunk_num,
+            cls.model.parser_id,
+            cls.model.embd_id,
+            User.nickname,
+            User.avatar.alias('tenant_avatar'),
+            cls.model.update_time
+        ]
+        if keywords:
+            kbs = cls.model.select(*fields).join(User, on=(cls.model.tenant_id == User.id)).where(
+                ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
+                                                                TenantPermission.TEAM.value)) | (
+                    cls.model.tenant_id == user_id))
+                & (cls.model.status == StatusEnum.VALID.value),
+                (fn.LOWER(cls.model.name).contains(keywords.lower()))
+            )
+        else:
+            kbs = cls.model.select(*fields).join(User, on=(cls.model.tenant_id == User.id)).where(
+                ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
+                                                                TenantPermission.TEAM.value)) | (
+                    cls.model.tenant_id == user_id))
+                & (cls.model.status == StatusEnum.VALID.value)
+            )
         if desc:
             kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
         else:
             kbs = kbs.order_by(cls.model.getter_by(orderby).asc())

-        kbs = list(kbs.dicts())
+        count = kbs.count()

-        kbs_length = len(kbs)
-        if offset < 0 or offset > kbs_length:
-            raise IndexError("Offset is out of the valid range.")
+        kbs = kbs.paginate(page_number, items_per_page)

-        if count == -1:
-            return kbs[offset:]
+        return list(kbs.dicts()), count

-        return kbs[offset:offset+count]
+    @classmethod
+    @DB.connection_context()
+    def get_kb_ids(cls, tenant_id):
+        fields = [
+            cls.model.id,
+        ]
+        kbs = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id)
+        kb_ids = [kb.id for kb in kbs]
+        return kb_ids

     @classmethod
     @DB.connection_context()
     def get_detail(cls, kb_id):
         fields = [
             cls.model.id,
-            #Tenant.embd_id,
+            # Tenant.embd_id,
             cls.model.embd_id,
             cls.model.avatar,
             cls.model.name,
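
In the rewritten get_by_tenant_ids above, the keywords branch adds a single extra WHERE term; lowering both sides makes the substring match case-insensitive regardless of database collation. The same pattern in isolation, as a sketch reusing the models already imported in this file:

from peewee import fn

def kbs_matching(keywords: str):
    # Generates LOWER(name) LIKE '%<keywords>%'
    return Knowledgebase.select().where(
        fn.LOWER(Knowledgebase.name).contains(keywords.lower()))
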
@@ -81,16 +104,17 @@ class KnowledgebaseService(CommonService):
             cls.model.token_num,
             cls.model.chunk_num,
             cls.model.parser_id,
-            cls.model.parser_config]
+            cls.model.parser_config,
+            cls.model.pagerank]
         kbs = cls.model.select(*fields).join(Tenant, on=(
             (Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
             (cls.model.id == kb_id),
             (cls.model.status == StatusEnum.VALID.value)
         )
         if not kbs:
             return
         d = kbs[0].to_dict()
-        #d["embd_id"] = kbs[0].tenant.embd_id
+        # d["embd_id"] = kbs[0].tenant.embd_id
         return d

     @classmethod
@@ -142,3 +166,65 @@ class KnowledgebaseService(CommonService):
     @DB.connection_context()
     def get_all_ids(cls):
         return [m["id"] for m in cls.model.select(cls.model.id).dicts()]
+
+    @classmethod
+    @DB.connection_context()
+    def get_list(cls, joined_tenant_ids, user_id,
+                 page_number, items_per_page, orderby, desc, id, name):
+        kbs = cls.model.select()
+        if id:
+            kbs = kbs.where(cls.model.id == id)
+        if name:
+            kbs = kbs.where(cls.model.name == name)
+        kbs = kbs.where(
+            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
+                                                            TenantPermission.TEAM.value)) | (
+                cls.model.tenant_id == user_id))
+            & (cls.model.status == StatusEnum.VALID.value)
+        )
+        if desc:
+            kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            kbs = kbs.order_by(cls.model.getter_by(orderby).asc())
+
+        kbs = kbs.paginate(page_number, items_per_page)
+
+        return list(kbs.dicts())
+
+    @classmethod
+    @DB.connection_context()
+    def accessible(cls, kb_id, user_id):
+        docs = cls.model.select(
+            cls.model.id).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
+        ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
+        docs = docs.dicts()
+        if not docs:
+            return False
+        return True
+
+    @classmethod
+    @DB.connection_context()
+    def get_kb_by_id(cls, kb_id, user_id):
+        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
+        ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
+        kbs = kbs.dicts()
+        return list(kbs)
+
+    @classmethod
+    @DB.connection_context()
+    def get_kb_by_name(cls, kb_name, user_id):
+        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
+        ).where(cls.model.name == kb_name, UserTenant.user_id == user_id).paginate(0, 1)
+        kbs = kbs.dicts()
+        return list(kbs)
+
+    @classmethod
+    @DB.connection_context()
+    def accessible4deletion(cls, kb_id, user_id):
+        docs = cls.model.select(
+            cls.model.id).where(cls.model.id == kb_id, cls.model.created_by == user_id).paginate(0, 1)
+        docs = docs.dicts()
+        if not docs:
+            return False
+        return True
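
accessible() above answers "can this user see the knowledge base via tenant membership", while accessible4deletion() is stricter and additionally requires created_by == user_id. A sketch of the intended call site; the handler name and error payload are hypothetical, not from this diff:

def rm_kb(kb_id: str, current_user_id: str):
    if not KnowledgebaseService.accessible4deletion(kb_id, current_user_id):
        return {"message": "No authorization."}   # hypothetical error payload
    KnowledgebaseService.delete_by_id(kb_id)      # assumed inherited from CommonService
    return {"message": "ok"}
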
@@ -13,8 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
+import logging
+import os
+
 from api.db.services.user_service import TenantService
-from api.settings import database_logger
+from api.utils.file_utils import get_project_base_directory
 from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel, Seq2txtModel, TTSModel
 from api.db import LLMType
 from api.db.db_models import DB
@@ -36,11 +40,11 @@ class TenantLLMService(CommonService):
     @classmethod
     @DB.connection_context()
     def get_api_key(cls, tenant_id, model_name):
-        arr = model_name.split("@")
-        if len(arr) < 2:
-            objs = cls.query(tenant_id=tenant_id, llm_name=model_name)
+        mdlnm, fid = TenantLLMService.split_model_name_and_factory(model_name)
+        if not fid:
+            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm)
         else:
-            objs = cls.query(tenant_id=tenant_id, llm_name=arr[0], llm_factory=arr[1])
+            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)
         if not objs:
             return
         return objs[0]
@@ -61,6 +65,23 @@ class TenantLLMService(CommonService):

         return list(objs)

+    @staticmethod
+    def split_model_name_and_factory(model_name):
+        arr = model_name.split("@")
+        if len(arr) < 2:
+            return model_name, None
+        if len(arr) > 2:
+            return "@".join(arr[0:-1]), arr[-1]
+        try:
+            fact = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
+            fact = set([f["name"] for f in fact])
+            if arr[-1] not in fact:
+                return model_name, None
+            return arr[0], arr[-1]
+        except Exception as e:
+            logging.exception(f"TenantLLMService.split_model_name_and_factory got exception: {e}")
+            return model_name, None
+
     @classmethod
     @DB.connection_context()
     def model_instance(cls, tenant_id, llm_type,
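
split_model_name_and_factory above only strips a trailing "@<factory>" suffix when it can be trusted: with exactly one "@" the suffix must appear among the factory names in conf/llm_factories.json, while with two or more "@" the last segment is taken as the factory unconditionally. Expected behavior as a sketch (assuming "OpenAI" is registered in that file):

TenantLLMService.split_model_name_and_factory("gpt-4o")          # ("gpt-4o", None)
TenantLLMService.split_model_name_and_factory("gpt-4o@OpenAI")   # ("gpt-4o", "OpenAI")
TenantLLMService.split_model_name_and_factory("a@b@OpenAI")      # ("a@b", "OpenAI"), no registry check
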
@@ -85,10 +106,9 @@ class TenantLLMService(CommonService):
             assert False, "LLM type error"

         model_config = cls.get_api_key(tenant_id, mdlnm)
-        tmp = mdlnm.split("@")
-        fid = None if len(tmp) < 2 else tmp[1]
-        mdlnm = tmp[0]
-        if model_config: model_config = model_config.to_dict()
+        mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
+        if model_config:
+            model_config = model_config.to_dict()
         if not model_config:
             if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
                 llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
@@ -133,7 +153,8 @@ class TenantLLMService(CommonService):
             if model_config["llm_factory"] not in Seq2txtModel:
                 return
             return Seq2txtModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], lang,
+                key=model_config["api_key"], model_name=model_config["llm_name"],
+                lang=lang,
                 base_url=model_config["api_base"]
             )
         if llm_type == LLMType.TTS:
@@ -167,14 +188,23 @@ class TenantLLMService(CommonService):
         else:
             assert False, "LLM type error"

+        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)
+
         num = 0
         try:
-            for u in cls.query(tenant_id=tenant_id, llm_name=mdlnm):
-                num += cls.model.update(used_tokens=u.used_tokens + used_tokens)\
-                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == mdlnm)\
+            if llm_factory:
+                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory)
+            else:
+                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name)
+            if not tenant_llms:
+                return num
+            else:
+                tenant_llm = tenant_llms[0]
+                num = cls.model.update(used_tokens=tenant_llm.used_tokens + used_tokens)\
+                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_factory == tenant_llm.llm_factory, cls.model.llm_name == llm_name)\
                     .execute()
-        except Exception as e:
-            pass
+        except Exception:
+            logging.exception("TenantLLMService.increase_usage got exception")
         return num

     @classmethod
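
The reworked increase_usage above updates a single row scoped by tenant, factory, and model name, and returns the number of rows updated (0 when no matching row exists or on error). Callers treat a falsy return as "usage not recorded", as the LLMBundle methods below do; a sketch:

updated = TenantLLMService.increase_usage(tenant_id, llm_type, used_tokens, llm_name)
if not updated:
    logging.error("token usage not recorded for %s/%s", tenant_id, llm_name)
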
@@ -195,51 +225,51 @@ class LLMBundle(object):
         self.llm_name = llm_name
         self.mdl = TenantLLMService.model_instance(
             tenant_id, llm_type, llm_name, lang=lang)
-        assert self.mdl, "Can't find mole for {}/{}/{}".format(
+        assert self.mdl, "Can't find model for {}/{}/{}".format(
             tenant_id, llm_type, llm_name)
         self.max_length = 8192
         for lm in LLMService.query(llm_name=llm_name):
             self.max_length = lm.max_tokens
             break

-    def encode(self, texts: list, batch_size=32):
-        emd, used_tokens = self.mdl.encode(texts, batch_size)
+    def encode(self, texts: list):
+        embeddings, used_tokens = self.mdl.encode(texts)
         if not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens):
-            database_logger.error(
-                "Can't update token usage for {}/EMBEDDING".format(self.tenant_id))
-        return emd, used_tokens
+            logging.error(
+                "LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
+        return embeddings, used_tokens

     def encode_queries(self, query: str):
         emd, used_tokens = self.mdl.encode_queries(query)
         if not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens):
-            database_logger.error(
-                "Can't update token usage for {}/EMBEDDING".format(self.tenant_id))
+            logging.error(
+                "LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
         return emd, used_tokens

     def similarity(self, query: str, texts: list):
         sim, used_tokens = self.mdl.similarity(query, texts)
         if not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens):
-            database_logger.error(
-                "Can't update token usage for {}/RERANK".format(self.tenant_id))
+            logging.error(
+                "LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
         return sim, used_tokens

     def describe(self, image, max_tokens=300):
         txt, used_tokens = self.mdl.describe(image, max_tokens)
         if not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens):
-            database_logger.error(
-                "Can't update token usage for {}/IMAGE2TEXT".format(self.tenant_id))
+            logging.error(
+                "LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
         return txt

     def transcription(self, audio):
         txt, used_tokens = self.mdl.transcription(audio)
         if not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens):
-            database_logger.error(
-                "Can't update token usage for {}/SEQUENCE2TXT".format(self.tenant_id))
+            logging.error(
+                "LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
         return txt

     def tts(self, text):
@@ -247,17 +277,17 @@ class LLMBundle(object):
             if isinstance(chunk, int):
                 if not TenantLLMService.increase_usage(
                         self.tenant_id, self.llm_type, chunk, self.llm_name):
-                    database_logger.error(
-                        "Can't update token usage for {}/TTS".format(self.tenant_id))
+                    logging.error(
+                        "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                 return
             yield chunk

     def chat(self, system, history, gen_conf):
         txt, used_tokens = self.mdl.chat(system, history, gen_conf)
-        if not TenantLLMService.increase_usage(
+        if isinstance(txt, int) and not TenantLLMService.increase_usage(
                 self.tenant_id, self.llm_type, used_tokens, self.llm_name):
-            database_logger.error(
-                "Can't update token usage for {}/CHAT".format(self.tenant_id))
+            logging.error(
+                "LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
         return txt

     def chat_streamly(self, system, history, gen_conf):
@@ -265,7 +295,7 @@ class LLMBundle(object):
         if isinstance(txt, int):
             if not TenantLLMService.increase_usage(
                     self.tenant_id, self.llm_type, txt, self.llm_name):
-                database_logger.error(
-                    "Can't update token usage for {}/CHAT".format(self.tenant_id))
+                logging.error(
+                    "LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
             return
         yield txt
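
All LLMBundle methods above follow one pattern: call the underlying model, meter tokens through increase_usage, and log through the standard logging module (with the method name in the message) when metering fails. A minimal usage sketch; the tenant id is a placeholder and the constructor argument order follows the model_instance call shown in the hunk:

from api.db import LLMType

bundle = LLMBundle(tenant_id, LLMType.EMBEDDING)             # model resolved via TenantLLMService
embeddings, used_tokens = bundle.encode(["hello", "world"])  # usage recorded as a side effect
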
@@ -15,6 +15,8 @@
 #
 import os
 import random
+import xxhash
+import bisect

 from api.db.db_utils import bulk_insert_into_db
 from deepdoc.parser import PdfParser
@@ -29,6 +31,18 @@ from deepdoc.parser.excel_parser import RAGFlowExcelParser
 from rag.settings import SVR_QUEUE_NAME
 from rag.utils.storage_factory import STORAGE_IMPL
 from rag.utils.redis_conn import REDIS_CONN
+from api import settings
+from rag.nlp import search


+def trim_header_by_lines(text: str, max_length) -> str:
+    len_text = len(text)
+    if len_text <= max_length:
+        return text
+    for i in range(len_text):
+        if text[i] == '\n' and len_text - i <= max_length:
+            return text[i + 1:]
+    return text
+
+
 class TaskService(CommonService):
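
The new trim_header_by_lines above keeps the tail of a growing progress log: it drops whole leading lines until the remainder fits, cutting at a newline so no line is split. When trimming occurs the remainder is strictly shorter than max_length, and a single oversized line with no newline is returned unchanged. Expected behavior as a sketch:

trim_header_by_lines("a\nbb\nccc", 7)    # "bb\nccc"   (drops the first line)
trim_header_by_lines("a\nbb\nccc", 100)  # "a\nbb\nccc" (already short enough)
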
@@ -36,7 +50,7 @@ class TaskService(CommonService):

     @classmethod
     @DB.connection_context()
-    def get_tasks(cls, task_id):
+    def get_task(cls, task_id):
         fields = [
             cls.model.id,
             cls.model.doc_id,
@@ -53,92 +67,143 @@ class TaskService(CommonService):
             Knowledgebase.tenant_id,
             Knowledgebase.language,
             Knowledgebase.embd_id,
+            Knowledgebase.pagerank,
             Tenant.img2txt_id,
             Tenant.asr_id,
             Tenant.llm_id,
-            cls.model.update_time]
-        docs = cls.model.select(*fields) \
-            .join(Document, on=(cls.model.doc_id == Document.id)) \
-            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id)) \
-            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
-            .where(cls.model.id == task_id)
+            cls.model.update_time,
+        ]
+        docs = (
+            cls.model.select(*fields)
+            .join(Document, on=(cls.model.doc_id == Document.id))
+            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id))
+            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
+            .where(cls.model.id == task_id)
+        )
         docs = list(docs.dicts())
-        if not docs: return []
+        if not docs:
+            return None
+
         msg = "\nTask has been received."
-        prog = random.random() / 10.
+        prog = random.random() / 10.0
         if docs[0]["retry_count"] >= 3:
             msg = "\nERROR: Task is abandoned after 3 times attempts."
             prog = -1

-        cls.model.update(progress_msg=cls.model.progress_msg + msg,
-                         progress=prog,
-                         retry_count=docs[0]["retry_count"]+1
-                         ).where(
-            cls.model.id == docs[0]["id"]).execute()
+        cls.model.update(
+            progress_msg=cls.model.progress_msg + msg,
+            progress=prog,
+            retry_count=docs[0]["retry_count"] + 1,
+        ).where(cls.model.id == docs[0]["id"]).execute()

-        if docs[0]["retry_count"] >= 3: return []
+        if docs[0]["retry_count"] >= 3:
+            return None

-        return docs
+        return docs[0]

+    @classmethod
+    @DB.connection_context()
+    def get_tasks(cls, doc_id: str):
+        fields = [
+            cls.model.id,
+            cls.model.from_page,
+            cls.model.progress,
+            cls.model.digest,
+            cls.model.chunk_ids,
+        ]
+        tasks = (
+            cls.model.select(*fields).order_by(cls.model.from_page.asc(), cls.model.create_time.desc())
+            .where(cls.model.doc_id == doc_id)
+        )
+        tasks = list(tasks.dicts())
+        if not tasks:
+            return None
+        return tasks
+
+    @classmethod
+    @DB.connection_context()
+    def update_chunk_ids(cls, id: str, chunk_ids: str):
+        cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()
+
     @classmethod
     @DB.connection_context()
     def get_ongoing_doc_name(cls):
         with DB.lock("get_task", -1):
-            docs = cls.model.select(*[Document.id, Document.kb_id, Document.location, File.parent_id]) \
-                .join(Document, on=(cls.model.doc_id == Document.id)) \
-                .join(File2Document, on=(File2Document.document_id == Document.id), join_type=JOIN.LEFT_OUTER) \
-                .join(File, on=(File2Document.file_id == File.id), join_type=JOIN.LEFT_OUTER) \
-                .where(
+            docs = (
+                cls.model.select(
+                    *[Document.id, Document.kb_id, Document.location, File.parent_id]
+                )
+                .join(Document, on=(cls.model.doc_id == Document.id))
+                .join(
+                    File2Document,
+                    on=(File2Document.document_id == Document.id),
+                    join_type=JOIN.LEFT_OUTER,
+                )
+                .join(
+                    File,
+                    on=(File2Document.file_id == File.id),
+                    join_type=JOIN.LEFT_OUTER,
+                )
+                .where(
                     Document.status == StatusEnum.VALID.value,
                     Document.run == TaskStatus.RUNNING.value,
                     ~(Document.type == FileType.VIRTUAL.value),
                     cls.model.progress < 1,
-                    cls.model.create_time >= current_timestamp() - 1000 * 600
+                    cls.model.create_time >= current_timestamp() - 1000 * 600,
                 )
+            )
             docs = list(docs.dicts())
-            if not docs: return []
+            if not docs:
+                return []

-            return list(set([(d["parent_id"] if d["parent_id"] else d["kb_id"], d["location"]) for d in docs]))
+            return list(
+                set(
+                    [
+                        (
+                            d["parent_id"] if d["parent_id"] else d["kb_id"],
+                            d["location"],
+                        )
+                        for d in docs
+                    ]
+                )
+            )

     @classmethod
     @DB.connection_context()
     def do_cancel(cls, id):
-        try:
-            task = cls.model.get_by_id(id)
-            _, doc = DocumentService.get_by_id(task.doc_id)
-            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
-        except Exception as e:
-            pass
-        return False
+        task = cls.model.get_by_id(id)
+        _, doc = DocumentService.get_by_id(task.doc_id)
+        return doc.run == TaskStatus.CANCEL.value or doc.progress < 0

     @classmethod
     @DB.connection_context()
     def update_progress(cls, id, info):
         if os.environ.get("MACOS"):
             if info["progress_msg"]:
-                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
-                    cls.model.id == id).execute()
+                task = cls.model.get_by_id(id)
+                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 1000)
+                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
             if "progress" in info:
                 cls.model.update(progress=info["progress"]).where(
-                    cls.model.id == id).execute()
+                    cls.model.id == id
+                ).execute()
             return

         with DB.lock("update_progress", -1):
             if info["progress_msg"]:
-                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
-                    cls.model.id == id).execute()
+                task = cls.model.get_by_id(id)
+                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 1000)
+                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
             if "progress" in info:
                 cls.model.update(progress=info["progress"]).where(
-                    cls.model.id == id).execute()
+                    cls.model.id == id
+                ).execute()


 def queue_tasks(doc: dict, bucket: str, name: str):
     def new_task():
-        return {
-            "id": get_uuid(),
-            "doc_id": doc["id"]
-        }
+        return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}
     tsks = []

     if doc["type"] == FileType.PDF.value:
@@ -172,8 +237,57 @@ def queue_tasks(doc: dict, bucket: str, name: str):
     else:
         tsks.append(new_task())

+    chunking_config = DocumentService.get_chunking_config(doc["id"])
+    for task in tsks:
+        hasher = xxhash.xxh64()
+        for field in sorted(chunking_config.keys()):
+            hasher.update(str(chunking_config[field]).encode("utf-8"))
+        for field in ["doc_id", "from_page", "to_page"]:
+            hasher.update(str(task.get(field, "")).encode("utf-8"))
+        task_digest = hasher.hexdigest()
+        task["digest"] = task_digest
+        task["progress"] = 0.0
+
+    prev_tasks = TaskService.get_tasks(doc["id"])
+    ck_num = 0
+    if prev_tasks:
+        for task in tsks:
+            ck_num += reuse_prev_task_chunks(task, prev_tasks, chunking_config)
+        TaskService.filter_delete([Task.doc_id == doc["id"]])
+        chunk_ids = []
+        for task in prev_tasks:
+            if task["chunk_ids"]:
+                chunk_ids.extend(task["chunk_ids"].split())
+        if chunk_ids:
+            settings.docStoreConn.delete({"id": chunk_ids}, search.index_name(chunking_config["tenant_id"]),
+                                         chunking_config["kb_id"])
+    DocumentService.update_by_id(doc["id"], {"chunk_num": ck_num})
+
     bulk_insert_into_db(Task, tsks, True)
     DocumentService.begin2parse(doc["id"])

+    tsks = [task for task in tsks if task["progress"] < 1.0]
     for t in tsks:
-        assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=t), "Can't access Redis. Please check the Redis' status."
+        assert REDIS_CONN.queue_product(
+            SVR_QUEUE_NAME, message=t
+        ), "Can't access Redis. Please check the Redis' status."
+
+
+def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
+    idx = bisect.bisect_left(prev_tasks, (task.get("from_page", 0), task.get("digest", "")),
+                             key=lambda x: (x.get("from_page", 0), x.get("digest", "")))
+    if idx >= len(prev_tasks):
+        return 0
+    prev_task = prev_tasks[idx]
+    if prev_task["progress"] < 1.0 or prev_task["digest"] != task["digest"] or not prev_task["chunk_ids"]:
+        return 0
+    task["chunk_ids"] = prev_task["chunk_ids"]
+    task["progress"] = 1.0
+    if "from_page" in task and "to_page" in task:
+        task["progress_msg"] = f"Page({task['from_page']}~{task['to_page']}): "
+    else:
+        task["progress_msg"] = ""
+    task["progress_msg"] += "reused previous task's chunks."
+    prev_task["chunk_ids"] = ""
+
+    return len(task["chunk_ids"].split())
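
The queue_tasks changes above make re-parsing incremental: each task is fingerprinted with xxhash64 over every chunking-config field (in sorted order, for stability) plus its doc_id/from_page/to_page, and reuse_prev_task_chunks adopts a previous task's chunks only on an exact (from_page, digest) match located with bisect. Note that bisect.bisect_left's key= parameter requires Python 3.10 or later. The digest in isolation, mirroring the diff:

import xxhash

def task_digest(chunking_config: dict, task: dict) -> str:
    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):          # stable field order
        hasher.update(str(chunking_config[field]).encode("utf-8"))
    for field in ["doc_id", "from_page", "to_page"]:
        hasher.update(str(task.get(field, "")).encode("utf-8"))
    return hasher.hexdigest()
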
@@ -22,7 +22,7 @@ from api.db import UserTenantRole
 from api.db.db_models import DB, UserTenant
 from api.db.db_models import User, Tenant
 from api.db.services.common_service import CommonService
-from api.utils import get_uuid, get_format_time, current_timestamp, datetime_format
+from api.utils import get_uuid, current_timestamp, datetime_format
 from api.db import StatusEnum


@@ -87,7 +87,7 @@ class TenantService(CommonService):

     @classmethod
     @DB.connection_context()
-    def get_by_user_id(cls, user_id):
+    def get_info_by(cls, user_id):
         fields = [
             cls.model.id.alias("tenant_id"),
             cls.model.name,
@@ -100,7 +100,7 @@ class TenantService(CommonService):
             cls.model.parser_ids,
             UserTenant.role]
         return list(cls.model.select(*fields)
-                    .join(UserTenant, on=((cls.model.id == UserTenant.tenant_id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value)))
+                    .join(UserTenant, on=((cls.model.id == UserTenant.tenant_id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value) & (UserTenant.role == UserTenantRole.OWNER)))
                    .where(cls.model.status == StatusEnum.VALID.value).dicts())

     @classmethod
@@ -115,7 +115,7 @@ class TenantService(CommonService):
             cls.model.img2txt_id,
             UserTenant.role]
         return list(cls.model.select(*fields)
-                    .join(UserTenant, on=((cls.model.id == UserTenant.tenant_id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value) & (UserTenant.role == UserTenantRole.NORMAL.value)))
+                    .join(UserTenant, on=((cls.model.id == UserTenant.tenant_id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value) & (UserTenant.role == UserTenantRole.NORMAL)))
                    .where(cls.model.status == StatusEnum.VALID.value).dicts())

     @classmethod
@@ -143,9 +143,8 @@ class UserTenantService(CommonService):
     def get_by_tenant_id(cls, tenant_id):
         fields = [
             cls.model.user_id,
-            cls.model.tenant_id,
-            cls.model.role,
             cls.model.status,
+            cls.model.role,
             User.nickname,
             User.email,
             User.avatar,
@@ -153,8 +152,24 @@ class UserTenantService(CommonService):
             User.is_active,
             User.is_anonymous,
             User.status,
+            User.update_date,
             User.is_superuser]
         return list(cls.model.select(*fields)
-                    .join(User, on=((cls.model.user_id == User.id) & (cls.model.status == StatusEnum.VALID.value)))
+                    .join(User, on=((cls.model.user_id == User.id) & (cls.model.status == StatusEnum.VALID.value) & (cls.model.role != UserTenantRole.OWNER)))
                    .where(cls.model.tenant_id == tenant_id)
                    .dicts())
+
+    @classmethod
+    @DB.connection_context()
+    def get_tenants_by_user_id(cls, user_id):
+        fields = [
+            cls.model.tenant_id,
+            cls.model.role,
+            User.nickname,
+            User.email,
+            User.avatar,
+            User.update_date
+        ]
+        return list(cls.model.select(*fields)
+                    .join(User, on=((cls.model.tenant_id == User.id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value)))
+                    .where(cls.model.status == StatusEnum.VALID.value).dicts())