Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)

Compare commits (861 commits)

.gitattributes (1 change)

```diff
@@ -1 +1,2 @@
 *.sh text eol=lf
+docker/entrypoint.sh text eol=lf executable
```

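The new rule forces LF endings on the entrypoint script even on Windows checkouts. A quick way to confirm the attributes took effect — a sketch using standard git tooling, run from the repo root:

```bash
# Ask git which attributes it resolves for the entrypoint script.
git check-attr text eol -- docker/entrypoint.sh
# Expected, given the rules above:
#   docker/entrypoint.sh: text: set
#   docker/entrypoint.sh: eol: lf
```
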
.github/ISSUE_TEMPLATE/bug_report.yml (16 changes)

```diff
@@ -1,14 +1,20 @@
-name: Bug Report
+name: "🐞 Bug Report"
 description: Create a bug issue for RAGFlow
 title: "[Bug]: "
-labels: [bug]
+labels: ["🐞 bug"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same bug?
-      description: Please check if an issue already exists for the bug you encountered.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
           required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
```

.github/ISSUE_TEMPLATE/feature_request.md (10 changes, file deleted)

```diff
@@ -1,10 +0,0 @@
----
-name: Feature request
-title: '[Feature Request]: '
-about: Suggest an idea for RAGFlow
-labels: ''
----
-
-**Summary**
-
-Description for this feature.
```

.github/ISSUE_TEMPLATE/feature_request.yml (16 changes)

```diff
@@ -1,14 +1,20 @@
-name: Feature request
+name: "💞 Feature request"
 description: Propose a feature request for RAGFlow.
 title: "[Feature Request]: "
-labels: [feature request]
+labels: ["💞 feature"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same feature request?
-      description: Please check if an issue already exists for the feature you request.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
           required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: textarea
     attributes:
```

.github/ISSUE_TEMPLATE/question.yml (17 changes)

```diff
@@ -1,8 +1,21 @@
-name: Question
+name: "🙋‍♀️ Question"
 description: Ask questions on RAGFlow
 title: "[Question]: "
-labels: [question]
+labels: ["🙋‍♀️ question"]
 body:
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: |
```

.github/workflows/release.yml (6 changes)

```diff
@@ -75,12 +75,6 @@ jobs:
           # The body field does not support environment variable substitution directly.
           body_path: release_body.md

-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
       # https://github.com/marketplace/actions/docker-login
       - name: Login to Docker Hub
         uses: docker/login-action@v3
```

.github/workflows/tests.yml (54 changes)

```diff
@@ -15,6 +15,8 @@ on:
       - 'docs/**'
       - '*.md'
       - '*.mdx'
+  schedule:
+    - cron: '0 16 * * *' # This schedule runs every 16:00:00Z(00:00:00+08:00)

 # https://docs.github.com/en/actions/using-jobs/using-concurrency
 concurrency:
@@ -32,12 +34,9 @@ jobs:
       # https://github.com/hmarr/debug-action
       #- uses: hmarr/debug-action@v2

-      - name: Show PR labels
+      - name: Show who triggered this workflow
         run: |
           echo "Workflow triggered by ${{ github.event_name }}"
-          if [[ ${{ github.event_name }} == 'pull_request' ]]; then
-            echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
-          fi

       - name: Ensure workspace ownership
         run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
@@ -51,10 +50,10 @@ jobs:

       # https://github.com/astral-sh/ruff-action
       - name: Static check with Ruff
-        uses: astral-sh/ruff-action@v2
+        uses: astral-sh/ruff-action@v3
         with:
-          version: ">=0.8.2"
-          args: "check --ignore E402"
+          version: ">=0.11.x"
+          args: "check"

       - name: Build ragflow:nightly-slim
         run: |
@@ -68,7 +67,7 @@ jobs:

       - name: Start ragflow:nightly-slim
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

       - name: Stop ragflow:nightly-slim
@@ -78,7 +77,7 @@ jobs:

       - name: Start ragflow:nightly
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

       - name: Run sdk tests against Elasticsearch
@@ -89,7 +88,7 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

       - name: Run frontend api tests against Elasticsearch
         run: |
@@ -99,8 +98,22 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+      - name: Run http api tests against Elasticsearch
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL}

       - name: Stop ragflow:nightly
         if: always() # always run this step even if previous steps failed
@@ -119,7 +132,7 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

       - name: Run frontend api tests against Infinity
         run: |
@@ -129,7 +142,22 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+      - name: Run http api tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && DOC_ENGINE=infinity pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL}

       - name: Stop ragflow:nightly
         if: always() # always run this step even if previous steps failed
```

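The new HTTP API steps gate test depth on the trigger: scheduled nightly runs use level `p3`, everything else `p2`. Reproducing one of these runs locally might look like the following — a sketch that assumes a RAGFlow server is already listening at `HOST_ADDRESS`, and that `--level` is the suite's own pytest option exactly as invoked in the workflow above:

```bash
export HOST_ADDRESS=http://127.0.0.1:9380   # assumed local server address
export HTTP_API_TEST_LEVEL=p2               # p3 is reserved for the nightly schedule
cd sdk/python && uv sync --python 3.10 --group test --frozen
source .venv/bin/activate
cd test/test_http_api
pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL}
# For the Infinity variant, prefix pytest with DOC_ENGINE=infinity as the workflow does.
```
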
.gitignore (149 changes)

```diff
@@ -38,3 +38,152 @@ sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
 huggingface.co/
 nltk_data/
+
+# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
+.lh/
+.venv
+docker/data
+
+
+#--------------------------------------------------#
+# The following was generated with gitignore.nvim: #
+#--------------------------------------------------#
+# Gitignore for the following technologies: Node
+
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional stylelint cache
+.stylelintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line in if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+.temp
+
+# Docusaurus cache and generated files
+.docusaurus
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v2
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
+
+# Serverless Webpack directories
+.webpack/
+
+# SvelteKit build / generate output
+.svelte-kit
```

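The hash-glob rule is aggressive: it ignores any path whose name contains a run of ten or more hex characters. `git check-ignore` shows which rule claims a given path — standard git; the sample filename below is purely illustrative:

```bash
# Report the .gitignore line that matches this hash-like filename, if any.
git check-ignore -v 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
# Prints the matching source line, e.g.:
#   .gitignore:<line>:*[0-9a-f]...[0-9a-f]*    9b5ad71b2ce5302211f9c61530b329a4922fc6a4
```
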
.pre-commit-config.yaml (19 changes, new file)

```diff
@@ -0,0 +1,19 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: mixed-line-ending
+      - id: check-symlinks
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.6
+    hooks:
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
```

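With this file in place, a contributor enables the hooks once per clone and pre-commit then runs check-yaml, ruff, and the rest on every commit. These are pre-commit's standard CLI commands, matching the README change below that adds pre-commit to the pipx install:

```bash
pipx install pre-commit      # once per machine
pre-commit install           # once per clone: wires the hooks into .git/hooks
pre-commit run --all-files   # optional: run every configured hook over the whole tree
```
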
Dockerfile (24 changes)

```diff
@@ -21,9 +21,7 @@ RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co
     if [ "$LIGHTEN" != "1" ]; then \
         (tar -cf - \
             /huggingface.co/BAAI/bge-large-zh-v1.5 \
-            /huggingface.co/BAAI/bge-reranker-v2-m3 \
             /huggingface.co/maidalun1020/bce-embedding-base_v1 \
-            /huggingface.co/maidalun1020/bce-reranker-base_v1 \
             | tar -xf - --strip-components=2 -C /root/.ragflow) \
     fi

@@ -46,7 +44,8 @@ ENV DEBIAN_FRONTEND=noninteractive
 # Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
 RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     if [ "$NEED_MIRROR" == "1" ]; then \
-        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+        sed -i 's|http://ports.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+        sed -i 's|http://archive.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
     fi; \
     rm -f /etc/apt/apt.conf.d/docker-clean && \
     echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
@@ -59,14 +58,16 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     apt install -y default-jdk && \
     apt install -y libatk-bridge2.0-0 && \
     apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
-    apt install -y python3-pip pipx nginx unzip curl wget git vim less
+    apt install -y libjemalloc-dev && \
+    apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
+    apt install -y ghostscript

 RUN if [ "$NEED_MIRROR" == "1" ]; then \
-        pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
-        pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
+        pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
+        pip3 config set global.trusted-host mirrors.aliyun.com; \
         mkdir -p /etc/uv && \
         echo "[[index]]" > /etc/uv/uv.toml && \
-        echo 'url = "https://pypi.tuna.tsinghua.edu.cn/simple"' >> /etc/uv/uv.toml && \
+        echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
         echo "default = true" >> /etc/uv/uv.toml; \
     fi; \
     pipx install uv
@@ -150,9 +151,9 @@ COPY pyproject.toml uv.lock ./
 # uv records index url into uv.lock but doesn't failover among multiple indexes
 RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
     if [ "$NEED_MIRROR" == "1" ]; then \
-        sed -i 's|pypi.org|pypi.tuna.tsinghua.edu.cn|g' uv.lock; \
+        sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
     else \
-        sed -i 's|pypi.tuna.tsinghua.edu.cn|pypi.org|g' uv.lock; \
+        sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
     fi; \
     if [ "$LIGHTEN" == "1" ]; then \
         uv sync --python 3.10 --frozen; \
@@ -196,10 +197,13 @@ COPY deepdoc deepdoc
 COPY rag rag
 COPY agent agent
 COPY graphrag graphrag
 COPY agentic_reasoning agentic_reasoning
 COPY pyproject.toml uv.lock ./
+COPY mcp mcp
+COPY plugin plugin

 COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
-COPY docker/entrypoint.sh docker/entrypoint-parser.sh ./
+COPY docker/entrypoint.sh ./
 RUN chmod +x ./entrypoint*.sh

 # Copy compiled web pages
```

```diff
@@ -33,6 +33,7 @@ ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
 ADD ./agent ./agent
 ADD ./graphrag ./graphrag
+ADD ./plugin ./plugin

 RUN dnf install -y openmpi openmpi-devel python3-openmpi
 ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
```

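The mirror switch now points apt, pip, and uv at mirrors instead of the upstream indexes. A build exercising that branch might look like this — a sketch only; `NEED_MIRROR` and `LIGHTEN` are build args the Dockerfile already defines, while the image tag is illustrative:

```bash
# NEED_MIRROR=1 triggers the mirror rewrites shown above;
# LIGHTEN=1 skips bundling the embedding models into the image.
docker build --build-arg NEED_MIRROR=1 --build-arg LIGHTEN=1 \
  -f Dockerfile -t ragflow:local-slim .
```
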
README.md (90 changes)

````diff
@@ -22,7 +22,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -36,7 +36,7 @@
   <a href="https://ragflow.io/docs/dev/">Document</a> |
   <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
   <a href="https://twitter.com/infiniflowai">Twitter</a> |
-  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+  <a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
   <a href="https://demo.ragflow.io">Demo</a>
 </h4>
@@ -78,11 +78,10 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

 ## 🔥 Latest Updates

-- 2025-02-05 Updates the model list of 'SILICONFLOW' and adds support for Deepseek-R1/DeepSeek-V3.
+- 2025-03-19 Supports using a multi-modal model to make sense of images within PDF or DOCX files.
 - 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
 - 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
-- 2024-12-18 Upgrades Document Layout Analysis model in Deepdoc.
-- 2024-12-04 Adds support for pagerank score in knowledge base.
-- 2024-11-22 Adds more variables to Agent.
+- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
 - 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
 - 2024-08-22 Support text to SQL statements through RAG.
@@ -138,8 +137,10 @@ releases! 🌟
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-  > If you have not installed Docker on your local machine (Windows, Mac, or Linux),
-  > see [Install Docker Engine](https://docs.docker.com/engine/install/).
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use the code executor (sandbox) feature of RAGFlow.
+
+> [!TIP]
+> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

 ### 🚀 Start up the server
@@ -173,17 +174,25 @@

 3. Start up the server using the pre-built Docker images:

-   > The command below downloads the `v0.16.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download an RAGFlow edition different from `v0.16.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` for the full edition `v0.16.0`.
+   > [!CAUTION]
+   > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
+   > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.
+
+   > The command below downloads the `v0.19.0-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.19.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` for the full edition `v0.19.0`.

    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   # Use CPU for embedding and DeepDoc tasks:
+   $ docker compose -f docker-compose.yml up -d
+
+   # To use GPU to accelerate embedding and DeepDoc tasks:
+   # docker compose -f docker-compose-gpu.yml up -d
    ```

    | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
    |-------------------|-----------------|-----------------------|--------------------------|
-   | v0.16.0           | &approx;9       | :heavy_check_mark:    | Stable release           |
-   | v0.16.0-slim      | &approx;2       | ❌                    | Stable release           |
+   | v0.19.0           | &approx;9       | :heavy_check_mark:    | Stable release           |
+   | v0.19.0-slim      | &approx;2       | ❌                    | Stable release           |
    | nightly           | &approx;9       | :heavy_check_mark:    | _Unstable_ nightly build |
    | nightly-slim      | &approx;2       | ❌                    | _Unstable_ nightly build |
@@ -204,9 +213,6 @@ releases! 🌟
    /_/ |_|/_/  |_|\____//_/    /_/  \____/ |__/|__/

-    * Running on all addresses (0.0.0.0)
-    * Running on http://127.0.0.1:9380
-    * Running on http://x.x.x.x:9380
     INFO:werkzeug:Press CTRL+C to quit
    ```

    > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
@@ -240,7 +246,7 @@ to `<YOUR_SERVING_PORT>:80`.
 Updates to the above configurations require a reboot of all containers to take effect:

 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

 ### Switch doc engine from Elasticsearch to Infinity
@@ -253,12 +259,15 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors. To swit
    $ docker compose -f docker/docker-compose.yml down -v
    ```

+   > [!WARNING]
+   > `-v` will delete the docker container volumes, and the existing data will be cleared.
+
 2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

 3. Start the containers:

    ```bash
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ docker compose -f docker-compose.yml up -d
    ```

 > [!WARNING]
@@ -271,7 +280,7 @@ This image is approximately 2 GB in size and relies on external LLM and embeddin
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

 ## 🔧 Build a Docker image including embedding models
@@ -281,7 +290,7 @@ This image is approximately 9 GB in size. As it includes embedding models, it re
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-docker build -f Dockerfile -t infiniflow/ragflow:nightly .
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```

 ## 🔨 Launch service from source for development
@@ -289,7 +298,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
 1. Install uv, or skip this step if it is already installed:

    ```bash
-   pipx install uv
+   pipx install uv pre-commit
    ```

 2. Clone the source code and install Python dependencies:
@@ -298,6 +307,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
    git clone https://github.com/infiniflow/ragflow.git
    cd ragflow/
    uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+   uv run download_deps.py
+   pre-commit install
    ```

 3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:
@@ -309,7 +320,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
    Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

    ```
-   127.0.0.1       es01 infinity mysql minio redis
+   127.0.0.1       es01 infinity mysql minio redis sandbox-executor-manager
    ```

 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
@@ -318,7 +329,16 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
    export HF_ENDPOINT=https://hf-mirror.com
    ```

-5. Launch backend service:
+5. If your operating system does not have jemalloc, please install it as follows:
+
+   ```bash
+   # ubuntu
+   sudo apt-get install libjemalloc-dev
+   # centos
+   sudo yum install jemalloc
+   ```
+
+6. Launch backend service:

    ```bash
    source .venv/bin/activate
@@ -326,12 +346,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
    bash docker/launch_backend_service.sh
    ```

-6. Install frontend dependencies:
+7. Install frontend dependencies:

    ```bash
    cd web
    npm install
    ```
-7. Launch frontend service:
+
+8. Launch frontend service:

    ```bash
    npm run dev
@@ -341,12 +363,22 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .


+9. Stop RAGFlow front-end and back-end service after development is complete:
+
+   ```bash
+   pkill -f "ragflow_server.py|task_executor.py"
+   ```
+
+
 ## 📚 Documentation

 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/guides)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
 - [References](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [FAQs](https://ragflow.io/docs/dev/faq)

 ## 📜 Roadmap
@@ -354,11 +386,11 @@ See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214

 ## 🏄 Community

-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/NjYzJD3GM3)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

 ## 🙌 Contributing

 RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community.
-If you would like to be a part, review our [Contribution Guidelines](./CONTRIBUTING.md) first.
+If you would like to be a part, review our [Contribution Guidelines](https://ragflow.io/docs/dev/contributing) first.
````

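Since every compose command now runs from `docker/` and reads `RAGFLOW_IMAGE` from `docker/.env`, pinning the full `v0.19.0` edition before start-up reduces to one edit. A sketch — the `sed` assumes `RAGFLOW_IMAGE` is already defined in `docker/.env`, as the README's note says it is:

```bash
cd ragflow/docker
# Switch from the default slim tag to the full edition with bundled embedding models.
sed -i 's|^RAGFLOW_IMAGE=.*|RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0|' .env
docker compose -f docker-compose.yml up -d
```
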
106
README_id.md
106
README_id.md
@ -22,7 +22,7 @@
|
||||
<img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
|
||||
@ -36,12 +36,12 @@
|
||||
<a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
<details open>
|
||||
<summary></b>📕 Daftar Isi</b></summary>
|
||||
<summary><b>📕 Daftar Isi </b> </summary>
|
||||
|
||||
- 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
|
||||
- 🎮 [Demo](#-demo)
|
||||
@ -75,11 +75,10 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
## 🔥 Pembaruan Terbaru
|
||||
|
||||
- 2025-02-05 Memperbarui daftar model 'SILICONFLOW' dan menambahkan dukungan untuk Deepseek-R1/DeepSeek-V3.
|
||||
- 2025-03-19 Mendukung penggunaan model multi-modal untuk memahami gambar di dalam file PDF atau DOCX.
|
||||
- 2025-02-28 dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
|
||||
- 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
|
||||
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di Deepdoc.
|
||||
- 2024-12-04 Mendukung skor pagerank ke basis pengetahuan.
|
||||
- 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
|
||||
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
|
||||
- 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
|
||||
- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.
|
||||
|
||||
@ -133,6 +132,10 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Hanya diperlukan jika Anda ingin menggunakan fitur eksekutor kode (sandbox) dari RAGFlow.
|
||||
|
||||
> [!TIP]
|
||||
> Jika Anda belum menginstal Docker di komputer lokal Anda (Windows, Mac, atau Linux), lihat [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||
|
||||
### 🚀 Menjalankan Server
|
||||
|
||||
@ -166,21 +169,29 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
3. Bangun image Docker pre-built dan jalankan server:
|
||||
|
||||
> Perintah di bawah ini mengunduh edisi v0.16.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.16.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0 untuk edisi lengkap v0.16.0.
|
||||
> [!CAUTION]
|
||||
> Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
|
||||
> Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).
|
||||
|
||||
```bash
|
||||
$ cd ragflow
|
||||
$ docker compose -f docker/docker-compose.yml up -d
|
||||
```
|
||||
> Perintah di bawah ini mengunduh edisi v0.19.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.19.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0 untuk edisi lengkap v0.19.0.
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.16.0-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
# Use CPU for embedding and DeepDoc tasks:
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
|
||||
4. Periksa status server setelah server aktif dan berjalan:
|
||||
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||
# docker compose -f docker-compose-gpu.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.19.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.19.0-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
1. Periksa status server setelah server aktif dan berjalan:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
@ -197,18 +208,15 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
* Running on all addresses (0.0.0.0)
|
||||
* Running on http://127.0.0.1:9380
|
||||
* Running on http://x.x.x.x:9380
|
||||
INFO:werkzeug:Press CTRL+C to quit
|
||||
```
|
||||
|
||||
> Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
|
||||
> karena RAGFlow mungkin belum sepenuhnya siap.
|
||||
|
||||
5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
|
||||
2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
|
||||
> Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
|
||||
> port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
|
||||
6. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
|
||||
3. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
|
||||
bidang `API_KEY` dengan kunci API yang sesuai.
|
||||
|
||||
> Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.
|
||||
@ -230,7 +238,7 @@ menjadi `<YOUR_SERVING_PORT>:80`.
|
||||
Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:
|
||||
|
||||
> ```bash
|
||||
> $ docker compose -f docker/docker-compose.yml up -d
|
||||
> $ docker compose -f docker-compose.yml up -d
|
||||
> ```
|
||||
|
||||
## 🔧 Membangun Docker Image tanpa Model Embedding
|
||||
@ -240,7 +248,7 @@ Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 Membangun Docker Image Termasuk Model Embedding
|
||||
@ -250,7 +258,7 @@ Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hany
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 Menjalankan Aplikasi dari untuk Pengembangan
|
||||
@ -258,7 +266,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
1. Instal uv, atau lewati langkah ini jika sudah terinstal:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
pipx install uv pre-commit
|
||||
```
|
||||
|
||||
2. Clone kode sumber dan instal dependensi Python:
|
||||
@ -267,6 +275,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
uv run download_deps.py
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:
|
||||
@ -278,7 +288,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
|
||||
```
|
||||
|
||||
4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:

@@ -287,7 +297,16 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
export HF_ENDPOINT=https://hf-mirror.com
```

5. Jalankan aplikasi backend:
5. Jika sistem operasi Anda tidak memiliki jemalloc, instal sebagai berikut:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

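A small hedged check to go with the jemalloc step above (editorial, not from the diff): confirm the allocator is actually visible to the dynamic linker before relying on it.

```bash
# List jemalloc entries known to the dynamic linker (Linux)
ldconfig -p | grep -i jemalloc
```
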
6. Jalankan aplikasi backend:

```bash
source .venv/bin/activate
@@ -295,12 +314,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
bash docker/launch_backend_service.sh
```

6. Instal dependensi frontend:
7. Instal dependensi frontend:

```bash
cd web
npm install
```
7. Jalankan aplikasi frontend:

8. Jalankan aplikasi frontend:

```bash
npm run dev
@@ -310,12 +331,23 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

9. Hentikan layanan front-end dan back-end RAGFlow setelah pengembangan selesai:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

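A cautious note on the `pkill` pattern above (editorial, not from the diff): `-f` matches against full command lines, so previewing the matches first avoids signalling an unrelated process.

```bash
# Dry run: list the processes the pattern would hit (procps pgrep)
pgrep -fa "ragflow_server.py|task_executor.py"

# Then stop them exactly as the step above does
pkill -f "ragflow_server.py|task_executor.py"
```
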
## 📚 Dokumentasi

- [Quickstart](https://ragflow.io/docs/dev/)
- [Panduan Pengguna](https://ragflow.io/docs/dev/category/guides)
- [Referensi](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

@@ -323,11 +355,11 @@ Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Komunitas

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Kontribusi

RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](https://ragflow.io/docs/dev/contributing).

README_ja.md (93 changed lines)
@@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -36,7 +36,7 @@
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

@@ -55,11 +55,10 @@

## 🔥 最新情報

- 2025-02-05 シリコン フローの St およびモデル リストを更新し、Deep Seek-R1/Deep Seek-V3 のサポートを追加しました。
- 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
- 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
- 2024-12-18 Deepdoc のドキュメント レイアウト分析モデルをアップグレードします。
- 2024-12-04 ナレッジ ベースへのページランク スコアをサポートしました。
- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。

@@ -113,7 +112,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。
- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlowのコード実行(サンドボックス)機能を利用する場合のみ必要です。

> [!TIP]
> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

### 🚀 サーバーを起動

@@ -146,21 +148,29 @@

3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:

> 以下のコマンドは、RAGFlow Docker イメージの v0.16.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.16.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.16.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0 と設定します。
> [!CAUTION]
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。

> 以下のコマンドは、RAGFlow Docker イメージの v0.19.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.19.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.19.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0 と設定します。

```bash
$ cd ragflow
$ docker compose -f docker/docker-compose.yml up -d
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.16.0-slim | ≈2 | ❌ | Stable release |
| v0.19.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | ≈2 | ❌ | Stable release |
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |

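For readers following the table above, switching editions comes down to one variable in **docker/.env**. A hedged sketch (GNU sed syntax assumed, run from the repository root):

```bash
# Pull the full v0.19.0 edition instead of the slim default, then restart
sed -i 's#^RAGFLOW_IMAGE=.*#RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0#' docker/.env
docker compose -f docker/docker-compose.yml up -d
```
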
4. サーバーを立ち上げた後、サーバーの状態を確認する:
1. サーバーを立ち上げた後、サーバーの状態を確認する:

```bash
$ docker logs -f ragflow-server
@@ -176,16 +186,13 @@

/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:9380
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```

> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
2. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。

@@ -208,7 +215,7 @@

> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
>
> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> $ docker compose -f docker-compose.yml up -d
> ```

### Elasticsearch から Infinity にドキュメントエンジンを切り替えます

@@ -219,11 +226,12 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル

```bash
$ docker compose -f docker/docker-compose.yml down -v
```
Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。
2. **docker/.env** の「DOC_ENGINE」を「infinity」に設定します。

3. 起動コンテナ:
```bash
$ docker compose -f docker/docker-compose.yml up -d
$ docker compose -f docker-compose.yml up -d
```
> [!WARNING]
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
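Taken together, the Elasticsearch-to-Infinity hunks above amount to three shell steps. A hedged sketch, assuming it is run from the repository root and that GNU sed is available:

```bash
cd docker
# 1. Stop the stack and delete its volumes (this erases existing data)
docker compose -f docker-compose.yml down -v
# 2. Point the document engine at Infinity
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' .env
# 3. Bring the stack back up
docker compose -f docker-compose.yml up -d
```
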
@@ -235,7 +243,7 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)

@@ -245,7 +253,7 @@ docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-s

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 ソースコードからサービスを起動する方法

@@ -253,7 +261,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:

```bash
pipx install uv
pipx install uv pre-commit
```

2. ソースコードをクローンし、Python の依存関係をインストールする:

@@ -262,6 +270,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```

3. Docker Compose を使用して依存サービス(MinIO、Elasticsearch、Redis、MySQL)を起動する:

@@ -273,7 +283,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

`/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:

```
127.0.0.1 es01 infinity mysql minio redis
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:

@@ -282,7 +292,16 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
export HF_ENDPOINT=https://hf-mirror.com
```

5. バックエンドサービスを起動する:
5. オペレーティングシステムにjemallocがない場合は、次のようにインストールします:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. バックエンドサービスを起動する:

```bash
source .venv/bin/activate
@@ -290,12 +309,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
bash docker/launch_backend_service.sh
```

6. フロントエンドの依存関係をインストールする:
7. フロントエンドの依存関係をインストールする:

```bash
cd web
npm install
```
7. フロントエンドサービスを起動する:

8. フロントエンドサービスを起動する:

```bash
npm run dev
@@ -305,12 +326,22 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

9. 開発が完了したら、RAGFlow のフロントエンド サービスとバックエンド サービスを停止します:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 ドキュメンテーション

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 ロードマップ

@@ -318,10 +349,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 🏄 コミュニティ

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 コントリビュート

RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](./CONTRIBUTING.md)をご覧ください。
RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](https://ragflow.io/docs/dev/contributing)をご覧ください。

README_ko.md (93 changed lines)
@@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -36,7 +36,7 @@
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

@@ -55,12 +55,10 @@

## 🔥 업데이트

- 2025-02-05 'SILICONFLOW' 모델 목록을 업데이트하고 Deepseek-R1/DeepSeek-V3에 대한 지원을 추가합니다.
- 2025-03-19 PDF 또는 DOCX 파일 내의 이미지를 이해하기 위해 다중 모드 모델을 사용하는 것을 지원합니다.
- 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
- 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
- 2024-12-18 Deepdoc의 문서 레이아웃 분석 모델 업그레이드.
- 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.

- 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.

@@ -114,7 +112,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.
- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlow의 코드 실행기(샌드박스) 기능을 사용하려는 경우에만 필요합니다.

> [!TIP]
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.

### 🚀 서버 시작하기

@@ -147,21 +148,29 @@

3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:

> 아래 명령어는 RAGFlow Docker 이미지의 v0.16.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.16.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.16.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0로 설정합니다.
> [!CAUTION]
> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).

> 아래 명령어는 RAGFlow Docker 이미지의 v0.19.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.19.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.19.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0로 설정합니다.

```bash
$ cd ragflow
$ docker compose -f docker/docker-compose.yml up -d
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.16.0-slim | ≈2 | ❌ | Stable release |
| v0.19.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | ≈2 | ❌ | Stable release |
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |

4. 서버가 시작된 후 서버 상태를 확인하세요:
1. 서버가 시작된 후 서버 상태를 확인하세요:

```bash
$ docker logs -f ragflow-server
@@ -177,16 +186,13 @@

/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:9380
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```

> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.

5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
2. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.

> 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.

@@ -209,7 +215,7 @@

> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
>
> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> $ docker compose -f docker-compose.yml up -d
> ```

### Elasticsearch 에서 Infinity 로 문서 엔진 전환

@@ -220,6 +226,7 @@ RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및

```bash
$ docker compose -f docker/docker-compose.yml down -v
```
Note: `-v` 는 docker 컨테이너의 볼륨을 삭제하고 기존 데이터를 지웁니다.
2. **docker/.env**의 "DOC_ENGINE" 을 "infinity" 로 설정합니다.
3. 컨테이너 부팅:
```bash
@@ -235,7 +242,7 @@ RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)

@@ -245,7 +252,7 @@ docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-s

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 소스 코드로 서비스를 시작합니다.

@@ -253,7 +260,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

1. uv를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:

```bash
pipx install uv
pipx install uv pre-commit
```

2. 소스 코드를 클론하고 Python 의존성을 설치합니다:

@@ -262,6 +269,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```

3. Docker Compose를 사용하여 의존 서비스(MinIO, Elasticsearch, Redis 및 MySQL)를 시작합니다:

@@ -273,7 +282,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

`/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:

```
127.0.0.1 es01 infinity mysql minio redis
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:

@@ -282,7 +291,16 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
export HF_ENDPOINT=https://hf-mirror.com
```

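One hedged addition to the `HF_ENDPOINT` step above (editorial, not from the diff): an `export` only lasts for the current shell, so persisting it avoids surprises in new terminals. The `~/.bashrc` path assumes a bash login shell.

```bash
# Persist the mirror endpoint for future bash sessions
echo 'export HF_ENDPOINT=https://hf-mirror.com' >> ~/.bashrc
source ~/.bashrc

# Confirm the variable is what downstream tools will see
printenv HF_ENDPOINT
```
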
5. 백엔드 서비스를 시작합니다:
5. 만약 운영 체제에 jemalloc이 없으면 다음 방식으로 설치하세요:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. 백엔드 서비스를 시작합니다:

```bash
source .venv/bin/activate
@@ -290,12 +308,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
bash docker/launch_backend_service.sh
```

6. 프론트엔드 의존성을 설치합니다:
7. 프론트엔드 의존성을 설치합니다:

```bash
cd web
npm install
```
7. 프론트엔드 서비스를 시작합니다:

8. 프론트엔드 서비스를 시작합니다:

```bash
npm run dev
@@ -305,12 +325,23 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

![](https://github.com/user-attachments/assets/88f3661b-9ea2-4e42-b35f-cdba0a1b0b22)

9. 개발이 완료된 후 RAGFlow 프론트엔드 및 백엔드 서비스를 중지합니다.

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 문서

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 로드맵

@@ -318,10 +349,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 🏄 커뮤니티

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 컨트리뷰션

RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](./CONTRIBUTING.md)을 검토해 주세요.
RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](https://ragflow.io/docs/dev/contributing)을 검토해 주세요.

@@ -22,7 +22,7 @@
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
@@ -36,12 +36,12 @@
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary></b>📕 Índice</b></summary>
<summary><b>📕 Índice</b></summary>

- 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
- 🎮 [Demo](#-demo)
@@ -75,11 +75,10 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

## 🔥 Últimas Atualizações

- 05-02-2025 Atualiza a lista de modelos de 'SILICONFLOW' e adiciona suporte para Deepseek-R1/DeepSeek-V3.
- 19-03-2025 Suporta o uso de um modelo multi-modal para entender imagens dentro de arquivos PDF ou DOCX.
- 28-02-2025 Combinado com a pesquisa na Internet (TAVILY), suporta pesquisas profundas para qualquer LLM.
- 26-01-2025 Otimize a extração e aplicação de gráficos de conhecimento e forneça uma variedade de opções de configuração.
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no Deepdoc.
- 04-12-2024 Adiciona suporte para pontuação de pagerank na base de conhecimento.
- 22-11-2024 Adiciona mais variáveis para o Agente.
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
- 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
- 22-08-2024 Suporta conversão de texto para comandos SQL via RAG.

@@ -133,7 +132,10 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
- RAM >= 16 GB
- Disco >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Necessário apenas se você pretende usar o recurso de executor de código (sandbox) do RAGFlow.

> [!TIP]
> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Iniciar o servidor

@@ -166,17 +168,25 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

3. Inicie o servidor usando as imagens Docker pré-compiladas:

> O comando abaixo baixa a edição `v0.16.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.16.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` para a edição completa `v0.16.0`.
> [!CAUTION]
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.

> O comando abaixo baixa a edição `v0.19.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.19.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` para a edição completa `v0.19.0`.

```bash
$ cd ragflow
$ docker compose -f docker/docker-compose.yml up -d
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
| v0.16.0 | ~9 | :heavy_check_mark: | Lançamento estável |
| v0.16.0-slim | ~2 | ❌ | Lançamento estável |
| v0.19.0 | ~9 | :heavy_check_mark: | Lançamento estável |
| v0.19.0-slim | ~2 | ❌ | Lançamento estável |
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |

@@ -196,9 +206,6 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Rodando em todos os endereços (0.0.0.0)
* Rodando em http://127.0.0.1:9380
* Rodando em http://x.x.x.x:9380
INFO:werkzeug:Pressione CTRL+C para sair
```

> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
@@ -228,7 +235,7 @@ Para atualizar a porta HTTP de serviço padrão (80), vá até [docker-compose.y

Atualizações nas configurações acima exigem um reinício de todos os contêineres para que tenham efeito:

> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> $ docker compose -f docker-compose.yml up -d
> ```

### Mudar o mecanismo de documentos de Elasticsearch para Infinity

@@ -240,13 +247,13 @@ O RAGFlow usa o Elasticsearch por padrão para armazenar texto completo e vetore

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` irá deletar os volumes do contêiner, e os dados existentes serão apagados.
2. Defina `DOC_ENGINE` no **docker/.env** para `infinity`.

3. Inicie os contêineres:

```bash
$ docker compose -f docker/docker-compose.yml up -d
$ docker compose -f docker-compose.yml up -d
```

> [!ATENÇÃO]
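Because the `down -v` step above erases volumes, a hedged aside before running it (editorial, not from the diff): list what will be deleted, and archive anything you still need. The volume name below is illustrative; take real names from the `ls` output.

```bash
# Inspect the named volumes the stack created
docker volume ls

# Archive one volume before wiping it (volume name is illustrative)
docker run --rm -v ragflow_esdata01:/data -v "$PWD":/backup alpine \
  tar czf /backup/esdata01.tgz -C /data .
```
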
@@ -259,7 +266,7 @@ Esta imagem tem cerca de 2 GB de tamanho e depende de serviços externos de LLM

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Criar uma imagem Docker incluindo modelos de incorporação

@@ -269,7 +276,7 @@ Esta imagem tem cerca de 9 GB de tamanho. Como inclui modelos de incorporação,

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Lançar o serviço a partir do código-fonte para desenvolvimento

@@ -277,7 +284,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

1. Instale o `uv`, ou pule esta etapa se ele já estiver instalado:

```bash
pipx install uv
pipx install uv pre-commit
```

2. Clone o código-fonte e instale as dependências Python:

@@ -286,6 +293,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # instala os módulos Python dependentes do RAGFlow
uv run download_deps.py
pre-commit install
```

3. Inicie os serviços dependentes (MinIO, Elasticsearch, Redis e MySQL) usando Docker Compose:

@@ -297,7 +306,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

Adicione a seguinte linha ao arquivo `/etc/hosts` para resolver todos os hosts especificados em **docker/.env** para `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. Se não conseguir acessar o HuggingFace, defina a variável de ambiente `HF_ENDPOINT` para usar um site espelho:

@@ -306,7 +315,16 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
export HF_ENDPOINT=https://hf-mirror.com
```

5. Lance o serviço de back-end:
5. Se o seu sistema operacional não tiver jemalloc, instale-o da seguinte maneira:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. Lance o serviço de back-end:

```bash
source .venv/bin/activate
@@ -314,14 +332,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
bash docker/launch_backend_service.sh
```

6. Instale as dependências do front-end:
7. Instale as dependências do front-end:

```bash
cd web
npm install
```

7. Lance o serviço de front-end:
8. Lance o serviço de front-end:

```bash
npm run dev
@@ -331,12 +349,22 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

9. Pare os serviços de front-end e back-end do RAGFlow após a conclusão do desenvolvimento:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 Documentação

- [Início rápido](https://ragflow.io/docs/dev/)
- [Guia do usuário](https://ragflow.io/docs/dev/category/guides)
- [Referências](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [Quickstart](https://ragflow.io/docs/dev/)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

@@ -344,11 +372,11 @@ Veja o [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Comunidade

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contribuindo

O RAGFlow prospera por meio da colaboração de código aberto. Com esse espírito, abraçamos contribuições diversas da comunidade.
Se você deseja fazer parte, primeiro revise nossas [Diretrizes de Contribuição](./CONTRIBUTING.md).
Se você deseja fazer parte, primeiro revise nossas [Diretrizes de Contribuição](https://ragflow.io/docs/dev/contributing).

README_tzh.md (112 changed lines)
@@ -21,7 +21,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -35,7 +35,7 @@
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

@@ -54,11 +54,10 @@

## 🔥 近期更新

- 2025-02-05 更新「SILICONFLOW」的型號清單並新增 Deepseek-R1/DeepSeek-V3 的支援。
- 2025-03-19 PDF和DOCX中的圖支持用多模態大模型去解析得到描述.
- 2025-02-28 結合網路搜尋(Tavily),對於任意大模型實現類似 Deep Research 的推理功能.
- 2025-01-26 最佳化知識圖譜的擷取與應用,提供了多種配置選擇。
- 2024-12-18 升級了 Deepdoc 的文檔佈局分析模型。
- 2024-12-04 支援知識庫的 Pagerank 分數。
- 2024-11-22 完善了 Agent 中的變數定義和使用。
- 2024-12-18 升級了 DeepDoc 的文檔佈局分析模型。
- 2024-11-01 對解析後的 chunk 加入關鍵字抽取和相關問題產生以提高回想的準確度。
- 2024-08-22 支援用 RAG 技術實現從自然語言到 SQL 語句的轉換。

@@ -112,7 +111,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> 如果你並沒有在本機安裝 Docker(Windows、Mac,或 Linux), 可以參考文件 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安裝。
- [gVisor](https://gvisor.dev/docs/user_guide/install/): 僅在您打算使用 RAGFlow 的代碼執行器(沙箱)功能時才需要安裝。

> [!TIP]
> 如果你並沒有在本機安裝 Docker(Windows、Mac,或 Linux), 可以參考文件 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安裝。

### 🚀 啟動伺服器

@@ -145,17 +147,25 @@

3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:

> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.16.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.16.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` 來下載 RAGFlow 鏡像的 `v0.16.0` 完整發行版。
> [!CAUTION]
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。

> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.19.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.19.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` 來下載 RAGFlow 鏡像的 `v0.19.0` 完整發行版。

```bash
$ cd ragflow
$ docker compose -f docker/docker-compose.yml up -d
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.16.0-slim | ≈2 | ❌ | Stable release |
| v0.19.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | ≈2 | ❌ | Stable release |
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |

@@ -181,9 +191,6 @@

/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:9380
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```

> 如果您跳過這一步驟系統確認步驟就登入 RAGFlow,你的瀏覽器有可能會提示 `network anormal` 或 `網路異常`,因為 RAGFlow 可能並未完全啟動成功。
@@ -200,7 +207,7 @@

系統配置涉及以下三份文件:

- [.env](./docker/.env):存放一些基本的系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
- [.env](./docker/.env):存放一些系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
- [service_conf.yaml.template](./docker/service_conf.yaml.template):設定各類別後台服務。
- [docker-compose.yml](./docker/docker-compose.yml): 系統依賴該檔案完成啟動。

@@ -215,7 +222,7 @@

> 所有系統配置都需要透過系統重新啟動生效:
>
> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> $ docker compose -f docker-compose.yml up -d
> ```

### 把文檔引擎從 Elasticsearch 切換成為 Infinity

@@ -227,13 +234,14 @@ RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換

```bash
$ docker compose -f docker/docker-compose.yml down -v
```
Note: `-v` 將會刪除 docker 容器的 volumes,已有的資料會被清空。

2. 設定 **docker/.env** 目錄中的 `DOC_ENGINE` 為 `infinity`.

3. 啟動容器:

```bash
$ docker compose -f docker/docker-compose.yml up -d
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
@ -246,7 +254,7 @@ RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 原始碼編譯 Docker 映像(包含 embedding 模型)
|
||||
@ -256,7 +264,7 @@ docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t in
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 以原始碼啟動服務

@@ -264,8 +272,8 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

1. 安裝 uv。如已安裝,可跳過此步驟:

```bash
pipx install uv
export UV_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple
pipx install uv pre-commit
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
```

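A hedged note on the `UV_INDEX` change above (the Aliyun mirror replacing the tsinghua one): the variable only affects the current shell, and its effect can be sanity-checked before the heavier `uv sync` step. The `--dry-run` flag and the `packaging` package are assumptions for illustration.

```bash
# Use the Aliyun PyPI mirror for this shell session
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple

# Quick check: resolve a tiny package against the configured index
uv pip install --dry-run packaging
```
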
2. 下載原始碼並安裝 Python 依賴:

@@ -274,6 +282,8 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```

3. 透過 Docker Compose 啟動依賴的服務(MinIO, Elasticsearch, Redis, and MySQL):

@@ -285,7 +295,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

在 `/etc/hosts` 中加入以下程式碼,將 **conf/service_conf.yaml** 檔案中的所有 host 位址都解析為 `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. 如果無法存取 HuggingFace,可以把環境變數 `HF_ENDPOINT` 設為對應的鏡像網站:

@@ -294,24 +304,34 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
export HF_ENDPOINT=https://hf-mirror.com
```

5.啟動後端服務:
『`bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
5. 如果你的操作系统没有 jemalloc,请按照如下方式安装:

```
```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. 安裝前端依賴:
『`bash
cd web
npm install
```
6. 啟動後端服務:

7. 啟動前端服務:
『`bash
```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

7. 安裝前端依賴:

```bash
cd web
npm install
```

8. 啟動前端服務:

```bash
npm run dev

```

以下界面說明系統已成功啟動:_
@@ -319,12 +339,22 @@ npm install
![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)
```

9. 開發完成後停止 RAGFlow 前端和後端服務:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 技術文檔

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 路線圖

@@ -332,13 +362,13 @@ npm install

## 🏄 開源社群

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/zd4qPW6t)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 貢獻指南

RAGFlow 只有透過開源協作才能蓬勃發展。秉持這項精神,我們歡迎來自社區的各種貢獻。如果您有意參與其中,請查閱我們的 [貢獻者指南](./CONTRIBUTING.md) 。
RAGFlow 只有透過開源協作才能蓬勃發展。秉持這項精神,我們歡迎來自社區的各種貢獻。如果您有意參與其中,請查閱我們的 [貢獻者指南](https://ragflow.io/docs/dev/contributing) 。

## 🤝 商務合作

README_zh.md (94 changed lines)
@@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -36,7 +36,7 @@
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

@@ -55,11 +55,10 @@

## 🔥 近期更新

- 2025-02-05 更新硅基流动的模型列表,增加了对 Deepseek-R1/DeepSeek-V3 的支持。
- 2025-03-19 PDF和DOCX中的图支持用多模态大模型去解析得到描述.
- 2025-02-28 结合互联网搜索(Tavily),对于任意大模型实现类似 Deep Research 的推理功能.
- 2025-01-26 优化知识图谱的提取和应用,提供了多种配置选择。
- 2024-12-18 升级了 Deepdoc 的文档布局分析模型。
- 2024-12-04 支持知识库的 Pagerank 分数。
- 2024-11-22 完善了 Agent 中的变量定义和使用。
- 2024-12-18 升级了 DeepDoc 的文档布局分析模型。
- 2024-11-01 对解析后的 chunk 加入关键词抽取和相关问题生成以提高召回的准确度。
- 2024-08-22 支持用 RAG 技术实现从自然语言到 SQL 语句的转换。

@@ -113,7 +112,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。
- [gVisor](https://gvisor.dev/docs/user_guide/install/): 仅在你打算使用 RAGFlow 的代码执行器(沙箱)功能时才需要安装。

> [!TIP]
> 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。

### 🚀 启动服务器

@@ -146,17 +148,25 @@

3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:

> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.16.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.16.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` 来下载 RAGFlow 镜像的 `v0.16.0` 完整发行版。
> [!CAUTION]
> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。

> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.19.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.19.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` 来下载 RAGFlow 镜像的 `v0.19.0` 完整发行版。

```bash
$ cd ragflow
$ docker compose -f docker/docker-compose.yml up -d
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.16.0-slim | ≈2 | ❌ | Stable release |
| v0.19.0 | ≈9 | :heavy_check_mark: | Stable release |
| v0.19.0-slim | ≈2 | ❌ | Stable release |
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |

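An editorial aside on the GPU variant referenced above (`docker-compose-gpu.yml`): it presupposes a working NVIDIA runtime on the host. A hedged pre-flight check, with the CUDA image tag as an illustrative assumption:

```bash
# Host side: is the driver loaded and a GPU visible?
nvidia-smi

# Container side: can Docker pass the GPU through? (image tag illustrative)
docker run --rm --gpus all nvidia/cuda:12.3.2-base-ubuntu22.04 nvidia-smi
```
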
@@ -182,12 +192,9 @@

/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:9380
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```

> 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。
> 如果您在没有看到上面的提示信息出来之前,就尝试登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`。

5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
@@ -216,7 +223,7 @@

> 所有系统配置都需要通过系统重启生效:
>
> ```bash
> $ docker compose -f docker/docker-compose.yml up -d
> $ docker compose -f docker-compose.yml up -d
> ```

### 把文档引擎从 Elasticsearch 切换成为 Infinity

@@ -228,13 +235,14 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换

```bash
$ docker compose -f docker/docker-compose.yml down -v
```
Note: `-v` 将会删除 docker 容器的 volumes,已有的数据会被清空。

2. 设置 **docker/.env** 目录中的 `DOC_ENGINE` 为 `infinity`.

3. 启动容器:

```bash
$ docker compose -f docker/docker-compose.yml up -d
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
@@ -247,7 +255,7 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 源码编译 Docker 镜像(包含 embedding 模型)

@@ -257,7 +265,7 @@ docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t in

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 以源代码启动服务

@@ -265,8 +273,8 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

1. 安装 uv。如已经安装,可跳过本步骤:

```bash
pipx install uv
export UV_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple
pipx install uv pre-commit
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
```

2. 下载源代码并安装 Python 依赖:

@@ -275,6 +283,8 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```

3. 通过 Docker Compose 启动依赖的服务(MinIO, Elasticsearch, Redis, and MySQL):

@@ -283,19 +293,27 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
docker compose -f docker/docker-compose-base.yml up -d
```

在 `/etc/hosts` 中添加以下代码,将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
在 `/etc/hosts` 中添加以下代码,目的是将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. 启动后端服务:
5. 如果你的操作系统没有 jemalloc,请按照如下方式安装:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. 启动后端服务:

```bash
source .venv/bin/activate
@@ -303,12 +321,14 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
bash docker/launch_backend_service.sh
```

6. 安装前端依赖:
7. 安装前端依赖:

```bash
cd web
npm install
```
7. 启动前端服务:

8. 启动前端服务:

```bash
npm run dev
@@ -318,12 +338,22 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

9. 开发完成后停止 RAGFlow 前端和后端服务:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 技术文档

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 路线图

@@ -331,13 +361,13 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night

## 🏄 开源社区

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/zd4qPW6t)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 贡献指南

RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](./CONTRIBUTING.md) 。
RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](https://ragflow.io/docs/dev/contributing) 。

## 🤝 商务合作

@@ -15,17 +15,15 @@
#
import logging
import json
from abc import ABC
from copy import deepcopy
from functools import partial

import pandas as pd

from agent.component import component_class
from agent.component.base import ComponentBase


class Canvas(ABC):
class Canvas:
    """
    dsl = {
        "components": {
@@ -162,13 +160,16 @@ class Canvas(ABC):
            self.components[k]["obj"].reset()
        self._embed_id = ""

    def get_compnent_name(self, cid):
    def get_component_name(self, cid):
        for n in self.dsl["graph"]["nodes"]:
            if cid == n["id"]:
                return n["data"]["name"]
        return ""

    def run(self, **kwargs):
    def run(self, running_hint_text = "is running...🕞", **kwargs):
        if not running_hint_text or not isinstance(running_hint_text, str):
            running_hint_text = "is running...🕞"

        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
@ -210,7 +211,7 @@ class Canvas(ABC):
|
||||
if c not in waiting:
|
||||
waiting.append(c)
|
||||
continue
|
||||
yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
|
||||
yield "*'{}'* {}".format(self.get_component_name(c), running_hint_text)
|
||||
|
||||
if cpn.component_name.lower() == "iteration":
|
||||
st_cpn = cpn.get_start()
|
||||
@ -236,7 +237,7 @@ class Canvas(ABC):
|
||||
pid = self.components[cid]["parent_id"]
|
||||
o, _ = self.components[cid]["obj"].output(allow_partial=False)
|
||||
oo, _ = self.components[pid]["obj"].output(allow_partial=False)
|
||||
self.components[pid]["obj"].set(pd.concat([oo, o], ignore_index=True))
|
||||
self.components[pid]["obj"].set_output(pd.concat([oo, o], ignore_index=True).dropna())
|
||||
downstream = [pid]
|
||||
|
||||
for m in prepare2run(downstream):
|
||||
@ -253,20 +254,20 @@ class Canvas(ABC):
|
||||
if loop:
|
||||
raise OverflowError(f"Too much loops: {loop}")
|
||||
|
||||
downstream = []
|
||||
if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
|
||||
switch_out = cpn["obj"].output()[1].iloc[0, 0]
|
||||
assert switch_out in self.components, \
|
||||
"{}'s output: {} not valid.".format(cpn_id, switch_out)
|
||||
for m in prepare2run([switch_out]):
|
||||
yield {"content": m, "running_status": True}
|
||||
continue
|
||||
|
||||
downstream = [switch_out]
|
||||
else:
|
||||
downstream = cpn["downstream"]
|
||||
|
||||
if not downstream and cpn.get("parent_id"):
|
||||
pid = cpn["parent_id"]
|
||||
_, o = cpn["obj"].output(allow_partial=False)
|
||||
_, oo = self.components[pid]["obj"].output(allow_partial=False)
|
||||
self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True))
|
||||
self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True).dropna())
|
||||
downstream = [pid]
|
||||
|
||||
for m in prepare2run(downstream):
|
||||
@ -364,3 +365,6 @@ class Canvas(ABC):
|
||||
|
||||
def get_component_input_elements(self, cpnnm):
|
||||
return self.components[cpnnm]["obj"].get_input_elements()
|
||||
|
||||
def set_component_infor(self, cpn_id, infor):
|
||||
self.components[cpn_id]["obj"].set_infor(infor)
|
||||
|
||||
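The new `running_hint_text` parameter lets a caller localize or restyle the per-component status messages that `run()` yields. A hedged sketch of how a caller might consume the stream; the DSL and component setup are elided, and `canvas` stands for a fully initialized `Canvas`:

```python
# Illustrative only: stream events from a prepared Canvas instance,
# overriding the default "is running...🕞" hint with a localized one.
def stream_status(canvas):
    for event in canvas.run(running_hint_text="正在运行...🕞"):
        if isinstance(event, str):
            # progress hints such as "*'Retrieval'* 正在运行...🕞"
            print(event)
        else:
            # dict payloads carry component output,
            # e.g. {"content": ..., "running_status": True}
            print(event.get("content"))
```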
@@ -50,6 +50,7 @@ from .template import Template, TemplateParam
 from .email import Email, EmailParam
 from .iteration import Iteration, IterationParam
 from .iterationitem import IterationItem, IterationItemParam
+from .code import Code, CodeParam


 def component_class(class_name):
@@ -129,5 +130,7 @@ __all__ = [
     "TemplateParam",
     "Email",
     "EmailParam",
+    "Code",
+    "CodeParam",
     "component_class"
 ]
@@ -17,6 +17,7 @@ import logging
 from abc import ABC
 import pandas as pd
 import requests
+from bs4 import BeautifulSoup
 import re
 from agent.component.base import ComponentBase, ComponentParamBase

@@ -44,14 +45,25 @@ class Baidu(ComponentBase, ABC):
             return Baidu.be_output("")

         try:
-            url = 'http://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
+            url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
             headers = {
-                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
+                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
+                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
+                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+                'Connection': 'keep-alive',
+            }
             response = requests.get(url=url, headers=headers)

-            url_res = re.findall(r"'url': \\\"(.*?)\\\"}", response.text)
-            title_res = re.findall(r"'title': \\\"(.*?)\\\",\\n", response.text)
-            body_res = re.findall(r"\"contentText\":\"(.*?)\"", response.text)
+            # check if request success
+            if response.status_code == 200:
+                soup = BeautifulSoup(response.text, 'html.parser')
+                url_res = []
+                title_res = []
+                body_res = []
+                for item in soup.select('.result.c-container'):
+                    # extract title
+                    title_res.append(item.select_one('h3 a').get_text(strip=True))
+                    url_res.append(item.select_one('h3 a')['href'])
+                    body_res.append(item.select_one('.c-abstract').get_text(strip=True) if item.select_one('.c-abstract') else '')
             baidu_res = [{"content": re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body)} for
                          url, title, body in zip(url_res, title_res, body_res)]
             del body_res, url_res, title_res
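The change above replaces brittle regexes over inline JavaScript with BeautifulSoup CSS selectors. A minimal standalone sketch of the new parsing pattern; the HTML snippet is a fabricated stand-in for a real results page:

```python
# Parse search-result blocks with CSS selectors instead of regexes.
from bs4 import BeautifulSoup

html = """
<div class="result c-container">
  <h3><a href="https://example.com">Example title</a></h3>
  <div class="c-abstract">Example abstract text.</div>
</div>
"""

soup = BeautifulSoup(html, "html.parser")
for item in soup.select(".result.c-container"):
    link = item.select_one("h3 a")
    abstract = item.select_one(".c-abstract")
    print(link.get_text(strip=True), link["href"],
          abstract.get_text(strip=True) if abstract else "")
```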
@@ -19,7 +19,7 @@ import json
 import os
 import logging
 from functools import partial
-from typing import Tuple, Union
+from typing import Any, Tuple, Union

 import pandas as pd

@@ -34,6 +34,7 @@ _IS_RAW_CONF = "_is_raw_conf"
 class ComponentParamBase(ABC):
     def __init__(self):
         self.output_var_name = "output"
+        self.infor_var_name = "infor"
         self.message_history_window_size = 22
         self.query = []
         self.inputs = []
@@ -384,6 +385,11 @@ class ComponentBase(ABC):
           "params": {}
         }
         """
+        out = getattr(self._param, self._param.output_var_name)
+        if isinstance(out, pd.DataFrame) and "chunks" in out:
+            del out["chunks"]
+            setattr(self._param, self._param.output_var_name, out)
+
         return """{{
             "component_name": "{}",
             "params": {},
@@ -396,6 +402,8 @@ class ComponentBase(ABC):
         )

     def __init__(self, canvas, id, param: ComponentParamBase):
+        from agent.canvas import Canvas  # Local import to avoid cyclic dependency
+        assert isinstance(canvas, Canvas), "canvas must be an instance of Canvas"
         self._canvas = canvas
         self._id = id
         self._param = param
@@ -429,7 +437,7 @@ class ComponentBase(ABC):
         if not isinstance(o, partial):
             if not isinstance(o, pd.DataFrame):
                 if isinstance(o, list):
-                    return self._param.output_var_name, pd.DataFrame(o)
+                    return self._param.output_var_name, pd.DataFrame(o).dropna()
                 if o is None:
                     return self._param.output_var_name, pd.DataFrame()
                 return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
@@ -437,15 +445,15 @@ class ComponentBase(ABC):

         if allow_partial or not isinstance(o, partial):
             if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
-                return pd.DataFrame(o if isinstance(o, list) else [o])
+                return pd.DataFrame(o if isinstance(o, list) else [o]).dropna()
             return self._param.output_var_name, o

         outs = None
         for oo in o():
             if not isinstance(oo, pd.DataFrame):
-                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
+                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo]).dropna()
             else:
-                outs = oo
+                outs = oo.dropna()
         return self._param.output_var_name, outs

     def reset(self):
@@ -455,27 +463,18 @@ class ComponentBase(ABC):
     def set_output(self, v):
         setattr(self._param, self._param.output_var_name, v)

-    def get_input(self):
-        if self._param.debug_inputs:
-            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])
+    def set_infor(self, v):
+        setattr(self._param, self._param.infor_var_name, v)

-        reversed_cpnts = []
-        if len(self._canvas.path) > 1:
-            reversed_cpnts.extend(self._canvas.path[-2])
-        reversed_cpnts.extend(self._canvas.path[-1])
-
-        if self._param.query:
-            self._param.inputs = []
+    def _fetch_outputs_from(self, sources: list[dict[str, Any]]) -> list[pd.DataFrame]:
         outs = []
-        for q in self._param.query:
+        for q in sources:
             if q.get("component_id"):
-                if q["component_id"].split("@")[0].lower().find("begin") >= 0:
+                if "@" in q["component_id"] and q["component_id"].split("@")[0].lower().find("begin") >= 0:
                     cpn_id, key = q["component_id"].split("@")
                     for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                         if p["key"] == key:
                             outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
-                            self._param.inputs.append({"component_id": q["component_id"],
-                                                       "content": p.get("value", "")})
                             break
                     else:
                         assert False, f"Can't find parameter '{key}' for {cpn_id}"
@@ -484,19 +483,46 @@ class ComponentBase(ABC):
                 if q["component_id"].lower().find("answer") == 0:
                     txt = []
                     for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
-                        txt.append(f"{r.upper()}: {c}")
+                        txt.append(f"{r.upper()}:{c}")
                     txt = "\n".join(txt)
-                    self._param.inputs.append({"content": txt, "component_id": q["component_id"]})
                     outs.append(pd.DataFrame([{"content": txt}]))
                     continue

                 outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
-                self._param.inputs.append({"component_id": q["component_id"],
-                                           "content": "\n".join(
-                                               [str(d["content"]) for d in outs[-1].to_dict('records')])})
             elif q.get("value"):
-                self._param.inputs.append({"component_id": None, "content": q["value"]})
                 outs.append(pd.DataFrame([{"content": q["value"]}]))
+        return outs
+
+    def get_input(self):
+        if self._param.debug_inputs:
+            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])
+
+        reversed_cpnts = []
+        if len(self._canvas.path) > 1:
+            reversed_cpnts.extend(self._canvas.path[-2])
+        reversed_cpnts.extend(self._canvas.path[-1])
+        up_cpns = self.get_upstream()
+        reversed_up_cpnts = [cpn for cpn in reversed_cpnts if cpn in up_cpns]
+
+        if self._param.query:
+            self._param.inputs = []
+            outs = self._fetch_outputs_from(self._param.query)
+
+            for out in outs:
+                records = out.to_dict("records")
+                content: str
+
+                if len(records) > 1:
+                    content = "\n".join(
+                        [str(d["content"]) for d in records]
+                    )
+                else:
+                    content = records[0]["content"]
+
+                self._param.inputs.append({
+                    "component_id": records[0].get("component_id"),
+                    "content": content
+                })
+
         if outs:
             df = pd.concat(outs, ignore_index=True)
             if "content" in df:
@@ -505,7 +531,7 @@ class ComponentBase(ABC):

         upstream_outs = []

-        for u in reversed_cpnts[::-1]:
+        for u in reversed_up_cpnts[::-1]:
             if self.get_component_name(u) in ["switch", "concentrator"]:
                 continue
             if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
@@ -545,7 +571,7 @@ class ComponentBase(ABC):
         return df

     def get_input_elements(self):
-        assert self._param.query, "Please identify input parameters firstly."
+        assert self._param.query, "Please verify the input parameters first."
         eles = []
         for q in self._param.query:
             if q.get("component_id"):
@@ -555,7 +581,7 @@ class ComponentBase(ABC):
                     eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
                     continue

-                eles.append({"name": self._canvas.get_compnent_name(cpn_id), "key": cpn_id})
+                eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
             else:
                 eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
         return eles
@@ -565,8 +591,10 @@ class ComponentBase(ABC):
         if len(self._canvas.path) > 1:
             reversed_cpnts.extend(self._canvas.path[-2])
         reversed_cpnts.extend(self._canvas.path[-1])
+        up_cpns = self.get_upstream()
+        reversed_up_cpnts = [cpn for cpn in reversed_cpnts if cpn in up_cpns]

-        for u in reversed_cpnts[::-1]:
+        for u in reversed_up_cpnts[::-1]:
             if self.get_component_name(u) in ["switch", "answer"]:
                 continue
             return self._canvas.get_component(u)["obj"].output()[1]
@@ -584,3 +612,7 @@ class ComponentBase(ABC):
     def get_parent(self):
         pid = self._canvas.get_component(self._id)["parent_id"]
         return self._canvas.get_component(pid)["obj"]
+
+    def get_upstream(self):
+        cpn_nms = self._canvas.get_component(self._id)['upstream']
+        return cpn_nms
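The key behavioral change above is that input collection now intersects the executed canvas path with the component's declared upstream ids, so outputs from unrelated branches are no longer picked up. An illustrative reduction of that filter, with fabricated component ids:

```python
# Only components that are both on the executed path and declared upstream
# of the current component are considered when collecting inputs.
path = [["begin"], ["Retrieval:A", "Generate:B", "Answer:C"]]  # stand-in for canvas.path
upstream = ["Retrieval:A", "Answer:C"]                          # this component's declared upstream

reversed_cpnts = []
if len(path) > 1:
    reversed_cpnts.extend(path[-2])
reversed_cpnts.extend(path[-1])

reversed_up_cpnts = [c for c in reversed_cpnts if c in upstream]
print(reversed_up_cpnts)  # ['Retrieval:A', 'Answer:C'] -- Generate:B is skipped
```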
@@ -50,26 +50,29 @@ class CategorizeParam(GenerateParam):
         for c, desc in self.category_description.items():
             if desc.get("description"):
                 descriptions.append(
-                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))
+                    "\nCategory: {}\nDescription: {}".format(c, desc["description"]))

         self.prompt = """
-        You're a text classifier. You need to categorize the user’s questions into {} categories,
-        namely: {}
-        Here's description of each category:
-        {}
+        Role: You're a text classifier.
+        Task: You need to categorize the user’s questions into {} categories, namely: {}

-        You could learn from the following examples:
-        {}
-        You could learn from the above examples.
-        Just mention the category names, no need for any additional words.
+        Here's description of each category:
+        {}

-        ---- Real Data ----
-        {}
+        You could learn from the following examples:
+        {}
+        You could learn from the above examples.
+
+        Requirements:
+        - Just mention the category names, no need for any additional words.
+
+        ---- Real Data ----
+        USER: {}\n
         """.format(
             len(self.category_description.keys()),
             "/".join(list(self.category_description.keys())),
             "\n".join(descriptions),
-            "- ".join(cate_lines),
+            "\n\n- ".join(cate_lines),
             chat_hist
         )
         return self.prompt
@@ -80,18 +83,28 @@ class Categorize(Generate, ABC):

     def _run(self, history, **kwargs):
         input = self.get_input()
         input = " - ".join(input["content"]) if "content" in input else ""
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+        self._canvas.set_component_infor(self._id, {"prompt":self._param.get_prompt(input),"messages": [{"role": "user", "content": "\nCategory: "}],"conf": self._param.gen_conf()})
+
         ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
                             self._param.gen_conf())
         logging.debug(f"input: {input}, answer: {str(ans)}")
+        # Count the number of times each category appears in the answer.
+        category_counts = {}
         for c in self._param.category_description.keys():
-            if ans.lower().find(c.lower()) >= 0:
-                return Categorize.be_output(self._param.category_description[c]["to"])
+            count = ans.lower().count(c.lower())
+            category_counts[c] = count
+
+        # If a category is found, return the category with the highest count.
+        if any(category_counts.values()):
+            max_category = max(category_counts.items(), key=lambda x: x[1])
+            return Categorize.be_output(self._param.category_description[max_category[0]]["to"])

         return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])

     def debug(self, **kwargs):
         df = self._run([], **kwargs)
         cpn_id = df.iloc[0, 0]
-        return Categorize.be_output(self._canvas.get_compnent_name(cpn_id))
+        return Categorize.be_output(self._canvas.get_component_name(cpn_id))
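The new selection rule counts how often each category name appears in the model's answer and takes the maximum, instead of returning the first category mentioned anywhere. A standalone sketch with a fabricated category config:

```python
# Count-based category selection, reduced from the component above.
category_description = {  # fabricated example config
    "refund": {"to": "Refund:Handler"},
    "shipping": {"to": "Shipping:Handler"},
}
ans = "This looks like shipping; shipping delays, not a refund."

category_counts = {c: ans.lower().count(c.lower()) for c in category_description}
if any(category_counts.values()):
    best = max(category_counts.items(), key=lambda x: x[1])[0]
    target = category_description[best]["to"]
else:
    target = list(category_description.items())[-1][1]["to"]  # fallback: last category
print(target)  # Shipping:Handler -- two mentions beat one
```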
agent/component/code.py (new file, 138 lines)
@@ -0,0 +1,138 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from abc import ABC
from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field, field_validator

from agent.component.base import ComponentBase, ComponentParamBase
from api import settings


class Language(str, Enum):
    PYTHON = "python"
    NODEJS = "nodejs"


class CodeExecutionRequest(BaseModel):
    code_b64: str = Field(..., description="Base64 encoded code string")
    language: Language = Field(default=Language.PYTHON, description="Programming language")
    arguments: Optional[dict] = Field(default={}, description="Arguments")

    @field_validator("code_b64")
    @classmethod
    def validate_base64(cls, v: str) -> str:
        try:
            base64.b64decode(v, validate=True)
            return v
        except Exception as e:
            raise ValueError(f"Invalid base64 encoding: {str(e)}")

    @field_validator("language", mode="before")
    @classmethod
    def normalize_language(cls, v) -> str:
        if isinstance(v, str):
            low = v.lower()
            if low in ("python", "python3"):
                return "python"
            elif low in ("javascript", "nodejs"):
                return "nodejs"
        raise ValueError(f"Unsupported language: {v}")


class CodeParam(ComponentParamBase):
    """
    Define the code sandbox component parameters.
    """

    def __init__(self):
        super().__init__()
        self.lang = "python"
        self.script = ""
        self.arguments = []
        self.address = f"http://{settings.SANDBOX_HOST}:9385/run"
        self.enable_network = True

    def check(self):
        self.check_valid_value(self.lang, "Support languages", ["python", "python3", "nodejs", "javascript"])
        self.check_defined_type(self.enable_network, "Enable network", ["bool"])


class Code(ComponentBase, ABC):
    component_name = "Code"

    def _run(self, history, **kwargs):
        arguments = {}
        for input in self._param.arguments:
            if "@" in input["component_id"]:
                component_id = input["component_id"].split("@")[0]
                refered_component_key = input["component_id"].split("@")[1]
                refered_component = self._canvas.get_component(component_id)["obj"]

                for param in refered_component._param.query:
                    if param["key"] == refered_component_key:
                        if "value" in param:
                            arguments[input["name"]] = param["value"]
            else:
                cpn = self._canvas.get_component(input["component_id"])["obj"]
                if cpn.component_name.lower() == "answer":
                    arguments[input["name"]] = self._canvas.get_history(1)[0]["content"]
                    continue
                _, out = cpn.output(allow_partial=False)
                if not out.empty:
                    arguments[input["name"]] = "\n".join(out["content"])

        return self._execute_code(
            language=self._param.lang,
            code=self._param.script,
            arguments=arguments,
            address=self._param.address,
            enable_network=self._param.enable_network,
        )

    def _execute_code(self, language: str, code: str, arguments: dict, address: str, enable_network: bool):
        import requests

        try:
            code_b64 = self._encode_code(code)
            code_req = CodeExecutionRequest(code_b64=code_b64, language=language, arguments=arguments).model_dump()
        except Exception as e:
            return Code.be_output("**Error**: construct code request error: " + str(e))

        try:
            resp = requests.post(url=address, json=code_req, timeout=10)
            body = resp.json()
            if body:
                stdout = body.get("stdout")
                stderr = body.get("stderr")
                return Code.be_output(stdout or stderr)
            else:
                return Code.be_output("**Error**: There is no response from sanbox")

        except Exception as e:
            return Code.be_output("**Error**: Internal error in sanbox: " + str(e))

    def _encode_code(self, code: str) -> str:
        return base64.b64encode(code.encode("utf-8")).decode("utf-8")

    def get_input_elements(self):
        elements = []
        for input in self._param.arguments:
            cpn_id = input["component_id"]
            elements.append({"key": cpn_id, "name": input["name"]})
        return elements
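A hedged sketch of the request the Code component sends to the sandbox executor. The endpoint path and response shape follow the component above; the host, port, and script are illustrative and assume a sandbox-executor-manager reachable on localhost:

```python
# Post base64-encoded code to the sandbox /run endpoint and print the result.
import base64
import requests

script = 'print("hello from the sandbox")'
payload = {
    "code_b64": base64.b64encode(script.encode("utf-8")).decode("utf-8"),
    "language": "python",
    "arguments": {},
}
resp = requests.post("http://localhost:9385/run", json=payload, timeout=10)
body = resp.json()
print(body.get("stdout") or body.get("stderr"))
```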
@@ -82,7 +82,10 @@ class Email(ComponentBase, ABC):
                 logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

                 context = smtplib.ssl.create_default_context()
-                with smtplib.SMTP_SSL(self._param.smtp_server, self._param.smtp_port, context=context) as server:
+                with smtplib.SMTP(self._param.smtp_server, self._param.smtp_port) as server:
+                    server.ehlo()
+                    server.starttls(context=context)
+                    server.ehlo()
                     # Login
                     logging.info(f"Attempting to login with email: {self._param.email}")
                     server.login(self._param.email, self._param.password)
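This switches from implicit TLS (`SMTP_SSL`, typically port 465) to STARTTLS, where a plain SMTP session is upgraded to TLS in-band (typically port 587). A minimal sketch of the same flow; the server, port, and credentials are placeholders:

```python
# STARTTLS handshake: connect plain, then upgrade the session to TLS.
import smtplib
import ssl

context = ssl.create_default_context()
with smtplib.SMTP("smtp.example.com", 587) as server:
    server.ehlo()                     # identify before the upgrade
    server.starttls(context=context)  # upgrade the connection to TLS
    server.ehlo()                     # re-identify over the encrypted channel
    server.login("user@example.com", "app-password")
```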
@@ -52,15 +52,16 @@ class ExeSQLParam(GenerateParam):
         self.check_positive_integer(self.top_n, "Number of records")
-        if self.database == "rag_flow":
+        if self.host == "ragflow-mysql":
-            raise ValueError("The host is not accessible.")
+            raise ValueError("For the security reason, it dose not support database named rag_flow.")
         if self.password == "infini_rag_flow":
-            raise ValueError("The host is not accessible.")
+            raise ValueError("For the security reason, it dose not support database named rag_flow.")


 class ExeSQL(Generate, ABC):
     component_name = "ExeSQL"

-    def _refactor(self,ans):
+    def _refactor(self, ans):
         ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
         match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
         if match:
             ans = match.group(1)  # Query content
@@ -78,7 +79,6 @@ class ExeSQL(Generate, ABC):
         ans = self.get_input()
         ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
         ans = self._refactor(ans)
-        logging.info("db_type: ",self._param.db_type)
         if self._param.db_type in ["mysql", "mariadb"]:
             db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
@@ -101,51 +101,51 @@ class ExeSQL(Generate, ABC):
         if not hasattr(self, "_loop"):
             setattr(self, "_loop", 0)
         self._loop += 1
-        input_list=re.split(r';', ans.replace(r"\n", " "))
+        input_list = re.split(r';', ans.replace(r"\n", " "))
         sql_res = []
         for i in range(len(input_list)):
-            single_sql=input_list[i]
+            single_sql = input_list[i]
             single_sql = single_sql.replace('```','')
             while self._loop <= self._param.loop:
-                self._loop+=1
+                self._loop += 1
                 if not single_sql:
                     break
                 try:
                     logging.info("single_sql: ", single_sql)
                     cursor.execute(single_sql)
                     if cursor.rowcount == 0:
                         sql_res.append({"content": "No record in the database!"})
                         break
                     if self._param.db_type == 'mssql':
-                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),columns = [desc[0] for desc in cursor.description])
+                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
+                                                               columns=[desc[0] for desc in cursor.description])
                     else:
                         single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
                         single_res.columns = [i[0] for i in cursor.description]
-                    sql_res.append({"content": single_res.to_markdown()})
+                    sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
                     break
                 except Exception as e:
                     single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
                     single_sql = self._refactor(single_sql)
                     if self._loop > self._param.loop:
                         sql_res.append({"content": "Can't query the correct data via SQL statement."})
                         # raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
         db.close()
         if not sql_res:
             return ExeSQL.be_output("")
         return pd.DataFrame(sql_res)

-    def _regenerate_sql(self, failed_sql, error_message,**kwargs):
+    def _regenerate_sql(self, failed_sql, error_message, **kwargs):
         prompt = f'''
         ## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
         ## The original SQL statement is as follows:{failed_sql}.
         ## The contents of the SQL query error report is as follows:{error_message}.
         ## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
         '''
-        self._param.prompt=prompt
+        self._param.prompt = prompt
         kwargs_ = deepcopy(kwargs)
         kwargs_["stream"] = False
         response = Generate._run(self, [], **kwargs_)
         try:
-            regenerated_sql = response.loc[0,"content"]
+            regenerated_sql = response.loc[0, "content"]
             return regenerated_sql
         except Exception as e:
             logging.error(f"Failed to regenerate SQL: {e}")
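The core control flow above is an execute-repair loop: each statement is run, and on failure the LLM is asked to rewrite the SQL until the loop budget runs out. An illustrative skeleton of that pattern, with the database cursor and repair function passed in as assumptions:

```python
# Execute a statement; on error, ask an LLM-backed repair function for a
# corrected statement and retry, up to max_loops attempts.
def run_with_repair(cursor, statement, regenerate, max_loops=3, top_n=8):
    attempts = 0
    while attempts < max_loops:
        attempts += 1
        try:
            cursor.execute(statement)
            return cursor.fetchmany(top_n)
        except Exception as err:  # sketch only; real code narrows this
            statement = regenerate(statement, str(err))  # LLM-backed repair
    return None  # mirrors "Can't query the correct data via SQL statement."
```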
@@ -13,15 +13,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import re
 from functools import partial
+from typing import Any
 import pandas as pd
 from api.db import LLMType
 from api.db.services.conversation_service import structure_answer
-from api.db.services.dialog_service import message_fit_in
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from plugin import GlobalPluginManager
+from plugin.llm_tool_plugin import llm_tool_metadata_to_openai_tool
+from rag.llm.chat_model import ToolCallSession
+from rag.prompts import message_fit_in
+
+
+class LLMToolPluginCallSession(ToolCallSession):
+    def tool_call(self, name: str, arguments: dict[str, Any]) -> str:
+        tool = GlobalPluginManager.get_llm_tool_by_name(name)
+
+        if tool is None:
+            raise ValueError(f"LLM tool {name} does not exist")
+
+        return tool().invoke(**arguments)


 class GenerateParam(ComponentParamBase):
@@ -40,6 +55,7 @@ class GenerateParam(ComponentParamBase):
         self.frequency_penalty = 0
         self.cite = True
         self.parameters = []
+        self.llm_enabled_tools = []

     def check(self):
         self.check_decimal_float(self.temperature, "[Generate] Temperature")
@@ -69,36 +85,35 @@ class Generate(ComponentBase):
     component_name = "Generate"

     def get_dependent_components(self):
-        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
-                     if para.get("component_id") \
-                     and para["component_id"].lower().find("answer") < 0 \
-                     and para["component_id"].lower().find("begin") < 0])
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
         return list(cpnts)

     def set_cite(self, retrieval_res, answer):
-        retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
         if "empty_response" in retrieval_res.columns:
             retrieval_res["empty_response"].fillna("", inplace=True)
+        chunks = json.loads(retrieval_res["chunks"][0])
         answer, idx = settings.retrievaler.insert_citations(answer,
-                                                            [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
-                                                            [ck["vector"] for _, ck in retrieval_res.iterrows()],
+                                                            [ck["content_ltks"] for ck in chunks],
+                                                            [ck["vector"] for ck in chunks],
                                                             LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                                       self._canvas.get_embedding_model()), tkweight=0.7,
                                                             vtweight=0.3)
         doc_ids = set([])
         recall_docs = []
         for i in idx:
-            did = retrieval_res.loc[int(i), "doc_id"]
+            did = chunks[int(i)]["doc_id"]
             if did in doc_ids:
                 continue
             doc_ids.add(did)
-            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})
+            recall_docs.append({"doc_id": did, "doc_name": chunks[int(i)]["docnm_kwd"]})

-        del retrieval_res["vector"]
-        del retrieval_res["content_ltks"]
+        for c in chunks:
+            del c["vector"]
+            del c["content_ltks"]

         reference = {
-            "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
+            "chunks": chunks,
             "doc_aggs": recall_docs
         }
@@ -110,33 +125,56 @@ class Generate(ComponentBase):
         return res

     def get_input_elements(self):
-        if self._param.parameters:
-            return [{"key": "user", "name": "Input your question here:"}, *self._param.parameters]
-
-        return [{"key": "user", "name": "Input your question here:"}]
+        key_set = set([])
+        res = [{"key": "user", "name": "Input your question here:"}]
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res

     def _run(self, history, **kwargs):
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

+        if len(self._param.llm_enabled_tools) > 0:
+            tools = GlobalPluginManager.get_llm_tools_by_names(self._param.llm_enabled_tools)
+
+            chat_mdl.bind_tools(
+                LLMToolPluginCallSession(),
+                [llm_tool_metadata_to_openai_tool(t.get_metadata()) for t in tools]
+            )
+
         prompt = self._param.prompt

         retrieval_res = []
         self._param.inputs = []
-        for para in self._param.parameters:
-            if not para.get("component_id"):
-                continue
-            component_id = para["component_id"].split("@")[0]
-            if para["component_id"].lower().find("@") >= 0:
-                cpn_id, key = para["component_id"].split("@")
+        for para in self.get_input_elements()[1:]:
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
                 for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                     if p["key"] == key:
                         kwargs[para["key"]] = p.get("value", "")
                         self._param.inputs.append(
-                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                            {"component_id": para["key"], "content": kwargs[para["key"]]})
                         break
                 else:
                     assert False, f"Can't find parameter '{key}' for {cpn_id}"
                 continue
+
+            component_id = para["key"]
             cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
                 hist = self._canvas.get_history(1)
@@ -152,8 +190,8 @@ class Generate(ComponentBase):
             else:
                 if cpn.component_name.lower() == "retrieval":
                     retrieval_res.append(out)
-                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
-                self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
+                self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})

         if retrieval_res:
             retrieval_res = pd.concat(retrieval_res, ignore_index=True)
@@ -175,19 +213,20 @@ class Generate(ComponentBase):
             return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             return pd.DataFrame([res])

         msg = self._canvas.get_history(self._param.message_history_window_size)
         if len(msg) < 1:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
         if len(msg) < 2:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())

-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
         ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
+        self._canvas.set_component_infor(self._id, {"prompt":msg[0]["content"],"messages": msg[1:],"conf": self._param.gen_conf()})
+        if self._param.cite and "chunks" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, ans)
             return pd.DataFrame([res])
@@ -196,28 +235,30 @@ class Generate(ComponentBase):
     def stream_output(self, chat_mdl, prompt, retrieval_res):
         res = None
         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             yield res
             self.set_output(res)
             return

         msg = self._canvas.get_history(self._param.message_history_window_size)
+        if msg and msg[0]['role'] == 'assistant':
+            msg.pop(0)
         if len(msg) < 1:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
         if len(msg) < 2:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         answer = ""
         for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
             res = {"content": ans, "reference": []}
             answer = ans
             yield res

-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+        if self._param.cite and "chunks" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, answer)
             yield res

+        self._canvas.set_component_infor(self._id, {"prompt":msg[0]["content"],"messages": msg[1:],"conf": self._param.gen_conf()})
         self.set_output(Generate.be_output(res))

     def debug(self, **kwargs):
@@ -230,5 +271,6 @@ class Generate(ComponentBase):
         for n, v in kwargs.items():
             prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

-        ans = chat_mdl.chat(prompt, [{"role": "user", "content": kwargs.get("user", "")}], self._param.gen_conf())
+        u = kwargs.get("user")
+        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
         return pd.DataFrame([ans])
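With this change, prompt inputs are discovered by scanning the prompt text for `{Component:Id}` and `{begin@key}` placeholders rather than reading a separate `parameters` list. A standalone sketch of the same regex-based extraction, with a fabricated prompt:

```python
# Extract unique placeholder tokens from a prompt, as get_input_elements() does.
import re

prompt = ("Use this DDL: {Retrieval:SillyPartsCheer}. "
          "User region: {begin@region}. Repeated: {Retrieval:SillyPartsCheer}.")

seen, keys = set(), []
for m in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", prompt, flags=re.IGNORECASE):
    token = m.group(1)
    if token not in seen:
        seen.add(token)
        keys.append(token)
print(keys)  # ['Retrieval:SillyPartsCheer', 'begin@region'] -- duplicates collapsed
```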
@@ -35,12 +35,14 @@ class InvokeParam(ComponentParamBase):
         self.url = ""
         self.timeout = 60
         self.clean_html = False
+        self.datatype = "json"  # New parameter to determine data posting type

     def check(self):
         self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
         self.check_empty(self.url, "End point URL")
         self.check_positive_integer(self.timeout, "Timeout time in second")
         self.check_boolean(self.clean_html, "Clean HTML")
+        self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata'])  # Check for valid datapost value


 class Invoke(ComponentBase, ABC):
@@ -94,6 +96,13 @@ class Invoke(ComponentBase, ABC):
                 return Invoke.be_output(response.text)

         if method == 'put':
+            if self._param.datatype.lower() == 'json':
+                response = requests.put(url=url,
+                                        json=args,
+                                        headers=headers,
+                                        proxies=proxies,
+                                        timeout=self._param.timeout)
+            else:
                 response = requests.put(url=url,
                                         data=args,
                                         headers=headers,
@@ -105,11 +114,18 @@ class Invoke(ComponentBase, ABC):
             return Invoke.be_output(response.text)

         if method == 'post':
+            if self._param.datatype.lower() == 'json':
+                response = requests.post(url=url,
+                                         json=args,
+                                         headers=headers,
+                                         proxies=proxies,
+                                         timeout=self._param.timeout)
+            else:
                 response = requests.post(url=url,
                                          data=args,
                                          headers=headers,
                                          proxies=proxies,
                                          timeout=self._param.timeout)
             if self._param.clean_html:
                 sections = HtmlParser()(None, response.content)
                 return Invoke.be_output("\n".join(sections))
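The new `datatype` parameter decides whether the request body is JSON (`json=`) or URL-encoded form data (`data=`). A reduced sketch of that switch, with placeholder URL and arguments:

```python
# Send the same arguments as a JSON body or as form data, per config.
import requests

def send(url, args, datatype="json", timeout=60):
    if datatype.lower() == "json":
        return requests.post(url=url, json=args, timeout=timeout)
    return requests.post(url=url, data=args, timeout=timeout)  # formdata

# send("https://example.com/hook", {"q": "hello"}, datatype="formdata")
```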
@@ -38,6 +38,10 @@ class IterationItem(ComponentBase, ABC):
         ans = parent.get_input()
         ans = parent._param.delimiter.join(ans["content"]) if "content" in ans else ""
         ans = [a.strip() for a in ans.split(parent._param.delimiter)]
+        if not ans:
+            self._idx = -1
+            return pd.DataFrame()

         df = pd.DataFrame([{"content": ans[self._idx]}])
         self._idx += 1
         if self._idx >= len(ans):
@@ -51,12 +51,19 @@ class KeywordExtract(Generate, ABC):

     def _run(self, history, **kwargs):
         query = self.get_input()
-        query = str(query["content"][0]) if "content" in query else ""
+        if hasattr(query, "to_dict") and "content" in query:
+            query = ", ".join(map(str, query["content"].dropna()))
+        else:
+            query = str(query)

         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+        self._canvas.set_component_infor(self._id, {"prompt":self._param.get_prompt(),"messages": [{"role": "user", "content": query}],"conf": self._param.gen_conf()})
+
         ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
                             self._param.gen_conf())

+        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
         ans = re.sub(r".*keyword:", "", ans).strip()
         logging.debug(f"ans: {ans}")
         return KeywordExtract.be_output(ans)
@@ -13,24 +13,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import logging
+import re
 from abc import ABC

 import pandas as pd

 from api.db import LLMType
-from api.db.services.dialog_service import label_question
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from rag.app.tag import label_question
+from rag.prompts import kb_prompt
+from rag.utils.tavily_conn import Tavily


 class RetrievalParam(ComponentParamBase):
     """
     Define the Retrieval component parameters.
     """

     def __init__(self):
         super().__init__()
         self.similarity_threshold = 0.2
@@ -38,12 +42,15 @@ class RetrievalParam(ComponentParamBase):
         self.top_n = 8
         self.top_k = 1024
         self.kb_ids = []
+        self.kb_vars = []
         self.rerank_id = ""
         self.empty_response = ""
+        self.tavily_api_key = ""
+        self.use_kg = False

     def check(self):
         self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
-        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
+        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
         self.check_positive_number(self.top_n, "[Retrieval] Top N")

@@ -53,14 +60,34 @@ class Retrieval(ComponentBase, ABC):
     def _run(self, history, **kwargs):
         query = self.get_input()
         query = str(query["content"][0]) if "content" in query else ""
+        query = re.split(r"(USER:|ASSISTANT:)", query)[-1]

-        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
+        kb_ids: list[str] = self._param.kb_ids or []
+
+        kb_vars = self._fetch_outputs_from(self._param.kb_vars)
+
+        if len(kb_vars) > 0:
+            for kb_var in kb_vars:
+                if len(kb_var) == 1:
+                    kb_var_value = str(kb_var["content"][0])
+
+                    for v in kb_var_value.split(","):
+                        kb_ids.append(v)
+                else:
+                    for v in kb_var.to_dict("records"):
+                        kb_ids.append(v["content"])
+
+        filtered_kb_ids: list[str] = [kb_id for kb_id in kb_ids if kb_id]
+
+        kbs = KnowledgebaseService.get_by_ids(filtered_kb_ids)
         if not kbs:
             return Retrieval.be_output("")

         embd_nms = list(set([kb.embd_id for kb in kbs]))
         assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

+        embd_mdl = None
+        if embd_nms:
             embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
             self._canvas.set_embedding_model(embd_nms[0])

@@ -68,11 +95,33 @@ class Retrieval(ComponentBase, ABC):
         if self._param.rerank_id:
             rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

-        kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
-                                                 1, self._param.top_n,
-                                                 self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
-                                                 aggs=False, rerank_mdl=rerank_mdl,
-                                                 rank_feature=label_question(query, kbs))
+        if kbs:
+            kbinfos = settings.retrievaler.retrieval(
+                query,
+                embd_mdl,
+                [kb.tenant_id for kb in kbs],
+                filtered_kb_ids,
+                1,
+                self._param.top_n,
+                self._param.similarity_threshold,
+                1 - self._param.keywords_similarity_weight,
+                aggs=False,
+                rerank_mdl=rerank_mdl,
+                rank_feature=label_question(query, kbs),
+            )
+        else:
+            kbinfos = {"chunks": [], "doc_aggs": []}
+
+        if self._param.use_kg and kbs:
+            ck = settings.kg_retrievaler.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl, LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
+            if ck["content_with_weight"]:
+                kbinfos["chunks"].insert(0, ck)
+
+        if self._param.tavily_api_key:
+            tav = Tavily(self._param.tavily_api_key)
+            tav_res = tav.retrieve_chunks(query)
+            kbinfos["chunks"].extend(tav_res["chunks"])
+            kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])

         if not kbinfos["chunks"]:
             df = Retrieval.be_output("")
@@ -80,10 +129,6 @@ class Retrieval(ComponentBase, ABC):
             df["empty_response"] = self._param.empty_response
             return df

-        df = pd.DataFrame(kbinfos["chunks"])
-        df["content"] = df["content_with_weight"]
-        del df["content_with_weight"]
+        df = pd.DataFrame({"content": kb_prompt(kbinfos, 200000), "chunks": json.dumps(kbinfos["chunks"])})
         logging.debug("{} {}".format(query, df))
-        return df
-
+        return df.dropna()
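The knowledge-base id resolution above merges statically configured `kb_ids` with ids arriving from upstream variables, which may be comma-separated strings or one id per row, and then drops empties. An illustrative reduction with fabricated ids:

```python
# Merge static kb ids with upstream-provided ones, then filter out blanks.
static_kb_ids = ["kb_a"]
kb_var_values = ["kb_b,kb_c", ""]  # fabricated upstream outputs

kb_ids = list(static_kb_ids)
for value in kb_var_values:
    kb_ids.extend(v for v in value.split(","))

filtered_kb_ids = [kb_id for kb_id in kb_ids if kb_id]
print(filtered_kb_ids)  # ['kb_a', 'kb_b', 'kb_c']
```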
@@ -13,93 +13,82 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 import logging
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
+from rag.prompts import full_question


 class RewriteQuestionParam(GenerateParam):
     """
     Define the QuestionRewrite component parameters.
     """

     def __init__(self):
         super().__init__()
         self.temperature = 0.9
         self.prompt = ""
+        self.language = ""

     def check(self):
         super().check()

-    def get_prompt(self, conv):
-        self.prompt = """
-        You are an expert at query expansion to generate a paraphrasing of a question.
-        I can't retrieval relevant information from the knowledge base by using user's question directly.
-        You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
-        writing the abbreviation in its entirety, adding some extra descriptions or explanations,
-        changing the way of expression, translating the original question into another language (English/Chinese), etc.
-        And return 5 versions of question and one is from translation.
-        Just list the question. No other words are needed.
-        """
-        return f"""
-        Role: A helpful assistant
-        Task: Generate a full user question that would follow the conversation.
-        Requirements & Restrictions:
-          - Text generated MUST be in the same language of the original user's question.
-          - If the user's latest question is completely, don't do anything, just return the original question.
-          - DON'T generate anything except a refined question.
-
-        ######################
-        -Examples-
-        ######################
-        # Example 1
-        ## Conversation
-        USER: What is the name of Donald Trump's father?
-        ASSISTANT: Fred Trump.
-        USER: And his mother?
-        ###############
-        Output: What's the name of Donald Trump's mother?
-        ------------
-        # Example 2
-        ## Conversation
-        USER: What is the name of Donald Trump's father?
-        ASSISTANT: Fred Trump.
-        USER: And his mother?
-        ASSISTANT: Mary Trump.
-        User: What's her full name?
-        ###############
-        Output: What's the full name of Donald Trump's mother Mary Trump?
-        ######################
-        # Real Data
-        ## Conversation
-        {conv}
-        ###############
-        """
-        return self.prompt


 class RewriteQuestion(Generate, ABC):
     component_name = "RewriteQuestion"

     def _run(self, history, **kwargs):
         hist = self._canvas.get_history(self._param.message_history_window_size)
-        conv = []
-        for m in hist:
-            if m["role"] not in ["user", "assistant"]:
-                continue
-            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
-        conv = "\n".join(conv)
-
-        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
-                            self._param.gen_conf())
+        query = self.get_input()
+        query = str(query["content"][0]) if "content" in query else ""
+        messages = [h for h in hist if h["role"]!="system"]
+        if messages[-1]["role"] != "user":
+            messages.append({"role": "user", "content": query})
+        ans = full_question(self._canvas.get_tenant_id(), self._param.llm_id, messages, self.gen_lang(self._param.language))
         self._canvas.history.pop()
         self._canvas.history.append(("user", ans))

         logging.debug(ans)
         return RewriteQuestion.be_output(ans)

+    @staticmethod
+    def gen_lang(language):
+        # convert code lang to language word for the prompt
+        language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
+                         'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
+                         'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
+                         'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
+                         'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
+                         'zh-tw': 'Chinese', 'co': 'Corsican',
+                         'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
+                         'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
+                         'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
+                         'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
+                         'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
+                         'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
+                         'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
+                         'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
+                         'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
+                         'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
+                         'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
+                         'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
+                         'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
+                         'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
+                         'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
+                         'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
+                         'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
+                         'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
+                         'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
+                         'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
+                         'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
+                         'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
+                         'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
+                         'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
+                         'su': 'Sundanese',
+                         'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
+                         'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
+                         'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
+                         'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
+                         'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
+                         'yo': 'Yoruba', 'zu': 'Zulu'}
+        if language in language_dict:
+            return language_dict[language]
+        else:
+            return ""
@@ -54,7 +54,7 @@ class Switch(ComponentBase, ABC):
         for item in cond["items"]:
             if not item["cpn_id"]:
                 continue
-            if item["cpn_id"].find("begin") >= 0:
+            if item["cpn_id"].lower().find("begin") >= 0 or item["cpn_id"].lower().find("answer") >= 0:
                 continue
             cid = item["cpn_id"].split("@")[0]
             res.append(cid)
@@ -75,7 +75,7 @@ class Switch(ComponentBase, ABC):
                         res.append(self.process_operator(p.get("value",""), item["operator"], item.get("value", "")))
                         break
                 else:
-                    out = self._canvas.get_component(cid)["obj"].output()[1]
+                    out = self._canvas.get_component(cid)["obj"].output(allow_partial=False)[1]
                     cpn_input = "" if "content" not in out.columns else " ".join([str(s) for s in out["content"]])
                     res.append(self.process_operator(cpn_input, item["operator"], item.get("value", "")))
@@ -38,27 +38,39 @@ class Template(ComponentBase):
     component_name = "Template"

     def get_dependent_components(self):
-        cpnts = set(
-            [
-                para["component_id"].split("@")[0]
-                for para in self._param.parameters
-                if para.get("component_id")
-                and para["component_id"].lower().find("answer") < 0
-                and para["component_id"].lower().find("begin") < 0
-            ]
-        )
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
         return list(cpnts)

+    def get_input_elements(self):
+        key_set = set([])
+        res = []
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res

     def _run(self, history, **kwargs):
         content = self._param.content

         self._param.inputs = []
-        for para in self._param.parameters:
-            if not para.get("component_id"):
-                continue
-            component_id = para["component_id"].split("@")[0]
-            if para["component_id"].lower().find("@") >= 0:
-                cpn_id, key = para["component_id"].split("@")
+        for para in self.get_input_elements():
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
                 for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                     if p["key"] == key:
                         value = p.get("value", "")
@@ -68,6 +80,7 @@ class Template(ComponentBase):
                 assert False, f"Can't find parameter '{key}' for {cpn_id}"
                 continue

+            component_id = para["key"]
             cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
                 hist = self._canvas.get_history(1)
@@ -96,6 +109,7 @@ class Template(ComponentBase):
                 pass

         for n, v in kwargs.items():
+            if not isinstance(v, str):
                 try:
                     v = json.dumps(v, ensure_ascii=False)
                 except Exception:
@@ -103,9 +117,6 @@ class Template(ComponentBase):
             content = re.sub(
                 r"\{%s\}" % re.escape(n), v, content
             )
-        content = re.sub(
-            r"(\\\"|\")", "", content
-        )
         content = re.sub(
             r"(#+)", r" \1 ", content
         )
@@ -114,7 +125,7 @@ class Template(ComponentBase):

     def make_kwargs(self, para, kwargs, value):
         self._param.inputs.append(
-            {"component_id": para["component_id"], "content": value}
+            {"component_id": para["key"], "content": value}
         )
         try:
             value = json.loads(value)
@@ -8,9 +8,7 @@
"components": {
"Answer:SocialAdsWonder": {
"downstream": [
"Retrieval:SillyPartsCheer",
"Retrieval:BrownStreetsRhyme",
"Retrieval:OddSingersRefuse"
"RewriteQuestion:WildIdeasTell"
],
"obj": {
"component_name": "Answer",
@@ -19,8 +17,8 @@
"params": {}
},
"upstream": [
"begin",
"ExeSQL:QuietRosesRun"
"ExeSQL:QuietRosesRun",
"begin"
]
},
"ExeSQL:QuietRosesRun": {
@@ -55,42 +53,23 @@
}
},
"upstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
]
},
"Generate:CuteSidesBuy": {
"Generate:BlueShirtsLaugh": {
"downstream": [
"ExeSQL:QuietRosesRun"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": {},
"params": {
"cite": false,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 512,
"message_history_window_size": 1,
"parameters": [
{
"component_id": "Retrieval:SillyPartsCheer",
"id": "2a77e574-a0a6-4a1a-af39-cb192f1d21f5",
"key": "ddl_input"
},
{
"component_id": "Retrieval:OddSingersRefuse",
"id": "83941a85-0b59-408e-97e5-504964b0e090",
"key": "db_input"
},
{
"component_id": "Retrieval:BrownStreetsRhyme",
"id": "c63d0ae6-7ee2-44a2-8a95-69d03c90cb44",
"key": "sql_input"
}
],
"parameters": [],
"presence_penalty": 0.4,
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {ddl_input}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {db_input}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {sql_input}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:SillyPartsCheer}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:OddSingersRefuse}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:BrownStreetsRhyme}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"temperature": 0.1,
"top_p": 0.3
}
@@ -103,7 +82,7 @@
},
"Retrieval:BrownStreetsRhyme": {
"downstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
],
"obj": {
"component_name": "Retrieval",
@@ -124,12 +103,12 @@
}
},
"upstream": [
"Answer:SocialAdsWonder"
"RewriteQuestion:WildIdeasTell"
]
},
"Retrieval:OddSingersRefuse": {
"downstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
],
"obj": {
"component_name": "Retrieval",
@@ -150,12 +129,12 @@
}
},
"upstream": [
"Answer:SocialAdsWonder"
"RewriteQuestion:WildIdeasTell"
]
},
"Retrieval:SillyPartsCheer": {
"downstream": [
"Generate:CuteSidesBuy"
"Generate:BlueShirtsLaugh"
],
"obj": {
"component_name": "Retrieval",
@@ -175,6 +154,34 @@
"top_n": 18
}
},
"upstream": [
"RewriteQuestion:WildIdeasTell"
]
},
"RewriteQuestion:WildIdeasTell": {
"downstream": [
"Retrieval:OddSingersRefuse",
"Retrieval:BrownStreetsRhyme",
"Retrieval:SillyPartsCheer"
],
"obj": {
"component_name": "RewriteQuestion",
"params": {
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": true,
"max_tokens": 256,
"message_history_window_size": 6,
"parameter": "Precise",
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
}
},
"upstream": [
"Answer:SocialAdsWonder"
]
@@ -202,20 +209,34 @@
"graph": {
"edges": [
{
"id": "reactflow__edge-begin-Answer:SocialAdsWonderc",
"id": "xy-edge__ExeSQL:QuietRosesRunc-Answer:SocialAdsWonderc",
"markerEnd": "logo",
"source": "begin",
"sourceHandle": null,
"source": "ExeSQL:QuietRosesRun",
"sourceHandle": "c",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Answer:SocialAdsWonder",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Answer:SocialAdsWonderb-Retrieval:SillyPartsCheerc",
"id": "xy-edge__begin-Answer:SocialAdsWonderc",
"markerEnd": "logo",
"source": "begin",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Answer:SocialAdsWonder",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Answer:SocialAdsWonderb-RewriteQuestion:WildIdeasTellc",
"markerEnd": "logo",
"source": "Answer:SocialAdsWonder",
"sourceHandle": "b",
@@ -223,27 +244,15 @@
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Retrieval:SillyPartsCheer",
"target": "RewriteQuestion:WildIdeasTell",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Answer:SocialAdsWonderb-Retrieval:BrownStreetsRhymec",
"id": "xy-edge__RewriteQuestion:WildIdeasTellb-Retrieval:OddSingersRefusec",
"markerEnd": "logo",
"source": "Answer:SocialAdsWonder",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Retrieval:BrownStreetsRhyme",
"targetHandle": "c",
"type": "buttonEdge"
},
{
"id": "reactflow__edge-Answer:SocialAdsWonderb-Retrieval:OddSingersRefusec",
"markerEnd": "logo",
"source": "Answer:SocialAdsWonder",
"source": "RewriteQuestion:WildIdeasTell",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
@@ -251,51 +260,41 @@
},
"target": "Retrieval:OddSingersRefuse",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Retrieval:SillyPartsCheerb-Generate:CuteSidesBuyb",
"id": "xy-edge__RewriteQuestion:WildIdeasTellb-Retrieval:BrownStreetsRhymec",
"markerEnd": "logo",
"source": "Retrieval:SillyPartsCheer",
"source": "RewriteQuestion:WildIdeasTell",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:CuteSidesBuy",
"targetHandle": "b",
"type": "buttonEdge"
"target": "Retrieval:BrownStreetsRhyme",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Retrieval:BrownStreetsRhymeb-Generate:CuteSidesBuyb",
"id": "xy-edge__RewriteQuestion:WildIdeasTellb-Retrieval:SillyPartsCheerc",
"markerEnd": "logo",
"source": "Retrieval:BrownStreetsRhyme",
"source": "RewriteQuestion:WildIdeasTell",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:CuteSidesBuy",
"targetHandle": "b",
"type": "buttonEdge"
"target": "Retrieval:SillyPartsCheer",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Retrieval:OddSingersRefuseb-Generate:CuteSidesBuyb",
"id": "xy-edge__Generate:BlueShirtsLaughc-ExeSQL:QuietRosesRunb",
"markerEnd": "logo",
"source": "Retrieval:OddSingersRefuse",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:CuteSidesBuy",
"targetHandle": "b",
"type": "buttonEdge"
},
{
"id": "xy-edge__Generate:CuteSidesBuyc-ExeSQL:QuietRosesRunb",
"markerEnd": "logo",
"source": "Generate:CuteSidesBuy",
"source": "Generate:BlueShirtsLaugh",
"sourceHandle": "c",
"style": {
"stroke": "rgb(202 197 245)",
@@ -307,16 +306,44 @@
"zIndex": 1001
},
{
"id": "xy-edge__ExeSQL:QuietRosesRunc-Answer:SocialAdsWonderc",
"id": "xy-edge__Retrieval:SillyPartsCheerb-Generate:BlueShirtsLaughb",
"markerEnd": "logo",
"source": "ExeSQL:QuietRosesRun",
"sourceHandle": "c",
"source": "Retrieval:SillyPartsCheer",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Answer:SocialAdsWonder",
"targetHandle": "c",
"target": "Generate:BlueShirtsLaugh",
"targetHandle": "b",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Retrieval:BrownStreetsRhymeb-Generate:BlueShirtsLaughb",
"markerEnd": "logo",
"source": "Retrieval:BrownStreetsRhyme",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:BlueShirtsLaugh",
"targetHandle": "b",
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Retrieval:OddSingersRefuseb-Generate:BlueShirtsLaughb",
"markerEnd": "logo",
"source": "Retrieval:OddSingersRefuse",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:BlueShirtsLaugh",
"targetHandle": "b",
"type": "buttonEdge",
"zIndex": 1001
}
@@ -362,8 +389,8 @@
"width": 200
},
"position": {
"x": -58.36886074370702,
"y": 272.1213623212045
"x": -265.59460323639587,
"y": 271.1879130306969
},
"positionAbsolute": {
"x": -58.36886074370702,
@@ -375,100 +402,6 @@
"type": "logicNode",
"width": 200
},
{
"data": {
"form": {
"text": "The large model modifies the original SQL statement based on the error message and returns the modified SQL statement."
},
"label": "Note",
"name": "N: Fix SQL Statement"
},
"dragging": false,
"height": 172,
"id": "Note:SevenDancersMarry",
"measured": {
"height": 172,
"width": 228
},
"position": {
"x": -62.91736862436424,
"y": 93.08952291375991
},
"positionAbsolute": {
"x": -62.91736862436424,
"y": 93.08952291375991
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"style": {
"height": 172,
"width": 228
},
"targetPosition": "left",
"type": "noteNode",
"width": 228
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": true,
"max_tokens": 512,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [
{
"component_id": "Retrieval:SillyPartsCheer",
"id": "2a77e574-a0a6-4a1a-af39-cb192f1d21f5",
"key": "ddl_input"
},
{
"component_id": "Retrieval:OddSingersRefuse",
"id": "83941a85-0b59-408e-97e5-504964b0e090",
"key": "db_input"
},
{
"component_id": "Retrieval:BrownStreetsRhyme",
"id": "c63d0ae6-7ee2-44a2-8a95-69d03c90cb44",
"key": "sql_input"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {ddl_input}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {db_input}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {sql_input}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Generate SQL Statement LLM"
},
"dragging": false,
"height": 232,
"id": "Generate:CuteSidesBuy",
"measured": {
"height": 232,
"width": 200
},
"position": {
"x": 191.98081287844155,
"y": -255.36496490928363
},
"positionAbsolute": {
"x": 191.98081287844155,
"y": -255.36496490928363
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {
@@ -495,8 +428,8 @@
"width": 200
},
"position": {
"x": 198.3020069445181,
"y": -0.9595420072386389
"x": 194.69889765569846,
"y": 61.49435233230193
},
"positionAbsolute": {
"x": 198.3020069445181,
@@ -534,8 +467,8 @@
"width": 200
},
"position": {
"x": 231.17453176754782,
"y": 123.02661106951555
"x": 240.78282320440022,
"y": 162.66081324653166
},
"positionAbsolute": {
"x": 231.17453176754782,
@@ -573,8 +506,8 @@
"width": 200
},
"position": {
"x": 267.7575479510707,
"y": 249.15603226400776
"x": 284.5720579655624,
"y": 246.75395940479467
},
"positionAbsolute": {
"x": 267.7575479510707,
@@ -596,15 +529,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 176,
"height": 165,
"id": "Note:HeavyIconsFollow",
"measured": {
"height": 176,
"width": 266
"height": 165,
"width": 347
},
"position": {
"x": -626.6563777191027,
"y": -48.82220889683933
"x": -709.8631299685773,
"y": 96.50319908555313
},
"positionAbsolute": {
"x": -626.6563777191027,
@@ -619,7 +552,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 266
"width": 347
},
{
"data": {
@@ -631,15 +564,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 162,
"height": 159,
"id": "Note:PinkTaxesClean",
"measured": {
"height": 162,
"width": 210
"height": 159,
"width": 259
},
"position": {
"x": -52.004609812312424,
"y": 336.95180237635077
"x": -253.39933811515345,
"y": 353.7538896054877
},
"positionAbsolute": {
"x": -52.004609812312424,
@@ -654,7 +587,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 210
"width": 259
},
{
"data": {
@@ -701,15 +634,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 131,
"height": 143,
"id": "Note:HugeGroupsScream",
"measured": {
"height": 131,
"width": 387
"height": 143,
"width": 390
},
"position": {
"x": 606.1206536213404,
"y": 113.09441734894426
"x": 612.8793199038756,
"y": 169.1868576959871
},
"positionAbsolute": {
"x": 606.1206536213404,
@@ -724,7 +657,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 387
"width": 390
},
{
"data": {
@@ -736,15 +669,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 266,
"height": 208,
"id": "Note:GreenCrewsArrive",
"measured": {
"height": 266,
"width": 266
"height": 208,
"width": 467
},
"position": {
"x": 545.3423934788841,
"y": -166.58872868890683
"x": 649.3481710005742,
"y": -87.70873445087781
},
"positionAbsolute": {
"x": 545.3423934788841,
@@ -759,7 +692,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 266
"width": 467
},
{
"data": {
@@ -771,15 +704,15 @@
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 175,
"height": 196,
"id": "Note:EightTurtlesLike",
"measured": {
"height": 175,
"width": 265
"height": 196,
"width": 341
},
"position": {
"x": 222.2150747084395,
"y": -445.32694170868734
"x": 134.0070839275931,
"y": -345.41228234051727
},
"positionAbsolute": {
"x": 222.2150747084395,
@@ -794,34 +727,34 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 265
"width": 341
},
{
"data": {
"form": {
"text": "Executes the SQL statement in the database and returns the result.\n\nAfter configuring an accessible database, press 'Test' to ensure the accessibility."
"text": "Executes the SQL statement in the database and returns the result.\n\nAfter configuring an accessible database, press 'Test' to ensure the accessibility.\n\nThe large model modifies the original SQL statement based on the error message and returns the modified SQL statement."
},
"label": "Note",
"name": "N: Execute SQL"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 178,
"height": 276,
"id": "Note:FreshKidsTalk",
"measured": {
"height": 178,
"width": 346
"height": 276,
"width": 336
},
"position": {
"x": -293.35258272850365,
"y": -206.3839921107096
"x": -304.3577648765364,
"y": -288.054469323955
},
"positionAbsolute": {
"x": -251.5866574377311,
"y": -372.2192837064241
},
"resizing": false,
"selected": true,
"selected": false,
"sourcePosition": "right",
"style": {
"height": 178,
@@ -829,7 +762,7 @@
},
"targetPosition": "left",
"type": "noteNode",
"width": 346
"width": 336
},
{
"data": {
@@ -856,7 +789,7 @@
"username": "root"
},
"label": "ExeSQL",
"name": "ExeSQL_0"
"name": "ExeSQL"
},
"dragging": false,
"id": "ExeSQL:QuietRosesRun",
@@ -872,6 +805,79 @@
"sourcePosition": "right",
"targetPosition": "left",
"type": "ragNode"
},
{
"data": {
"form": {
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": true,
"max_tokens": 256,
"message_history_window_size": 6,
"parameter": "Precise",
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "RewriteQuestion",
"name": "RefineQuestion"
},
"dragging": false,
"id": "RewriteQuestion:WildIdeasTell",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": -7.734116293705583,
"y": 236.92372325779243
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "rewriteNode"
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "\n##The user provides a question and you provide SQL. You will only respond with SQL code and not with any explanations.\n\n##You may use the following DDL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:SillyPartsCheer}.\n\n##You may use the following documentation as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:OddSingersRefuse}.\n\n##You may use the following SQL statements as a reference for what tables might be available. Use responses to past questions also to guide you: {Retrieval:BrownStreetsRhyme}.\n\n##Respond with only SQL code. Do not answer with any explanations -- just the code.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Generate SQL Statement LLM"
},
"dragging": false,
"id": "Generate:BlueShirtsLaugh",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": 147.62383788095065,
"y": -116.47462293167156
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
}
]
},
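The recurring edit in this template diff: the `Generate` prompts drop their named `parameters` bindings (`{ddl_input}`, `{db_input}`, `{sql_input}`) and reference the upstream component IDs directly (`{Retrieval:SillyPartsCheer}` and friends), while a `RewriteQuestion` node is inserted between `Answer` and the three `Retrieval` nodes. A rough sketch of what the direct-reference substitution amounts to at run time (the output texts below are stand-ins, not real retrieval results):

outputs = {
    "Retrieval:SillyPartsCheer": "CREATE TABLE orders (id INT, ...);",  # DDL chunks
    "Retrieval:OddSingersRefuse": "orders holds one row per purchase",  # documentation chunks
    "Retrieval:BrownStreetsRhyme": "SELECT COUNT(*) FROM orders;",      # prior question->SQL pairs
}
prompt = "##You may use the following DDL statements: {Retrieval:SillyPartsCheer}."
for cpn_id, text in outputs.items():
    # each placeholder is replaced with that component's latest output
    prompt = prompt.replace("{%s}" % cpn_id, text)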
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -10,93 +10,103 @@
"downstream": [],
"obj": {
"component_name": "Answer",
"inputs": [],
"output": null,
"params": {
"debug_inputs": [],
"inputs": [],
"message_history_window_size": 22,
"output": null,
"output_var_name": "output",
"post_answers": [],
"query": []
}
},
"upstream": [
"Generate:ChubbyCougarsRush"
"Generate:FuzzyEmusWork"
]
},
"Generate:ChubbyCougarsRush": {
"Generate:FuzzyEmusWork": {
"downstream": [
"Answer:TinyGamesGuess"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": null,
"params": {
"cite": false,
"debug_inputs": [],
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 12,
"message_history_window_size": 1,
"output": null,
"output_var_name": "output",
"parameters": [
{
"component_id": "begin@lang",
"id": "73f48a67-b78f-4bcd-8326-a83c31073ab9",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "c9142975-25b3-4199-8fce-aa0bc29a31f2",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "6c824b2a-fe3b-4336-95b5-e85f676bef39",
"key": "translation_1"
},
{
"component_id": "Generate:SlimyFrogsArgue",
"id": "f3bd4569-4852-43fa-b80a-e0dd27dd9e1c",
"key": "reflection"
}
],
"parameters": [],
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read, then edit, a translation to {target_lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS> \\\nas follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{reflection}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"prompt": "Your task is to carefully read, then edit, a translation to {begin@lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS>\nas follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{Generate:ShinySquidsSneeze}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"query": [],
"temperature": 0.1,
"top_p": 0.3
}
},
"upstream": [
"Generate:SlimyFrogsArgue"
"Generate:ShinySquidsSneeze"
]
},
"Generate:RichWordsDeny": {
"Generate:ShinySquidsSneeze": {
"downstream": [
"Generate:SlimyFrogsArgue"
"Generate:FuzzyEmusWork"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": null,
"params": {
"cite": false,
"debug_inputs": [],
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 12,
"message_history_window_size": 1,
"output": null,
"output_var_name": "output",
"parameters": [
{
"component_id": "begin@lang",
"id": "a36e78fb-b431-4ae6-afa8-77839587fcf8",
"key": "lang"
},
{
"component_id": "begin@file",
"id": "f8a704b7-693b-4480-aa9a-da4a83250059",
"key": "file"
}
],
"parameters": [],
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {lang}, ensuring that its style resembles that of popular science articles in {lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{file}\n\n<TRANSLATED_TEXT>",
"prompt": "Your task is to carefully read a source text and a translation to {begin@lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {begin@lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"query": [],
"temperature": 0.1,
"top_p": 0.3
}
},
"upstream": [
"Generate:VastKeysKick"
]
},
"Generate:VastKeysKick": {
"downstream": [
"Generate:ShinySquidsSneeze"
],
"obj": {
"component_name": "Generate",
"inputs": [],
"output": null,
"params": {
"cite": false,
"debug_inputs": [],
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 1,
"output": null,
"output_var_name": "output",
"parameters": [],
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {begin@lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {begin@lang}, ensuring that its style resembles that of popular science articles in {begin@lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{begin@file}\n\n<TRANSLATED_TEXT>",
"query": [],
"temperature": 0.1,
"top_p": 0.3
@@ -106,58 +116,19 @@
"begin"
]
},
"Generate:SlimyFrogsArgue": {
"downstream": [
"Generate:ChubbyCougarsRush"
],
"obj": {
"component_name": "Generate",
"params": {
"cite": false,
"frequency_penalty": 0.7,
"inputs": [],
"llm_id": "deepseek-chat@DeepSeek",
"max_tokens": 0,
"message_history_window_size": 12,
"output_var_name": "output",
"parameters": [
{
"component_id": "begin@lang",
"id": "b2f5e7ec-7f77-485f-af15-461d0f1ca913",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "fbc44092-9f9e-4e85-b5b1-dbd808239d3d",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "c253af54-61d4-40f3-9990-604e2212506f",
"key": "translation_1"
}
],
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read a source text and a translation to {target_lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {target_lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"query": [],
"temperature": 0.1,
"top_p": 0.3
}
},
"upstream": [
"Generate:RichWordsDeny"
]
},
"begin": {
"downstream": [
"Generate:RichWordsDeny"
"Generate:VastKeysKick"
],
"obj": {
"component_name": "Begin",
"inputs": [],
"output": null,
"params": {
"debug_inputs": [],
"inputs": [],
"message_history_window_size": 22,
"output": {},
"output": null,
"output_var_name": "output",
"prologue": "",
"query": [
@@ -165,15 +136,13 @@
"key": "lang",
"name": "Target Language",
"optional": false,
"type": "line",
"value": ""
"type": "line"
},
{
"key": "file",
"name": "Files",
"optional": false,
"type": "file",
"value": ""
"type": "file"
}
]
}
@@ -185,48 +154,36 @@
"graph": {
"edges": [
{
"id": "reactflow__edge-begin-Generate:RichWordsDenyc",
"id": "xy-edge__begin-Generate:VastKeysKickc",
"markerEnd": "logo",
"source": "begin",
"sourceHandle": null,
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:RichWordsDeny",
"target": "Generate:VastKeysKick",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Generate:RichWordsDenyb-Generate:SlimyFrogsArguec",
"id": "xy-edge__Generate:VastKeysKickb-Generate:ShinySquidsSneezec",
"markerEnd": "logo",
"source": "Generate:RichWordsDeny",
"source": "Generate:VastKeysKick",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:SlimyFrogsArgue",
"target": "Generate:ShinySquidsSneeze",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "reactflow__edge-Generate:SlimyFrogsArgueb-Generate:ChubbyCougarsRushc",
"id": "xy-edge__Generate:FuzzyEmusWorkb-Answer:TinyGamesGuessc",
"markerEnd": "logo",
"source": "Generate:SlimyFrogsArgue",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:ChubbyCougarsRush",
"targetHandle": "c",
"type": "buttonEdge"
},
{
"id": "reactflow__edge-Generate:ChubbyCougarsRushb-Answer:TinyGamesGuessc",
"markerEnd": "logo",
"source": "Generate:ChubbyCougarsRush",
"source": "Generate:FuzzyEmusWork",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
@@ -234,7 +191,22 @@
},
"target": "Answer:TinyGamesGuess",
"targetHandle": "c",
"type": "buttonEdge"
"type": "buttonEdge",
"zIndex": 1001
},
{
"id": "xy-edge__Generate:ShinySquidsSneezeb-Generate:FuzzyEmusWorkc",
"markerEnd": "logo",
"source": "Generate:ShinySquidsSneeze",
"sourceHandle": "b",
"style": {
"stroke": "rgb(202 197 245)",
"strokeWidth": 2
},
"target": "Generate:FuzzyEmusWork",
"targetHandle": "c",
"type": "buttonEdge",
"zIndex": 1001
}
],
"nodes": [
@@ -247,15 +219,13 @@
"key": "lang",
"name": "Target Language",
"optional": false,
"type": "line",
"value": ""
"type": "line"
},
{
"key": "file",
"name": "Files",
"optional": false,
"type": "file",
"value": ""
"type": "file"
}
]
},
@@ -265,188 +235,24 @@
"dragging": false,
"height": 128,
"id": "begin",
"measured": {
"height": 128,
"width": 200
},
"position": {
"x": -383.5,
"y": 143.5
"y": 142.62256327439624
},
"positionAbsolute": {
"x": -383.5,
"y": 143.5
},
"selected": false,
"selected": true,
"sourcePosition": "left",
"targetPosition": "right",
"type": "beginNode",
"width": 200
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 12,
"parameter": "Precise",
"parameters": [
{
"component_id": "begin@lang",
"id": "a36e78fb-b431-4ae6-afa8-77839587fcf8",
"key": "lang"
},
{
"component_id": "begin@file",
"id": "f8a704b7-693b-4480-aa9a-da4a83250059",
"key": "file"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {lang}, ensuring that its style resembles that of popular science articles in {lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{file}\n\n<TRANSLATED_TEXT>",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Translate directly"
},
"dragging": false,
"height": 190,
"id": "Generate:RichWordsDeny",
"position": {
"x": -98,
"y": 113.359375
},
"positionAbsolute": {
"x": -98,
"y": 113.359375
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 512,
"message_history_window_size": 12,
"parameter": "Precise",
"parameters": [
{
"component_id": "begin@lang",
"id": "b2f5e7ec-7f77-485f-af15-461d0f1ca913",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "fbc44092-9f9e-4e85-b5b1-dbd808239d3d",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "c253af54-61d4-40f3-9990-604e2212506f",
"key": "translation_1"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read a source text and a translation to {target_lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {target_lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Reflect"
},
"dragging": false,
"height": 232,
"id": "Generate:SlimyFrogsArgue",
"position": {
"x": 178.5,
"y": 91.859375
},
"positionAbsolute": {
"x": 178.5,
"y": 91.859375
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 512,
"message_history_window_size": 12,
"parameter": "Precise",
"parameters": [
{
"component_id": "begin@lang",
"id": "73f48a67-b78f-4bcd-8326-a83c31073ab9",
"key": "target_lang"
},
{
"component_id": "begin@file",
"id": "c9142975-25b3-4199-8fce-aa0bc29a31f2",
"key": "source_text"
},
{
"component_id": "Generate:RichWordsDeny",
"id": "6c824b2a-fe3b-4336-95b5-e85f676bef39",
"key": "translation_1"
},
{
"component_id": "Generate:SlimyFrogsArgue",
"id": "f3bd4569-4852-43fa-b80a-e0dd27dd9e1c",
"key": "reflection"
}
],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read, then edit, a translation to {target_lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS> \\\nas follows:\n\n<SOURCE_TEXT>\n{source_text}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{translation_1}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{reflection}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {target_lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Improve"
},
"dragging": false,
"height": 274,
"id": "Generate:ChubbyCougarsRush",
"position": {
"x": 437,
"y": 70.859375
},
"positionAbsolute": {
"x": 437,
"y": 70.859375
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode",
"width": 200
},
{
"data": {
"form": {},
@@ -456,9 +262,13 @@
"dragging": false,
"height": 44,
"id": "Answer:TinyGamesGuess",
"measured": {
"height": 44,
"width": 200
},
"position": {
"x": 688.5,
"y": 183.859375
"x": 645.5056004454161,
"y": 182.98193827439627
},
"positionAbsolute": {
"x": 688.5,
@@ -482,6 +292,10 @@
"dragging": false,
"height": 227,
"id": "Note:MoodyKnivesCheat",
"measured": {
"height": 227,
"width": 703
},
"position": {
"x": 46.02198421645994,
"y": -267.69527832581736
@@ -504,7 +318,7 @@
{
"data": {
"form": {
"text": "Many businesses use specialized terms that are not widely used on the internet and that LLMs thus don\u2019t know about, and there are also many terms that can be translated in multiple ways. For example, \u201dopen source\u201d in Spanish can be \u201cC\u00f3digo abierto\u201d or \u201cFuente abierta\u201d; both are fine, but it\u2019d better to pick one and stick with it for a single document.\n\nYou can add those glossary translation into prompt to any of `Translate directly` or 'Reflect'."
"text": "Many businesses use specialized terms that are not widely used on the internet and that LLMs thus don’t know about, and there are also many terms that can be translated in multiple ways. For example, ”open source” in Spanish can be “Código abierto” or “Fuente abierta”; both are fine, but it’d better to pick one and stick with it for a single document.\n\nYou can add those glossary translation into prompt to any of `Translate directly` or 'Reflect'."
},
"label": "Note",
"name": "Tip: Add glossary "
@@ -513,6 +327,10 @@
"dragging": false,
"height": 181,
"id": "Note:SourCarrotsAct",
"measured": {
"height": 181,
"width": 832
},
"position": {
"x": 65.0676250238289,
"y": 397.6323270065299
@@ -531,6 +349,120 @@
"targetPosition": "left",
"type": "noteNode",
"width": 832
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Role: You are a professional translator proficient in {begin@lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {begin@lang}, ensuring that its style resembles that of popular science articles in {begin@lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{begin@file}\n\n<TRANSLATED_TEXT>",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Translate directly"
},
"dragging": false,
"id": "Generate:VastKeysKick",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": -132.6338674989604,
"y": 153.70663786774483
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read a source text and a translation to {begin@lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {begin@lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Reflect"
},
"dragging": false,
"id": "Generate:ShinySquidsSneeze",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": 121.1675336631696,
"y": 152.92865408917177
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
},
{
"data": {
"form": {
"cite": false,
"frequencyPenaltyEnabled": true,
"frequency_penalty": 0.7,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_tokens": 256,
"message_history_window_size": 1,
"parameter": "Precise",
"parameters": [],
"presencePenaltyEnabled": true,
"presence_penalty": 0.4,
"prompt": "Your task is to carefully read, then edit, a translation to {begin@lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS>\nas follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{Generate:ShinySquidsSneeze}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
"temperature": 0.1,
"temperatureEnabled": true,
"topPEnabled": true,
"top_p": 0.3
},
"label": "Generate",
"name": "Improve"
},
"dragging": false,
"id": "Generate:FuzzyEmusWork",
"measured": {
"height": 106,
"width": 200
},
"position": {
"x": 383.1474420163898,
"y": 152.0472805236579
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "generateNode"
}
]
},
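Taken together, the translation template above chains three Generate nodes — translate, reflect, improve — each now reading `{begin@...}` inputs and upstream outputs by component ID. A condensed sketch of the control flow (the `chat` callable is a stand-in for illustration, not the repo's API):

def translate_workflow(chat, lang, source_text):
    # Pass 1: direct translation of the source text
    translation = chat(f"Translate into {lang}:\n{source_text}")
    # Pass 2: reflection -- an expert critique of the first pass
    suggestions = chat(f"Source:\n{source_text}\n\nTranslation:\n{translation}\n\nList concrete improvements.")
    # Pass 3: improve -- edit the translation using the critique
    return chat(f"Edit this translation following the suggestions.\n\nSuggestions:\n{suggestions}\n\nTranslation:\n{translation}")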
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
1
agentic_reasoning/__init__.py
Normal file
@@ -0,0 +1 @@
from .deep_research import DeepResearcher as DeepResearcher
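The self-alias in `import DeepResearcher as DeepResearcher` is the conventional way to mark an explicit re-export, making the class part of the package's public surface; callers can then import it from the package root:

from agentic_reasoning import DeepResearcher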
223
agentic_reasoning/deep_research.py
Normal file
@@ -0,0 +1,223 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import re
from functools import partial
from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT, MAX_SEARCH_LIMIT, \
    END_SEARCH_QUERY, REASON_PROMPT, RELEVANT_EXTRACTION_PROMPT
from api.db.services.llm_service import LLMBundle
from rag.nlp import extract_between
from rag.prompts import kb_prompt
from rag.utils.tavily_conn import Tavily


class DeepResearcher:
    def __init__(self,
                 chat_mdl: LLMBundle,
                 prompt_config: dict,
                 kb_retrieve: partial = None,
                 kg_retrieve: partial = None
                 ):
        self.chat_mdl = chat_mdl
        self.prompt_config = prompt_config
        self._kb_retrieve = kb_retrieve
        self._kg_retrieve = kg_retrieve

    @staticmethod
    def _remove_query_tags(text):
        """Remove query tags from text"""
        pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
        return re.sub(pattern, "", text)

    @staticmethod
    def _remove_result_tags(text):
        """Remove result tags from text"""
        pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
        return re.sub(pattern, "", text)

    def _generate_reasoning(self, msg_history):
        """Generate reasoning steps"""
        query_think = ""
        if msg_history[-1]["role"] != "user":
            msg_history.append({"role": "user", "content": "Continues reasoning with the new information.\n"})
        else:
            msg_history[-1]["content"] += "\n\nContinues reasoning with the new information.\n"

        for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_history, {"temperature": 0.7}):
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            if not ans:
                continue
            query_think = ans
            yield query_think
        return query_think

    def _extract_search_queries(self, query_think, question, step_index):
        """Extract search queries from thinking"""
        queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
        if not queries and step_index == 0:
            # If this is the first step and no queries are found, use the original question as the query
            queries = [question]
        return queries
def _truncate_previous_reasoning(self, all_reasoning_steps):
|
||||
"""Truncate previous reasoning steps to maintain a reasonable length"""
|
||||
truncated_prev_reasoning = ""
|
||||
for i, step in enumerate(all_reasoning_steps):
|
||||
truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"
|
||||
|
||||
prev_steps = truncated_prev_reasoning.split('\n\n')
|
||||
if len(prev_steps) <= 5:
|
||||
truncated_prev_reasoning = '\n\n'.join(prev_steps)
|
||||
else:
|
||||
truncated_prev_reasoning = ''
|
||||
for i, step in enumerate(prev_steps):
|
||||
if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
|
||||
truncated_prev_reasoning += step + '\n\n'
|
||||
else:
|
||||
if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
|
||||
truncated_prev_reasoning += '...\n\n'
|
||||
|
||||
return truncated_prev_reasoning.strip('\n')
|
||||
|
||||
def _retrieve_information(self, search_query):
|
||||
"""Retrieve information from different sources"""
|
||||
# 1. Knowledge base retrieval
|
||||
kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}
|
||||
|
||||
# 2. Web retrieval (if Tavily API is configured)
|
||||
if self.prompt_config.get("tavily_api_key"):
|
||||
tav = Tavily(self.prompt_config["tavily_api_key"])
|
||||
tav_res = tav.retrieve_chunks(search_query)
|
||||
kbinfos["chunks"].extend(tav_res["chunks"])
|
||||
kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
|
||||
|
||||
# 3. Knowledge graph retrieval (if configured)
|
||||
if self.prompt_config.get("use_kg") and self._kg_retrieve:
|
||||
ck = self._kg_retrieve(question=search_query)
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
return kbinfos
|
||||
|
||||
def _update_chunk_info(self, chunk_info, kbinfos):
|
||||
"""Update chunk information for citations"""
|
||||
if not chunk_info["chunks"]:
|
||||
# If this is the first retrieval, use the retrieval results directly
|
||||
for k in chunk_info.keys():
|
||||
chunk_info[k] = kbinfos[k]
|
||||
else:
|
||||
# Merge newly retrieved information, avoiding duplicates
|
||||
cids = [c["chunk_id"] for c in chunk_info["chunks"]]
|
||||
for c in kbinfos["chunks"]:
|
||||
if c["chunk_id"] not in cids:
|
||||
chunk_info["chunks"].append(c)
|
||||
|
||||
dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
|
||||
for d in kbinfos["doc_aggs"]:
|
||||
if d["doc_id"] not in dids:
|
||||
chunk_info["doc_aggs"].append(d)
|
||||
|
||||
def _extract_relevant_info(self, truncated_prev_reasoning, search_query, kbinfos):
|
||||
"""Extract and summarize relevant information"""
|
||||
summary_think = ""
|
||||
for ans in self.chat_mdl.chat_streamly(
|
||||
RELEVANT_EXTRACTION_PROMPT.format(
|
||||
prev_reasoning=truncated_prev_reasoning,
|
||||
search_query=search_query,
|
||||
document="\n".join(kb_prompt(kbinfos, 4096))
|
||||
),
|
||||
[{"role": "user",
|
||||
"content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
|
||||
{"temperature": 0.7}):
|
||||
ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
|
||||
if not ans:
|
||||
continue
|
||||
summary_think = ans
|
||||
yield summary_think
|
||||
|
||||
return summary_think
|
||||
|
||||
def thinking(self, chunk_info: dict, question: str):
|
||||
executed_search_queries = []
|
||||
msg_history = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
|
||||
all_reasoning_steps = []
|
||||
think = "<think>"
|
||||
|
||||
for step_index in range(MAX_SEARCH_LIMIT + 1):
|
||||
# Check if the maximum search limit has been reached
|
||||
if step_index == MAX_SEARCH_LIMIT - 1:
|
||||
summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
|
||||
yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
|
||||
all_reasoning_steps.append(summary_think)
|
||||
msg_history.append({"role": "assistant", "content": summary_think})
|
||||
break
|
||||
|
||||
# Step 1: Generate reasoning
|
||||
query_think = ""
|
||||
for ans in self._generate_reasoning(msg_history):
|
||||
query_think = ans
|
||||
yield {"answer": think + self._remove_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}
|
||||
|
||||
think += self._remove_query_tags(query_think)
|
||||
all_reasoning_steps.append(query_think)
|
||||
|
||||
# Step 2: Extract search queries
|
||||
queries = self._extract_search_queries(query_think, question, step_index)
|
||||
if not queries and step_index > 0:
|
||||
# If not the first step and no queries, end the search process
|
||||
break
|
||||
|
||||
# Process each search query
|
||||
for search_query in queries:
|
||||
logging.info(f"[THINK]Query: {step_index}. {search_query}")
|
||||
msg_history.append({"role": "assistant", "content": search_query})
|
||||
think += f"\n\n> {step_index + 1}. {search_query}\n\n"
|
||||
yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}
|
||||
|
||||
# Check if the query has already been executed
|
||||
if search_query in executed_search_queries:
|
||||
summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
||||
yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
|
||||
all_reasoning_steps.append(summary_think)
|
||||
msg_history.append({"role": "user", "content": summary_think})
|
||||
think += summary_think
|
||||
continue
|
||||
|
||||
executed_search_queries.append(search_query)
|
||||
|
||||
# Step 3: Truncate previous reasoning steps
|
||||
truncated_prev_reasoning = self._truncate_previous_reasoning(all_reasoning_steps)
|
||||
|
||||
# Step 4: Retrieve information
|
||||
kbinfos = self._retrieve_information(search_query)
|
||||
|
||||
# Step 5: Update chunk information
|
||||
self._update_chunk_info(chunk_info, kbinfos)
|
||||
|
||||
# Step 6: Extract relevant information
|
||||
think += "\n\n"
|
||||
summary_think = ""
|
||||
for ans in self._extract_relevant_info(truncated_prev_reasoning, search_query, kbinfos):
|
||||
summary_think = ans
|
||||
yield {"answer": think + self._remove_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}
|
||||
|
||||
all_reasoning_steps.append(summary_think)
|
||||
msg_history.append(
|
||||
{"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
|
||||
think += self._remove_result_tags(summary_think)
|
||||
logging.info(f"[THINK]Summary: {step_index}. {summary_think}")
|
||||
|
||||
yield think + "</think>"
|
||||
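Read as a whole, `thinking()` is a generator: it streams partial `{"answer": ...}` dicts while it reasons and retrieves, and its very last yield is the bare reasoning string. A minimal driving sketch follows; `my_chat_mdl` and `my_retriever` are hypothetical placeholders, not defined in this PR, standing in for the `LLMBundle` and retriever partials a real caller would supply:

```python
from functools import partial

from agentic_reasoning import DeepResearcher

# Hypothetical inputs for illustration: an LLMBundle-compatible chat model
# and a retriever callable. Neither is part of this diff.
researcher = DeepResearcher(
    my_chat_mdl,
    {"tavily_api_key": None, "use_kg": False},          # web and KG retrieval disabled
    kb_retrieve=partial(my_retriever, kb_ids=["kb-id"]),
)

chunk_info = {"chunks": [], "doc_aggs": []}             # filled in place for citations
for delta in researcher.thinking(chunk_info, "Who directed Jaws?"):
    if isinstance(delta, dict):                         # streamed partial answers
        print(delta["answer"][-80:])
    else:                                               # final yield is the plain string
        print("final:", delta)
```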
113 agentic_reasoning/prompts.py Normal file
@@ -0,0 +1,113 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
MAX_SEARCH_LIMIT = 6

REASON_PROMPT = (
    "You are a reasoning assistant with the ability to perform dataset searches to help "
    "you answer the user's question accurately. You have special tools:\n\n"
    f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
    f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
    f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
    "Once you have all the information you need, continue your reasoning.\n\n"
    "-- Example 1 --\n"  ########################################
    "Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"
    "-- Example 2 --\n"  #########################################
    "Question: \"When was the founder of craigslist born?\"\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY} When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"
    "**Remember**:\n"
    f"- You have a dataset to search, so you just provide a proper search query.\n"
    f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
    "- The language of query MUST be as the same as 'Question' or 'search result'.\n"
    "- If no helpful information can be found, rewrite the search query to be less and precise keywords.\n"
    "- When done searching, continue your reasoning.\n\n"
    'Please answer the following question. You should think step by step to solve it.\n\n'
)

RELEVANT_EXTRACTION_PROMPT = """**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
- The language of query **MUST BE** as the same as 'Search Query' or 'Web Pages'.\n"
**Final Information**

[Helpful information]

- **If the web pages do not provide any helpful information for current search query:** Output the following text.

**Final Information**

No helpful information found.

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

"""
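The four control tokens are what tie `deep_research.py` to this prompt: the model is told to wrap queries in the begin/end markers, and the driver pulls them back out with `extract_between`. A self-contained illustration of that round trip (a regex stand-in for demonstration, not RAGFlow's own `extract_between`):

```python
import re

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"

def extract_queries(text: str) -> list[str]:
    # Non-greedy match between the begin/end tokens; DOTALL tolerates newlines.
    pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
    return [q.strip() for q in re.findall(pattern, text, flags=re.DOTALL)]

sample = f"Let me check. {BEGIN_SEARCH_QUERY}Who directed Jaws?{END_SEARCH_QUERY}"
assert extract_queries(sample) == ["Who directed Jaws?"]
```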
@@ -83,7 +83,7 @@ app.errorhandler(Exception)(server_error_response)
 app.config["SESSION_PERMANENT"] = False
 app.config["SESSION_TYPE"] = "filesystem"
 app.config["MAX_CONTENT_LENGTH"] = int(
-    os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024)
+    os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
 )
 
 Session(app)
@@ -107,7 +107,7 @@ def search_pages_path(pages_dir):
 def register_page(page_path):
     path = f"{page_path}"
 
-    page_name = page_path.stem.rstrip("_app")
+    page_name = page_path.stem.removesuffix("_app")
     module_name = ".".join(
         page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
     )
@@ -119,8 +119,9 @@ def register_page(page_path):
     sys.modules[module_name] = page
     spec.loader.exec_module(page)
     page_name = getattr(page, "page_name", page_name)
+    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
     url_prefix = (
-        f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
+        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
     )
 
     app.register_blueprint(page.manager, url_prefix=url_prefix)
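The `rstrip("_app")` to `removesuffix("_app")` change above fixes a real bug: `str.rstrip` treats its argument as a character set, not a suffix, so page names ending in any of `_`, `a`, `p` were over-trimmed. A quick demonstration:

```python
# rstrip strips trailing characters from the set {'_', 'a', 'p'}, so it
# happens to work for "canvas_app" but mangles "data_app".
assert "canvas_app".rstrip("_app") == "canvas"
assert "data_app".rstrip("_app") == "dat"            # over-trimmed
assert "data_app".removesuffix("_app") == "data"     # true suffix removal (Python 3.9+)
```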
@@ -21,11 +21,11 @@ from flask import request, Response
 from api.db.services.llm_service import TenantLLMService
 from flask_login import login_required, current_user
 
-from api.db import FileType, LLMType, ParserType, FileSource
+from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileType, LLMType, ParserType, FileSource
 from api.db.db_models import APIToken, Task, File
 from api.db.services import duplicate_name
 from api.db.services.api_service import APITokenService, API4ConversationService
-from api.db.services.dialog_service import DialogService, chat, keyword_extraction, label_question
+from api.db.services.dialog_service import DialogService, chat
 from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
@@ -38,6 +38,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge
     generate_confirmation_token
 
 from api.utils.file_utils import filename_type, thumbnail
+from rag.app.tag import label_question
+from rag.prompts import keyword_extraction
 from rag.utils.storage_factory import STORAGE_IMPL
 
 from api.db.services.canvas_service import UserCanvasService
@@ -343,7 +345,7 @@ def completion():
 
 @manager.route('/conversation/<conversation_id>', methods=['GET'])  # noqa: F821
 # @login_required
-def get(conversation_id):
+def get_conversation(conversation_id):
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
@@ -477,7 +479,7 @@ def upload():
         doc = doc.to_dict()
         doc["tenant_id"] = tenant_id
         bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-        queue_tasks(doc, bucket, name)
+        queue_tasks(doc, bucket, name, 0)
     except Exception as e:
         return server_error_response(e)
 
@@ -546,6 +548,31 @@ def list_chunks():
 
     return get_json_result(data=res)
 
+@manager.route('/get_chunk/<chunk_id>', methods=['GET'])  # noqa: F821
+# @login_required
+def get_chunk(chunk_id):
+    from rag.nlp import search
+    token = request.headers.get('Authorization').split()[1]
+    objs = APIToken.query(token=token)
+    if not objs:
+        return get_json_result(
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+    try:
+        tenant_id = objs[0].tenant_id
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
+        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
+        if chunk is None:
+            return server_error_response(Exception("Chunk not found"))
+        k = []
+        for n in chunk.keys():
+            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
+                k.append(n)
+        for n in k:
+            del chunk[n]
+
+        return get_json_result(data=chunk)
+    except Exception as e:
+        return server_error_response(e)
+
 @manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
 # @login_required
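A hedged sketch of calling the new endpoint; the base URL and route prefix are assumptions about a typical deployment, and `CHUNK_ID` and `YOUR_API_KEY` are placeholders:

```python
import requests

BASE = "http://localhost:9380/v1/api"     # assumed prefix; check your deployment
headers = {"Authorization": "Bearer YOUR_API_KEY"}

resp = requests.get(f"{BASE}/get_chunk/CHUNK_ID", headers=headers, timeout=10)
resp.raise_for_status()
chunk = resp.json()["data"]
# Vector and tokenized fields (_vec, _sm_, _tks, _ltks) are stripped server-side.
```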
@@ -575,10 +602,23 @@ def list_kb_docs():
     orderby = req.get("orderby", "create_time")
     desc = req.get("desc", True)
     keywords = req.get("keywords", "")
 
+    status = req.get("status", [])
+    if status:
+        invalid_status = {s for s in status if s not in VALID_TASK_STATUS}
+        if invalid_status:
+            return get_data_error_result(
+                message=f"Invalid filter status conditions: {', '.join(invalid_status)}"
+            )
+    types = req.get("types", [])
+    if types:
+        invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
+        if invalid_types:
+            return get_data_error_result(
+                message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}"
+            )
     try:
         docs, tol = DocumentService.get_by_kb_id(
-            kb_id, page_number, items_per_page, orderby, desc, keywords)
+            kb_id, page_number, items_per_page, orderby, desc, keywords, status, types)
         docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]
 
         return get_json_result(data={"total": tol, "docs": docs})
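With this hunk, `/list_kb_docs` accepts optional `status` and `types` lists, validated against `VALID_TASK_STATUS` and `VALID_FILE_TYPES`. A sketch of the extended request body; field names other than `status`, `types`, `orderby`, `desc`, and `keywords` are assumptions, and the concrete filter values are illustrative:

```python
payload = {
    "kb_name": "my_dataset",      # hypothetical dataset selector
    "page": 1,                    # assumed paging fields
    "page_size": 15,
    "orderby": "create_time",
    "desc": True,
    "keywords": "",
    "status": ["1"],              # must be a subset of VALID_TASK_STATUS
    "types": ["pdf"],             # must be a subset of VALID_FILE_TYPES
}
# POST this JSON to /list_kb_docs with the usual Bearer API-key header;
# invalid values are rejected with the error messages shown above.
```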
@@ -613,7 +653,7 @@ def document_rm():
     tenant_id = objs[0].tenant_id
     req = request.json
     try:
-        doc_ids = [DocumentService.get_doc_id_by_doc_name(doc_name) for doc_name in req.get("doc_names", [])]
+        doc_ids = DocumentService.get_doc_ids_by_doc_names(req.get("doc_names", []))
         for doc_id in req.get("doc_ids", []):
             if doc_id not in doc_ids:
                 doc_ids.append(doc_id)
@@ -631,11 +671,16 @@ def document_rm():
     FileService.init_knowledgebase_docs(pf_id, tenant_id)
 
     errors = ""
+    docs = DocumentService.get_by_ids(doc_ids)
+    doc_dic = {}
+    for doc in docs:
+        doc_dic[doc.id] = doc
+
     for doc_id in doc_ids:
         try:
-            e, doc = DocumentService.get_by_id(doc_id)
-            if not e:
+            if doc_id not in doc_dic:
                 return get_data_error_result(message="Document not found!")
+            doc = doc_dic[doc_id]
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
                 return get_data_error_result(message="Tenant not found!")
@@ -816,10 +861,11 @@ def retrieval():
     doc_ids = req.get("doc_ids", [])
     question = req.get("question")
     page = int(req.get("page", 1))
-    size = int(req.get("size", 30))
+    size = int(req.get("page_size", 30))
     similarity_threshold = float(req.get("similarity_threshold", 0.2))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
+    highlight = bool(req.get("highlight", False))
 
     try:
         kbs = KnowledgebaseService.get_by_ids(kb_ids)
@@ -840,7 +886,7 @@ def retrieval():
             question += keyword_extraction(chat_mdl, question)
         ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
                                                similarity_threshold, vector_similarity_weight, top,
-                                               doc_ids, rerank_mdl=rerank_mdl,
+                                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight,
                                                rank_feature=label_question(question, kbs))
         for c in ranks["chunks"]:
             c.pop("vector", None)
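Two caller-visible changes land in `/retrieval`: the page-size field is now read from `page_size` (previously `size`), and a `highlight` flag is forwarded to the retriever. A hedged request sketch, with ids and the route prefix as placeholders:

```python
payload = {
    "kb_id": ["KB_ID"],               # placeholder dataset id(s)
    "question": "What is RAGFlow?",
    "page": 1,
    "page_size": 30,                  # renamed from "size" in this change
    "similarity_threshold": 0.2,
    "vector_similarity_weight": 0.3,
    "top_k": 1024,
    "highlight": True,                # now honored and passed to the retriever
}
```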
76 api/apps/auth/README.md Normal file
@@ -0,0 +1,76 @@
# Auth

The Auth module provides implementations of OAuth2 and OpenID Connect (OIDC) authentication for integration with third-party identity providers.

**Features**

- Supports both OAuth2 and OIDC authentication protocols
- Automatic OIDC configuration discovery (via `/.well-known/openid-configuration`)
- JWT token validation
- Unified user information handling

## Usage

```python
# OAuth2 configuration
oauth_config = {
    "type": "oauth2",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "authorization_url": "https://your-oauth-provider.com/oauth/authorize",
    "token_url": "https://your-oauth-provider.com/oauth/token",
    "userinfo_url": "https://your-oauth-provider.com/oauth/userinfo",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# OIDC configuration
oidc_config = {
    "type": "oidc",
    "issuer": "https://your-oauth-provider.com/oidc",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# Github OAuth configuration
github_config = {
    "type": "github",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# Get client instance
client = get_auth_client(oauth_config)
```

### Authentication Flow

1. Get authorization URL:
```python
auth_url = client.get_authorization_url()
```

2. After user authorization, exchange authorization code for token:
```python
token_response = client.exchange_code_for_token(authorization_code)
access_token = token_response["access_token"]
```

3. Fetch user information:
```python
user_info = client.fetch_user_info(access_token)
```

## User Information Structure

All authentication methods return user information following this structure:

```python
{
    "email": "user@example.com",
    "username": "username",
    "nickname": "User Name",
    "avatar_url": "https://example.com/avatar.jpg"
}
```
40 api/apps/auth/__init__.py Normal file
@@ -0,0 +1,40 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from .oauth import OAuthClient
from .oidc import OIDCClient
from .github import GithubOAuthClient


CLIENT_TYPES = {
    "oauth2": OAuthClient,
    "oidc": OIDCClient,
    "github": GithubOAuthClient
}


def get_auth_client(config) -> OAuthClient:
    channel_type = str(config.get("type", "")).lower()
    if channel_type == "":
        if config.get("issuer"):
            channel_type = "oidc"
        else:
            channel_type = "oauth2"
    client_class = CLIENT_TYPES.get(channel_type)
    if not client_class:
        raise ValueError(f"Unsupported type: {channel_type}")

    return client_class(config)
63 api/apps/auth/github.py Normal file
@@ -0,0 +1,63 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import requests
from .oauth import OAuthClient, UserInfo


class GithubOAuthClient(OAuthClient):
    def __init__(self, config):
        """
        Initialize the GithubOAuthClient with the provider's configuration.
        """
        config.update({
            "authorization_url": "https://github.com/login/oauth/authorize",
            "token_url": "https://github.com/login/oauth/access_token",
            "userinfo_url": "https://api.github.com/user",
            "scope": "user:email"
        })
        super().__init__(config)

    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch github user info.
        """
        user_info = {}
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            # user info
            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info.update(response.json())
            # email info
            response = requests.get(self.userinfo_url + "/emails", headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            email_info = response.json()
            user_info["email"] = next(
                (email for email in email_info if email["primary"]), None
            )["email"]
            return self.normalize_user_info(user_info)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch github user info: {e}")

    def normalize_user_info(self, user_info):
        email = user_info.get("email")
        username = user_info.get("login", str(email).split("@")[0])
        nickname = user_info.get("name", username)
        avatar_url = user_info.get("avatar_url", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
110 api/apps/auth/oauth.py Normal file
@@ -0,0 +1,110 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import requests
import urllib.parse


class UserInfo:
    def __init__(self, email, username, nickname, avatar_url):
        self.email = email
        self.username = username
        self.nickname = nickname
        self.avatar_url = avatar_url

    def to_dict(self):
        return {key: value for key, value in self.__dict__.items()}


class OAuthClient:
    def __init__(self, config):
        """
        Initialize the OAuthClient with the provider's configuration.
        """
        self.client_id = config["client_id"]
        self.client_secret = config["client_secret"]
        self.authorization_url = config["authorization_url"]
        self.token_url = config["token_url"]
        self.userinfo_url = config["userinfo_url"]
        self.redirect_uri = config["redirect_uri"]
        self.scope = config.get("scope", None)

        self.http_request_timeout = 7

    def get_authorization_url(self, state=None):
        """
        Generate the authorization URL for user login.
        """
        params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
        }
        if self.scope:
            params["scope"] = self.scope
        if state:
            params["state"] = state
        authorization_url = f"{self.authorization_url}?{urllib.parse.urlencode(params)}"
        return authorization_url

    def exchange_code_for_token(self, code):
        """
        Exchange authorization code for access token.
        """
        try:
            payload = {
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "code": code,
                "redirect_uri": self.redirect_uri,
                "grant_type": "authorization_code"
            }
            response = requests.post(
                self.token_url,
                data=payload,
                headers={"Accept": "application/json"},
                timeout=self.http_request_timeout
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to exchange authorization code for token: {e}")

    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch user information using access token.
        """
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info = response.json()
            return self.normalize_user_info(user_info)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch user info: {e}")

    def normalize_user_info(self, user_info):
        email = user_info.get("email")
        username = user_info.get("username", str(email).split("@")[0])
        nickname = user_info.get("nickname", username)
        avatar_url = user_info.get("avatar_url", None)
        if avatar_url is None:
            avatar_url = user_info.get("picture", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
100 api/apps/auth/oidc.py Normal file
@@ -0,0 +1,100 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import jwt
import requests
from .oauth import OAuthClient


class OIDCClient(OAuthClient):
    def __init__(self, config):
        """
        Initialize the OIDCClient with the provider's configuration.
        Use `issuer` as the single source of truth for configuration discovery.
        """
        self.issuer = config.get("issuer")
        if not self.issuer:
            raise ValueError("Missing issuer in configuration.")

        oidc_metadata = self._load_oidc_metadata(self.issuer)
        config.update({
            'issuer': oidc_metadata['issuer'],
            'jwks_uri': oidc_metadata['jwks_uri'],
            'authorization_url': oidc_metadata['authorization_endpoint'],
            'token_url': oidc_metadata['token_endpoint'],
            'userinfo_url': oidc_metadata['userinfo_endpoint']
        })

        super().__init__(config)
        self.issuer = config['issuer']
        self.jwks_uri = config['jwks_uri']

    def _load_oidc_metadata(self, issuer):
        """
        Load OIDC metadata from `/.well-known/openid-configuration`.
        """
        try:
            metadata_url = f"{issuer}/.well-known/openid-configuration"
            response = requests.get(metadata_url, timeout=7)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch OIDC metadata: {e}")

    def parse_id_token(self, id_token):
        """
        Parse and validate OIDC ID Token (JWT format) with signature verification.
        """
        try:
            # Decode JWT header without verifying signature
            headers = jwt.get_unverified_header(id_token)

            # OIDC usually uses `RS256` for signing
            alg = headers.get("alg", "RS256")

            # Use PyJWT's PyJWKClient to fetch JWKS and find signing key
            jwks_url = f"{self.issuer}/.well-known/jwks.json"
            jwks_cli = jwt.PyJWKClient(jwks_url)
            signing_key = jwks_cli.get_signing_key_from_jwt(id_token).key

            # Decode and verify signature
            decoded_token = jwt.decode(
                id_token,
                key=signing_key,
                algorithms=[alg],
                audience=str(self.client_id),
                issuer=self.issuer,
            )
            return decoded_token
        except Exception as e:
            raise ValueError(f"Error parsing ID Token: {e}")

    def fetch_user_info(self, access_token, id_token=None, **kwargs):
        """
        Fetch user info.
        """
        user_info = {}
        if id_token:
            user_info = self.parse_id_token(id_token)
        user_info.update(super().fetch_user_info(access_token).to_dict())
        return self.normalize_user_info(user_info)

    def normalize_user_info(self, user_info):
        return super().normalize_user_info(user_info)
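A hedged sketch of how these clients compose into a login flow; the route, session handling, and provider URLs are illustrative, not RAGFlow's actual handler. Note that constructing an OIDC client triggers live metadata discovery against the issuer:

```python
from flask import Flask, redirect, request

from api.apps.auth import get_auth_client

app = Flask(__name__)
client = get_auth_client({
    "type": "oidc",
    "issuer": "https://your-oauth-provider.com/oidc",   # placeholder issuer
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/oidc",
})

@app.route("/v1/user/oauth/callback/<channel>")
def oauth_callback(channel):
    token_response = client.exchange_code_for_token(request.args.get("code"))
    # OIDC providers also return an id_token; GithubOAuthClient ignores it.
    user = client.fetch_user_info(token_response["access_token"],
                                  id_token=token_response.get("id_token"))
    # ... look up or create the local account from user.to_dict(), then log in ...
    return redirect("/")
```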
@@ -18,13 +18,15 @@ import traceback
 from flask import request, Response
 from flask_login import login_required, current_user
 from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
 from api.db.services.user_service import TenantService
+from api.db.services.user_canvas_version import UserCanvasVersionService
 from api.settings import RetCode
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
 from agent.canvas import Canvas
 from peewee import MySQLDatabase, PostgresqlDatabase
 from api.db.db_models import APIToken
 
+import time
 
@@ -61,7 +63,6 @@ def save():
     req["user_id"] = current_user.id
     if not isinstance(req["dsl"], str):
         req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
-
     req["dsl"] = json.loads(req["dsl"])
     if "id" not in req:
         if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
@@ -75,16 +76,21 @@ def save():
             data=False, message='Only owner of canvas authorized for this operation.',
             code=RetCode.OPERATING_ERROR)
         UserCanvasService.update_by_id(req["id"], req)
+        # save version
+        UserCanvasVersionService.insert(user_canvas_id=req["id"], dsl=req["dsl"], title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")))
+        UserCanvasVersionService.delete_all_versions(req["id"])
     return get_json_result(data=req)
 
 
 
 
 @manager.route('/get/<canvas_id>', methods=['GET'])  # noqa: F821
 @login_required
 def get(canvas_id):
-    e, c = UserCanvasService.get_by_id(canvas_id)
+    e, c = UserCanvasService.get_by_tenant_id(canvas_id)
     if not e:
         return get_data_error_result(message="canvas not found.")
-    return get_json_result(data=c.to_dict())
+    return get_json_result(data=c)
 
 @manager.route('/getsse/<canvas_id>', methods=['GET'])  # type: ignore # noqa: F821
 def getsse(canvas_id):
@@ -107,6 +113,7 @@ def getsse(canvas_id):
 def run():
     req = request.json
     stream = req.get("stream", True)
+    running_hint_text = req.get("running_hint_text", "")
     e, cvs = UserCanvasService.get_by_id(req["id"])
     if not e:
         return get_data_error_result(message="canvas not found.")
@@ -132,7 +139,7 @@ def run():
     def sse():
         nonlocal answer, cvs
         try:
-            for ans in canvas.run(stream=True):
+            for ans in canvas.run(running_hint_text=running_hint_text, stream=True):
                 if ans.get("running_status"):
                     yield "data:" + json.dumps({"code": 0, "message": "",
                                                 "data": {"answer": ans["content"],
@@ -170,7 +177,7 @@ def run():
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp
 
-    for answer in canvas.run(stream=False):
+    for answer in canvas.run(running_hint_text=running_hint_text, stream=False):
         if answer.get("running_status"):
             continue
         final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
@@ -283,4 +290,62 @@ def test_db_connect():
         return get_json_result(data="Database Connection Successful!")
     except Exception as e:
         return server_error_response(e)
+
+
+# api: get the list of DSL versions of a canvas
+@manager.route('/getlistversion/<canvas_id>', methods=['GET'])  # noqa: F821
+@login_required
+def getlistversion(canvas_id):
+    try:
+        list = sorted([c.to_dict() for c in UserCanvasVersionService.list_by_canvas_id(canvas_id)], key=lambda x: x["update_time"] * -1)
+        return get_json_result(data=list)
+    except Exception as e:
+        return get_data_error_result(message=f"Error getting history files: {e}")
+
+
+# api: get one DSL version of a canvas
+@manager.route('/getversion/<version_id>', methods=['GET'])  # noqa: F821
+@login_required
+def getversion(version_id):
+    try:
+        e, version = UserCanvasVersionService.get_by_id(version_id)
+        if version:
+            return get_json_result(data=version.to_dict())
+    except Exception as e:
+        return get_json_result(data=f"Error getting history file: {e}")
+
+
+@manager.route('/listteam', methods=['GET'])  # noqa: F821
+@login_required
+def list_kbs():
+    keywords = request.args.get("keywords", "")
+    page_number = int(request.args.get("page", 1))
+    items_per_page = int(request.args.get("page_size", 150))
+    orderby = request.args.get("orderby", "create_time")
+    desc = request.args.get("desc", True)
+    try:
+        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
+        kbs, total = UserCanvasService.get_by_tenant_ids(
+            [m["tenant_id"] for m in tenants], current_user.id, page_number,
+            items_per_page, orderby, desc, keywords)
+        return get_json_result(data={"kbs": kbs, "total": total})
+    except Exception as e:
+        return server_error_response(e)
+
+
+@manager.route('/setting', methods=['POST'])  # noqa: F821
+@validate_request("id", "title", "permission")
+@login_required
+def setting():
+    req = request.json
+    req["user_id"] = current_user.id
+    e, flow = UserCanvasService.get_by_id(req["id"])
+    if not e:
+        return get_data_error_result(message="canvas not found.")
+    flow = flow.to_dict()
+    flow["title"] = req["title"]
+    if req["description"]:
+        flow["description"] = req["description"]
+    if req["permission"]:
+        flow["permission"] = req["permission"]
+    if req["avatar"]:
+        flow["avatar"] = req["avatar"]
+    if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+        return get_json_result(
+            data=False, message='Only owner of canvas authorized for this operation.',
+            code=RetCode.OPERATING_ERROR)
+    num = UserCanvasService.update_by_id(req["id"], flow)
+    return get_json_result(data=num)
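The version endpoints added above pair naturally: one lists snapshots, the other fetches a snapshot. A hedged sketch; the base URL is a placeholder, an authenticated session is assumed since the routes are `@login_required`, and the `id` and `dsl` field names are assumptions about the `UserCanvasVersion` model:

```python
import requests

session = requests.Session()              # assumed to carry a logged-in cookie
BASE = "http://localhost:9380/v1/canvas"  # assumed route prefix

versions = session.get(f"{BASE}/getlistversion/CANVAS_ID").json()["data"]
latest = versions[0]                      # sorted by update_time, newest first
snapshot = session.get(f"{BASE}/getversion/{latest['id']}").json()["data"]
dsl = snapshot.get("dsl")                 # the saved canvas DSL at that point
```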
@@ -19,9 +19,10 @@ import json
 from flask import request
 from flask_login import login_required, current_user
 
-from api.db.services.dialog_service import keyword_extraction, label_question
 from rag.app.qa import rmPrefix, beAdoc
+from rag.app.tag import label_question
 from rag.nlp import search, rag_tokenizer
+from rag.prompts import keyword_extraction, cross_languages
 from rag.settings import PAGERANK_FLD
 from rag.utils import rmSpace
 from api.db import LLMType, ParserType
@@ -36,6 +37,7 @@ import xxhash
 import re
 
 
+
 @manager.route('/list', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_id")
@@ -93,12 +95,14 @@ def get():
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
             return get_data_error_result(message="Tenant not found!")
-        tenant_id = tenants[0].tenant_id
-        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
-        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
+        for tenant in tenants:
+            kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
+            chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
+            if chunk:
+                break
         if chunk is None:
             return server_error_response(Exception("Chunk not found"))
 
         k = []
         for n in chunk.keys():
             if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
@@ -191,6 +195,7 @@ def switch():
 @login_required
 @validate_request("chunk_ids", "doc_id")
 def rm():
+    from rag.utils.storage_factory import STORAGE_IMPL
     req = request.json
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
@@ -201,6 +206,9 @@ def rm():
         deleted_chunk_ids = req["chunk_ids"]
         chunk_number = len(deleted_chunk_ids)
         DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
+        for cid in deleted_chunk_ids:
+            if STORAGE_IMPL.obj_exist(doc.kb_id, cid):
+                STORAGE_IMPL.rm(doc.kb_id, cid)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
@@ -272,6 +280,7 @@ def retrieval_test():
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     use_kg = req.get("use_kg", False)
     top = int(req.get("top_k", 1024))
+    langs = req.get("cross_languages", [])
     tenant_ids = []
 
     try:
@@ -291,6 +300,9 @@ def retrieval_test():
         if not e:
             return get_data_error_result(message="Knowledgebase not found!")
 
+        if langs:
+            question = cross_languages(kb.tenant_id, None, question, langs)
+
         embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
 
         rerank_mdl = None
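With the two retrieval-test hunks above, a request can now carry a `cross_languages` list; when present, the question is rewritten via `rag.prompts.cross_languages` before embedding. A sketch of the relevant part of the request body, with illustrative values and placeholder ids:

```python
payload = {
    "kb_id": "KB_ID",                           # placeholder
    "question": "What is RAGFlow?",
    "cross_languages": ["Chinese", "Spanish"],  # illustrative target languages
    "use_kg": False,
    "top_k": 1024,
}
```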
@@ -17,29 +17,35 @@ import json
 import re
 import traceback
 from copy import deepcopy
-from api.db.db_models import APIToken
-
-from api.db.services.conversation_service import ConversationService, structure_answer
-from api.db.services.user_service import UserTenantService
-from flask import request, Response
-from flask_login import login_required, current_user
+
+import trio
+from flask import Response, request
+from flask_login import current_user, login_required
 
+from api import settings
 from api.db import LLMType
-from api.db.services.dialog_service import DialogService, chat, ask, label_question
+from api.db.db_models import APIToken
+from api.db.services.conversation_service import ConversationService, structure_answer
+from api.db.services.dialog_service import DialogService, ask, chat
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle, TenantService
-from api import settings
-from api.utils.api_utils import get_json_result
-from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
+from api.db.services.user_service import UserTenantService
+from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request
 from graphrag.general.mind_map_extractor import MindMapExtractor
+from rag.app.tag import label_question
 
 
-@manager.route('/set', methods=['POST'])  # noqa: F821
+@manager.route("/set", methods=["POST"])  # noqa: F821
 @login_required
 def set_conversation():
     req = request.json
     conv_id = req.get("conversation_id")
     is_new = req.get("is_new")
+    name = req.get("name", "New conversation")
+
+    if len(name) > 255:
+        name = name[0:255]
+
     del req["is_new"]
     if not is_new:
         del req["conversation_id"]
@@ -48,8 +54,7 @@ def set_conversation():
             return get_data_error_result(message="Conversation not found!")
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
-            return get_data_error_result(
-                message="Fail to update a conversation!")
+            return get_data_error_result(message="Fail to update a conversation!")
         conv = conv.to_dict()
         return get_json_result(data=conv)
     except Exception as e:
@@ -59,38 +64,30 @@ def set_conversation():
         e, dia = DialogService.get_by_id(req["dialog_id"])
         if not e:
             return get_data_error_result(message="Dialog not found")
-        conv = {
-            "id": conv_id,
-            "dialog_id": req["dialog_id"],
-            "name": req.get("name", "New conversation"),
-            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
-        }
+        conv = {"id": conv_id, "dialog_id": req["dialog_id"], "name": name, "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]}
         ConversationService.save(**conv)
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
 
 
-@manager.route('/get', methods=['GET'])  # noqa: F821
+@manager.route("/get", methods=["GET"])  # noqa: F821
 @login_required
 def get():
     conv_id = request.args["conversation_id"]
     try:
-
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
             return get_data_error_result(message="Conversation not found!")
         tenants = UserTenantService.query(user_id=current_user.id)
-        avatar =None
+        avatar = None
         for tenant in tenants:
             dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
-            if dialog and len(dialog)>0:
+            if dialog and len(dialog) > 0:
                 avatar = dialog[0].icon
                 break
         else:
-            return get_json_result(
-                data=False, message='Only owner of conversation authorized for this operation.',
-                code=settings.RetCode.OPERATING_ERROR)
+            return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
 
         def get_value(d, k1, k2):
             return d.get(k1, d.get(k2))
@@ -98,7 +95,8 @@ def get():
         for ref in conv.reference:
             if isinstance(ref, list):
                 continue
-            ref["chunks"] = [{
+            ref["chunks"] = [
+                {
                     "id": get_value(ck, "chunk_id", "id"),
                     "content": get_value(ck, "content", "content_with_weight"),
                     "document_id": get_value(ck, "doc_id", "document_id"),
@@ -106,18 +104,21 @@ def get():
                     "dataset_id": get_value(ck, "kb_id", "dataset_id"),
                     "image_id": get_value(ck, "image_id", "img_id"),
                     "positions": get_value(ck, "positions", "position_int"),
-                } for ck in ref.get("chunks", [])]
+                    "doc_type": get_value(ck, "doc_type", "doc_type_kwd"),
+                }
+                for ck in ref.get("chunks", [])
+            ]
 
         conv = conv.to_dict()
-        conv["avatar"]=avatar
+        conv["avatar"] = avatar
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
 
-@manager.route('/getsse/<dialog_id>', methods=['GET'])  # type: ignore # noqa: F821
-def getsse(dialog_id):
-
-    token = request.headers.get('Authorization').split()
+
+@manager.route("/getsse/<dialog_id>", methods=["GET"])  # type: ignore # noqa: F821
+def getsse(dialog_id):
+    token = request.headers.get("Authorization").split()
     if len(token) != 2:
         return get_data_error_result(message='Authorization is not valid!"')
     token = token[1]
@@ -129,13 +130,14 @@ def getsse(dialog_id):
         if not e:
             return get_data_error_result(message="Dialog not found!")
         conv = conv.to_dict()
-        conv["avatar"]= conv["icon"]
+        conv["avatar"] = conv["icon"]
        del conv["icon"]
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
 
-@manager.route('/rm', methods=['POST'])  # noqa: F821
+
+@manager.route("/rm", methods=["POST"])  # noqa: F821
 @login_required
 def rm():
     conv_ids = request.json["conversation_ids"]
@@ -149,28 +151,21 @@ def rm():
             if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                 break
             else:
-                return get_json_result(
-                    data=False, message='Only owner of conversation authorized for this operation.',
-                    code=settings.RetCode.OPERATING_ERROR)
+                return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
             ConversationService.delete_by_id(cid)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
 
 
-@manager.route('/list', methods=['GET'])  # noqa: F821
+@manager.route("/list", methods=["GET"])  # noqa: F821
 @login_required
 def list_convsersation():
     dialog_id = request.args["dialog_id"]
     try:
         if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
-            return get_json_result(
-                data=False, message='Only owner of dialog authorized for this operation.',
-                code=settings.RetCode.OPERATING_ERROR)
-        convs = ConversationService.query(
-            dialog_id=dialog_id,
-            order_by=ConversationService.model.create_time,
-            reverse=True)
+            return get_json_result(data=False, message="Only owner of dialog authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
+        convs = ConversationService.query(dialog_id=dialog_id, order_by=ConversationService.model.create_time, reverse=True)
 
         convs = [d.to_dict() for d in convs]
         return get_json_result(data=convs)
@@ -178,7 +173,7 @@ def list_convsersation():
         return server_error_response(e)
 
 
-@manager.route('/completion', methods=['POST'])  # noqa: F821
+@manager.route("/completion", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "messages")
 def completion():
@@ -205,13 +200,15 @@ def completion():
         if not conv.reference:
             conv.reference = []
         else:
+
             def get_value(d, k1, k2):
                 return d.get(k1, d.get(k2))
 
             for ref in conv.reference:
                 if isinstance(ref, list):
                     continue
-                ref["chunks"] = [{
+                ref["chunks"] = [
+                    {
                         "id": get_value(ck, "chunk_id", "id"),
                         "content": get_value(ck, "content", "content_with_weight"),
                         "document_id": get_value(ck, "doc_id", "document_id"),
@@ -219,11 +216,15 @@ def completion():
                         "dataset_id": get_value(ck, "kb_id", "dataset_id"),
                         "image_id": get_value(ck, "image_id", "img_id"),
                         "positions": get_value(ck, "positions", "position_int"),
-                    } for ck in ref.get("chunks", [])]
+                        "doc_type": get_value(ck, "doc_type_kwd", "doc_type_kwd"),
+                    }
+                    for ck in ref.get("chunks", [])
+                ]
 
         if not conv.reference:
             conv.reference = []
         conv.reference.append({"chunks": [], "doc_aggs": []})
 
         def stream():
             nonlocal dia, msg, req, conv
             try:
@@ -233,9 +234,7 @@ def completion():
                 ConversationService.update_by_id(conv.id, conv.to_dict())
             except Exception as e:
                 traceback.print_exc()
-                yield "data:" + json.dumps({"code": 500, "message": str(e),
-                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                           ensure_ascii=False) + "\n\n"
+                yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
             yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
         if req.get("stream", True):
@@ -257,7 +256,7 @@ def completion():
         return server_error_response(e)
 
 
-@manager.route('/tts', methods=['POST'])  # noqa: F821
+@manager.route("/tts", methods=["POST"])  # noqa: F821
 @login_required
 def tts():
     req = request.json
@@ -279,9 +278,7 @@ def tts():
             for chunk in tts_mdl.tts(txt):
                 yield chunk
         except Exception as e:
-            yield ("data:" + json.dumps({"code": 500, "message": str(e),
-                                         "data": {"answer": "**ERROR**: " + str(e)}},
-                                        ensure_ascii=False)).encode('utf-8')
+            yield ("data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e)}}, ensure_ascii=False)).encode("utf-8")
 
     resp = Response(stream_audio(), mimetype="audio/mpeg")
     resp.headers.add_header("Cache-Control", "no-cache")
@@ -291,7 +288,7 @@ def tts():
     return resp
 
 
-@manager.route('/delete_msg', methods=['POST'])  # noqa: F821
+@manager.route("/delete_msg", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "message_id")
 def delete_msg():
@@ -314,7 +311,7 @@ def delete_msg():
     return get_json_result(data=conv)
 
 
-@manager.route('/thumbup', methods=['POST'])  # noqa: F821
+@manager.route("/thumbup", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "message_id")
 def thumbup():
@@ -322,7 +319,7 @@ def thumbup():
     e, conv = ConversationService.get_by_id(req["conversation_id"])
     if not e:
         return get_data_error_result(message="Conversation not found!")
-    up_down = req.get("set")
+    up_down = req.get("thumbup")
     feedback = req.get("feedback", "")
     conv = conv.to_dict()
     for i, msg in enumerate(conv["message"]):
@@ -341,7 +338,7 @@ def thumbup():
     return get_json_result(data=conv)
 
 
-@manager.route('/ask', methods=['POST'])  # noqa: F821
+@manager.route("/ask", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("question", "kb_ids")
 def ask_about():
@@ -354,9 +351,7 @@ def ask_about():
             for ans in ask(req["question"], req["kb_ids"], uid):
                 yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
         except Exception as e:
-            yield "data:" + json.dumps({"code": 500, "message": str(e),
-                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                       ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
     resp = Response(stream(), mimetype="text/event-stream")
@@ -367,7 +362,7 @@ def ask_about():
     return resp
 
 
-@manager.route('/mindmap', methods=['POST'])  # noqa: F821
+@manager.route("/mindmap", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("question", "kb_ids")
 def mindmap():
@@ -380,18 +375,16 @@ def mindmap():
     embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
     question = req["question"]
-    ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12,
-                                           0.3, 0.3, aggs=False,
-                                           rank_feature=label_question(question, [kb])
-                                           )
+    ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12, 0.3, 0.3, aggs=False, rank_feature=label_question(question, [kb]))
     mindmap = MindMapExtractor(chat_mdl)
-    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
+    mind_map = trio.run(mindmap, [c["content_with_weight"] for c in ranks["chunks"]])
+    mind_map = mind_map.output
     if "error" in mind_map:
         return server_error_response(Exception(mind_map["error"]))
     return get_json_result(data=mind_map)
 
 
-@manager.route('/related_questions', methods=['POST'])  # noqa: F821
+@manager.route("/related_questions", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("question")
 def related_questions():
@@ -399,31 +392,49 @@ def related_questions():
     question = req["question"]
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
     prompt = """
-Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
-Instructions:
- - Based on the keywords provided by the user, generate 5-10 related search terms.
- - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- - Keep the term length between 2-4 words, concise and clear.
- - DO NOT translate, use the language of the original keywords.
+Role: You are an AI language model assistant tasked with generating 5-10 related questions based on a user’s original query. These questions should help expand the search query scope and improve search relevance.
 
 ### Example:
 Keywords: Chinese football
 Related search terms:
 1. Current status of Chinese football
 2. Reform of Chinese football
 3. Youth training of Chinese football
 4. Chinese football in the Asian Cup
 5. Chinese football in the World Cup
|
||||
Instructions:
|
||||
Input: You are provided with a user’s question.
|
||||
Output: Generate 5-10 alternative questions that are related to the original user question. These alternatives should help retrieve a broader range of relevant documents from a vector database.
|
||||
Context: Focus on rephrasing the original question in different ways, making sure the alternative questions are diverse but still connected to the topic of the original query. Do not create overly obscure, irrelevant, or unrelated questions.
|
||||
Fallback: If you cannot generate any relevant alternatives, do not return any questions.
|
||||
Guidance:
|
||||
1. Each alternative should be unique but still relevant to the original query.
|
||||
2. Keep the phrasing clear, concise, and easy to understand.
|
||||
3. Avoid overly technical jargon or specialized terms unless directly relevant.
|
||||
4. Ensure that each question contributes towards improving search results by broadening the search angle, not narrowing it.
|
||||
|
||||
Example:
|
||||
Original Question: What are the benefits of electric vehicles?
|
||||
|
||||
Alternative Questions:
|
||||
1. How do electric vehicles impact the environment?
|
||||
2. What are the advantages of owning an electric car?
|
||||
3. What is the cost-effectiveness of electric vehicles?
|
||||
4. How do electric vehicles compare to traditional cars in terms of fuel efficiency?
|
||||
5. What are the environmental benefits of switching to electric cars?
|
||||
6. How do electric vehicles help reduce carbon emissions?
|
||||
7. Why are electric vehicles becoming more popular?
|
||||
8. What are the long-term savings of using electric vehicles?
|
||||
9. How do electric vehicles contribute to sustainability?
|
||||
10. What are the key benefits of electric vehicles for consumers?
|
||||
|
||||
Reason:
|
||||
- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
|
||||
- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
|
||||
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
|
||||
|
||||
Rephrasing the original query into multiple alternative questions helps the user explore different aspects of their search topic, improving the quality of search results.
|
||||
These questions guide the search engine to provide a more comprehensive set of relevant documents.
|
||||
"""
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
|
||||
ans = chat_mdl.chat(
|
||||
prompt,
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"""
|
||||
Keywords: {question}
|
||||
Related search terms:
|
||||
"""}], {"temperature": 0.9})
|
||||
""",
|
||||
}
|
||||
],
|
||||
{"temperature": 0.9},
|
||||
)
|
||||
return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
|
||||
|
||||
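Note: every streaming handler above frames its output as a server-sent event, `"data:" + json.dumps(...) + "\n\n"`, and closes with a final `{"code": 0, "data": true}` sentinel. A minimal consumer sketch follows (Python; the host, port, endpoint path, and session cookie are illustrative assumptions, not part of this diff):

import json
import requests

def stream_answer(url, payload, cookies):
    # Consume one of the "data: <json>\n\n" SSE streams emitted above.
    with requests.post(url, json=payload, cookies=cookies, stream=True) as resp:
        for raw in resp.iter_lines(decode_unicode=True):
            if not raw or not raw.startswith("data:"):
                continue  # skip the blank separators between events
            event = json.loads(raw[len("data:"):])
            if event["code"] != 0:
                raise RuntimeError(event["message"])  # carries the "**ERROR**: ..." payload
            if event["data"] is True:
                break  # final sentinel emitted once the stream completes
            yield event["data"]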
@@ -18,6 +18,7 @@ from flask import request
 from flask_login import login_required, current_user
 from api.db.services.dialog_service import DialogService
 from api.db import StatusEnum
+from api.db.services.llm_service import TenantLLMService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.user_service import TenantService, UserTenantService
 from api import settings
@@ -42,7 +43,7 @@ def set_dialog():
    similarity_threshold = req.get("similarity_threshold", 0.1)
    vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
    llm_setting = req.get("llm_setting", {})
-    default_prompt = {
+    default_prompt_with_dataset = {
        "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
以下是知识库:
{knowledge}
@@ -53,15 +54,22 @@ def set_dialog():
        ],
        "empty_response": "Sorry! 知识库中未找到相关内容!"
    }
-    prompt_config = req.get("prompt_config", default_prompt)
+    default_prompt_no_dataset = {
+        "system": """You are a helpful assistant.""",
+        "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
+        "parameters": [
+        ],
+        "empty_response": ""
+    }
+    prompt_config = req.get("prompt_config", default_prompt_with_dataset)

    if not prompt_config["system"]:
-        prompt_config["system"] = default_prompt["system"]
-    # if len(prompt_config["parameters"]) < 1:
-    #     prompt_config["parameters"] = default_prompt["parameters"]
-    # for p in prompt_config["parameters"]:
-    #     if p["key"] == "knowledge":break
-    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])
+        prompt_config["system"] = default_prompt_with_dataset["system"]
+
+    if not req.get("kb_ids", []):
+        if prompt_config['system'] == default_prompt_with_dataset['system'] or "{knowledge}" in prompt_config['system']:
+            prompt_config = default_prompt_no_dataset

    for p in prompt_config["parameters"]:
        if p["optional"]:
@@ -74,22 +82,19 @@ def set_dialog():
    e, tenant = TenantService.get_by_id(current_user.id)
    if not e:
        return get_data_error_result(message="Tenant not found!")
-    kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids"))
-    embd_count = len(set([kb.embd_id for kb in kbs]))
-    if embd_count != 1:
+    kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
+    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+    embd_count = len(set(embd_ids))
+    if embd_count > 1:
        return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}"')

    llm_id = req.get("llm_id", tenant.llm_id)
    if not dialog_id:
-        if not req.get("kb_ids"):
-            return get_data_error_result(
-                message="Fail! Please select knowledgebase!")
-
        dia = {
            "id": get_uuid(),
            "tenant_id": current_user.id,
            "name": name,
-            "kb_ids": req["kb_ids"],
+            "kb_ids": req.get("kb_ids", []),
            "description": description,
            "llm_id": llm_id,
            "llm_setting": llm_setting,

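The new consistency check compares embedding IDs with the vendor suffix stripped, so renamed vendor routes no longer trip the "different embedding models" error. A small sketch of the comparison, assuming embedding IDs of the form "<model_name>@<factory>" with the "@<factory>" part optional (the helper name below is the real one; its internals here are a guess):

def split_model_name_and_factory(embd_id: str):
    # Assumed format: "bge-large-zh@BAAI" -> ("bge-large-zh", "BAAI")
    name, _, factory = embd_id.partition("@")
    return name, factory or None

def datasets_share_embedding_model(embd_ids):
    # Vendor suffixes are ignored, so "bge-large-zh@BAAI" and a bare
    # "bge-large-zh" count as the same embedding space.
    return len({split_model_name_and_factory(e)[0] for e in embd_ids}) <= 1

assert datasets_share_embedding_model(["bge-large-zh@BAAI", "bge-large-zh"])
assert not datasets_share_embedding_model(["bge-large-zh@BAAI", "text-embedding-3-small@OpenAI"])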
@@ -20,77 +20,73 @@ import re

 import flask
 from flask import request
-from flask_login import login_required, current_user
+from flask_login import current_user, login_required

-from deepdoc.parser.html_parser import RAGFlowHtmlParser
-from rag.nlp import search
-
-from api.db import FileType, TaskStatus, ParserType, FileSource
+from api import settings
+from api.constants import IMG_BASE64_PREFIX
+from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileSource, FileType, ParserType, TaskStatus
 from api.db.db_models import File, Task
+from api.db.services import duplicate_name
+from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
-from api.db.services.task_service import queue_tasks
-from api.db.services.user_service import UserTenantService
-from api.db.services import duplicate_name
 from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.task_service import TaskService
-from api.db.services.document_service import DocumentService, doc_upload_and_parse
+from api.db.services.task_service import TaskService, queue_tasks
+from api.db.services.user_service import UserTenantService
+from api.utils import get_uuid
 from api.utils.api_utils import (
-    server_error_response,
     get_data_error_result,
     get_json_result,
+    server_error_response,
     validate_request,
 )
-from api.utils import get_uuid
-from api import settings
-from api.utils.api_utils import get_json_result
-from rag.utils.storage_factory import STORAGE_IMPL
-from api.utils.file_utils import filename_type, thumbnail, get_project_base_directory
+from api.utils.file_utils import filename_type, get_project_base_directory, thumbnail
 from api.utils.web_utils import html2pdf, is_valid_url
-from api.constants import IMG_BASE64_PREFIX
+from deepdoc.parser.html_parser import RAGFlowHtmlParser
+from rag.nlp import search
+from rag.utils.storage_factory import STORAGE_IMPL

-@manager.route('/upload', methods=['POST'])  # noqa: F821
+@manager.route("/upload", methods=["POST"])  # noqa: F821
@login_required
@validate_request("kb_id")
def upload():
    kb_id = request.form.get("kb_id")
    if not kb_id:
-        return get_json_result(
-            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
-    if 'file' not in request.files:
-        return get_json_result(
-            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
+    if "file" not in request.files:
+        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)

-    file_objs = request.files.getlist('file')
+    file_objs = request.files.getlist("file")
    for file_obj in file_objs:
-        if file_obj.filename == '':
-            return get_json_result(
-                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
+        if file_obj.filename == "":
+            return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)

    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")
+    err, files = FileService.upload_document(kb, file_objs, current_user.id)
+
+    if not files:
+        return get_json_result(data=files, message="There seems to be an issue with your file format. Please verify it is correct and not corrupted.", code=settings.RetCode.DATA_ERROR)
+    files = [f[0] for f in files]  # remove the blob

-    err, _ = FileService.upload_document(kb, file_objs, current_user.id)
    if err:
-        return get_json_result(
-            data=False, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
-    return get_json_result(data=True)
+        return get_json_result(data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
+    return get_json_result(data=files)

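The reworked handler now returns the created document records (with blobs stripped) instead of a bare True, so clients can pick up the new document IDs directly. A hypothetical client sketch, assuming the usual /v1/document prefix and a logged-in session cookie:

import requests

resp = requests.post(
    "http://localhost:9380/v1/document/upload",  # assumed base URL and prefix
    data={"kb_id": "<dataset-id>"},
    files=[("file", open("manual.pdf", "rb"))],
    cookies={"session": "<login-session>"},
)
body = resp.json()
# On failure, message carries "\n".join(err) or the file-format complaint;
# on success, body["data"] is the list of created document records.
for doc in body.get("data") or []:
    print(doc["id"], doc["name"])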
-@manager.route('/web_crawl', methods=['POST'])  # noqa: F821
+@manager.route("/web_crawl", methods=["POST"])  # noqa: F821
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
    kb_id = request.form.get("kb_id")
    if not kb_id:
-        return get_json_result(
-            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
    name = request.form.get("name")
    url = request.form.get("url")
    if not is_valid_url(url):
-        return get_json_result(
-            data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")
@@ -106,10 +102,7 @@ def web_crawl():
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    try:
-        filename = duplicate_name(
-            DocumentService.query,
-            name=name + ".pdf",
-            kb_id=kb.id)
+        filename = duplicate_name(DocumentService.query, name=name + ".pdf", kb_id=kb.id)
        filetype = filename_type(filename)
        if filetype == FileType.OTHER.value:
            raise RuntimeError("This type of file has not been supported yet!")
@@ -128,7 +121,7 @@ def web_crawl():
            "name": filename,
            "location": location,
            "size": len(blob),
-            "thumbnail": thumbnail(filename, blob)
+            "thumbnail": thumbnail(filename, blob),
        }
        if doc["type"] == FileType.VISUAL:
            doc["parser_id"] = ParserType.PICTURE.value
@@ -145,27 +138,25 @@ def web_crawl():
    return get_json_result(data=True)

-@manager.route('/create', methods=['POST'])  # noqa: F821
+@manager.route("/create", methods=["POST"])  # noqa: F821
@login_required
@validate_request("name", "kb_id")
def create():
    req = request.json
    kb_id = req["kb_id"]
    if not kb_id:
-        return get_json_result(
-            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)

    try:
        e, kb = KnowledgebaseService.get_by_id(kb_id)
        if not e:
-            return get_data_error_result(
-                message="Can't find this knowledgebase!")
+            return get_data_error_result(message="Can't find this knowledgebase!")

        if DocumentService.query(name=req["name"], kb_id=kb_id):
-            return get_data_error_result(
-                message="Duplicated document name in the same knowledgebase.")
+            return get_data_error_result(message="Duplicated document name in the same knowledgebase.")

-        doc = DocumentService.insert({
+        doc = DocumentService.insert(
+            {
                "id": get_uuid(),
                "kb_id": kb.id,
                "parser_id": kb.parser_id,
@@ -174,100 +165,100 @@ def create():
                "type": FileType.VIRTUAL,
                "name": req["name"],
                "location": "",
-                "size": 0
-        })
+                "size": 0,
+            }
+        )
        return get_json_result(data=doc.to_json())
    except Exception as e:
        return server_error_response(e)

-@manager.route('/list', methods=['GET'])  # noqa: F821
+@manager.route("/list", methods=["POST"])  # noqa: F821
@login_required
def list_docs():
    kb_id = request.args.get("kb_id")
    if not kb_id:
-        return get_json_result(
-            data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
    tenants = UserTenantService.query(user_id=current_user.id)
    for tenant in tenants:
-        if KnowledgebaseService.query(
-                tenant_id=tenant.tenant_id, id=kb_id):
+        if KnowledgebaseService.query(tenant_id=tenant.tenant_id, id=kb_id):
            break
    else:
-        return get_json_result(
-            data=False, message='Only owner of knowledgebase authorized for this operation.',
-            code=settings.RetCode.OPERATING_ERROR)
+        return get_json_result(data=False, message="Only owner of knowledgebase authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
    keywords = request.args.get("keywords", "")

-    page_number = int(request.args.get("page", 1))
-    items_per_page = int(request.args.get("page_size", 15))
+    page_number = int(request.args.get("page", 0))
+    items_per_page = int(request.args.get("page_size", 0))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
+
+    req = request.get_json()
+
+    run_status = req.get("run_status", [])
+    if run_status:
+        invalid_status = {s for s in run_status if s not in VALID_TASK_STATUS}
+        if invalid_status:
+            return get_data_error_result(message=f"Invalid filter run status conditions: {', '.join(invalid_status)}")
+
+    types = req.get("types", [])
+    if types:
+        invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
+        if invalid_types:
+            return get_data_error_result(message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}")

    try:
-        docs, tol = DocumentService.get_by_kb_id(
-            kb_id, page_number, items_per_page, orderby, desc, keywords)
+        docs, tol = DocumentService.get_by_kb_id(kb_id, page_number, items_per_page, orderby, desc, keywords, run_status, types)

        for doc_item in docs:
-            if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
-                doc_item['thumbnail'] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"
+            if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
+                doc_item["thumbnail"] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"

        return get_json_result(data={"total": tol, "docs": docs})
    except Exception as e:
        return server_error_response(e)

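With the switch from GET to POST, paging and ordering stay in the query string while the new run_status/types filters travel in the JSON body, validated against VALID_TASK_STATUS and VALID_FILE_TYPES. A sketch of the new contract (URL, cookie, and filter values are illustrative assumptions):

import requests

resp = requests.post(
    "http://localhost:9380/v1/document/list?kb_id=<dataset-id>&page=1&page_size=15",
    json={"run_status": ["3"], "types": ["pdf"]},  # illustrative filter values
    cookies={"session": "<login-session>"},
)
print(resp.json()["data"]["total"])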
-@manager.route('/infos', methods=['POST'])  # noqa: F821
+@manager.route("/infos", methods=["POST"])  # noqa: F821
@login_required
def docinfos():
    req = request.json
    doc_ids = req["doc_ids"]
    for doc_id in doc_ids:
        if not DocumentService.accessible(doc_id, current_user.id):
-            return get_json_result(
-                data=False,
-                message='No authorization.',
-                code=settings.RetCode.AUTHENTICATION_ERROR
-            )
+            return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
    docs = DocumentService.get_by_ids(doc_ids)
    return get_json_result(data=list(docs.dicts()))

-@manager.route('/thumbnails', methods=['GET'])  # noqa: F821
+@manager.route("/thumbnails", methods=["GET"])  # noqa: F821
# @login_required
def thumbnails():
    doc_ids = request.args.get("doc_ids").split(",")
    if not doc_ids:
-        return get_json_result(
-            data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)

    try:
        docs = DocumentService.get_thumbnails(doc_ids)

        for doc_item in docs:
-            if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
-                doc_item['thumbnail'] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"
+            if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
+                doc_item["thumbnail"] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"

        return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
    except Exception as e:
        return server_error_response(e)

-@manager.route('/change_status', methods=['POST'])  # noqa: F821
+@manager.route("/change_status", methods=["POST"])  # noqa: F821
@login_required
@validate_request("doc_id", "status")
def change_status():
    req = request.json
    if str(req["status"]) not in ["0", "1"]:
-        return get_json_result(
-            data=False,
-            message='"Status" must be either 0 or 1!',
-            code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='"Status" must be either 0 or 1!', code=settings.RetCode.ARGUMENT_ERROR)

    if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            message='No authorization.',
-            code=settings.RetCode.AUTHENTICATION_ERROR)
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
@@ -275,23 +266,19 @@ def change_status():
            return get_data_error_result(message="Document not found!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not e:
-            return get_data_error_result(
-                message="Can't find this knowledgebase!")
+            return get_data_error_result(message="Can't find this knowledgebase!")

-        if not DocumentService.update_by_id(
-                req["doc_id"], {"status": str(req["status"])}):
-            return get_data_error_result(
-                message="Database error (Document update)!")
+        if not DocumentService.update_by_id(req["doc_id"], {"status": str(req["status"])}):
+            return get_data_error_result(message="Database error (Document update)!")

        status = int(req["status"])
-        settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status},
-                                     search.index_name(kb.tenant_id), doc.kb_id)
+        settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status}, search.index_name(kb.tenant_id), doc.kb_id)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

-@manager.route('/rm', methods=['POST'])  # noqa: F821
+@manager.route("/rm", methods=["POST"])  # noqa: F821
@login_required
@validate_request("doc_id")
def rm():
@@ -302,16 +289,13 @@ def rm():

    for doc_id in doc_ids:
        if not DocumentService.accessible4deletion(doc_id, current_user.id):
-            return get_json_result(
-                data=False,
-                message='No authorization.',
-                code=settings.RetCode.AUTHENTICATION_ERROR
-            )
+            return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)

    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    errors = ""
+    kb_table_num_map = {}
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
@@ -325,14 +309,25 @@ def rm():

            TaskService.filter_delete([Task.doc_id == doc_id])
            if not DocumentService.remove_document(doc, tenant_id):
-                return get_data_error_result(
-                    message="Database error (Document removal)!")
+                return get_data_error_result(message="Database error (Document removal)!")

            f2d = File2DocumentService.get_by_document_id(doc_id)
-            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
+            deleted_file_count = 0
+            if f2d:
+                deleted_file_count = FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)

+            if deleted_file_count > 0:
                STORAGE_IMPL.rm(b, n)

+            doc_parser = doc.parser_id
+            if doc_parser == ParserType.TABLE:
+                kb_id = doc.kb_id
+                if kb_id not in kb_table_num_map:
+                    counts = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
+                    kb_table_num_map[kb_id] = counts
+                kb_table_num_map[kb_id] -= 1
+                if kb_table_num_map[kb_id] <= 0:
+                    KnowledgebaseService.delete_field_map(kb_id)
        except Exception as e:
            errors += str(e)

@@ -342,19 +337,16 @@ def rm():
    return get_json_result(data=True)

-@manager.route('/run', methods=['POST'])  # noqa: F821
+@manager.route("/run", methods=["POST"])  # noqa: F821
@login_required
@validate_request("doc_ids", "run")
def run():
    req = request.json
    for doc_id in req["doc_ids"]:
        if not DocumentService.accessible(doc_id, current_user.id):
-            return get_json_result(
-                data=False,
-                message='No authorization.',
-                code=settings.RetCode.AUTHENTICATION_ERROR
-            )
+            return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
+        kb_table_num_map = {}
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value and req.get("delete", False):
@@ -377,44 +369,44 @@ def run():
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
+
+                doc_parser = doc.get("parser_id", ParserType.NAIVE)
+                if doc_parser == ParserType.TABLE:
+                    kb_id = doc.get("kb_id")
+                    if not kb_id:
+                        continue
+                    if kb_id not in kb_table_num_map:
+                        count = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
+                        kb_table_num_map[kb_id] = count
+                    if kb_table_num_map[kb_id] <= 0:
+                        KnowledgebaseService.delete_field_map(kb_id)
                bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-                queue_tasks(doc, bucket, name)
+                queue_tasks(doc, bucket, name, 0)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

-@manager.route('/rename', methods=['POST'])  # noqa: F821
+@manager.route("/rename", methods=["POST"])  # noqa: F821
@login_required
@validate_request("doc_id", "name")
def rename():
    req = request.json
    if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            message='No authorization.',
-            code=settings.RetCode.AUTHENTICATION_ERROR
-        )
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(message="Document not found!")
-        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
-                doc.name.lower()).suffix:
-            return get_json_result(
-                data=False,
-                message="The extension of file can't be changed",
-                code=settings.RetCode.ARGUMENT_ERROR)
+        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
+            return get_json_result(data=False, message="The extension of file can't be changed", code=settings.RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
-                return get_data_error_result(
-                    message="Duplicated document name in the same knowledgebase.")
+                return get_data_error_result(message="Duplicated document name in the same knowledgebase.")

-        if not DocumentService.update_by_id(
-                req["doc_id"], {"name": req["name"]}):
-            return get_data_error_result(
-                message="Database error (Document rename)!")
+        if not DocumentService.update_by_id(req["doc_id"], {"name": req["name"]}):
+            return get_data_error_result(message="Database error (Document rename)!")

        informs = File2DocumentService.get_by_document_id(req["doc_id"])
        if informs:
@@ -426,7 +418,7 @@ def rename():
        return server_error_response(e)

-@manager.route('/get/<doc_id>', methods=['GET'])  # noqa: F821
+@manager.route("/get/<doc_id>", methods=["GET"])  # noqa: F821
# @login_required
def get(doc_id):
    try:
@@ -440,29 +432,22 @@ def get(doc_id):
        ext = re.search(r"\.([^.]+)$", doc.name)
        if ext:
            if doc.type == FileType.VISUAL.value:
-                response.headers.set('Content-Type', 'image/%s' % ext.group(1))
+                response.headers.set("Content-Type", "image/%s" % ext.group(1))
            else:
-                response.headers.set(
-                    'Content-Type',
-                    'application/%s' %
-                    ext.group(1))
+                response.headers.set("Content-Type", "application/%s" % ext.group(1))
        return response
    except Exception as e:
        return server_error_response(e)

-@manager.route('/change_parser', methods=['POST'])  # noqa: F821
+@manager.route("/change_parser", methods=["POST"])  # noqa: F821
@login_required
@validate_request("doc_id", "parser_id")
def change_parser():
    req = request.json
+
    if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            message='No authorization.',
-            code=settings.RetCode.AUTHENTICATION_ERROR
-        )
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
@@ -474,21 +459,16 @@ def change_parser():
        else:
            return get_json_result(data=True)

-        if ((doc.type == FileType.VISUAL and req["parser_id"] != "picture")
-                or (re.search(
-                    r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation")):
+        if (doc.type == FileType.VISUAL and req["parser_id"] != "picture") or (re.search(r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation"):
            return get_data_error_result(message="Not supported yet!")

-        e = DocumentService.update_by_id(doc.id,
-                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
-                                          "run": TaskStatus.UNSTART.value})
+        e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "", "run": TaskStatus.UNSTART.value})
        if not e:
            return get_data_error_result(message="Document not found!")
        if "parser_config" in req:
            DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
-            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
-                                                    doc.process_duation * -1)
+            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, doc.process_duation * -1)
        if not e:
            return get_data_error_result(message="Document not found!")
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
@@ -502,7 +482,7 @@ def change_parser():
        return server_error_response(e)

-@manager.route('/image/<image_id>', methods=['GET'])  # noqa: F821
+@manager.route("/image/<image_id>", methods=["GET"])  # noqa: F821
# @login_required
def get_image(image_id):
    try:
@@ -511,53 +491,46 @@ def get_image(image_id):
            return get_data_error_result(message="Image not found.")
        bkt, nm = image_id.split("-")
        response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
-        response.headers.set('Content-Type', 'image/JPEG')
+        response.headers.set("Content-Type", "image/JPEG")
        return response
    except Exception as e:
        return server_error_response(e)

-@manager.route('/upload_and_parse', methods=['POST'])  # noqa: F821
+@manager.route("/upload_and_parse", methods=["POST"])  # noqa: F821
@login_required
@validate_request("conversation_id")
def upload_and_parse():
-    if 'file' not in request.files:
-        return get_json_result(
-            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
+    if "file" not in request.files:
+        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)

-    file_objs = request.files.getlist('file')
+    file_objs = request.files.getlist("file")
    for file_obj in file_objs:
-        if file_obj.filename == '':
-            return get_json_result(
-                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
+        if file_obj.filename == "":
+            return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)

    doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)

    return get_json_result(data=doc_ids)

-@manager.route('/parse', methods=['POST'])  # noqa: F821
+@manager.route("/parse", methods=["POST"])  # noqa: F821
@login_required
def parse():
    url = request.json.get("url") if request.json else ""
    if url:
        if not is_valid_url(url):
-            return get_json_result(
-                data=False, message='The URL format is invalid', code=settings.RetCode.ARGUMENT_ERROR)
+            return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
        download_path = os.path.join(get_project_base_directory(), "logs/downloads")
        os.makedirs(download_path, exist_ok=True)
        from seleniumwire.webdriver import Chrome, ChromeOptions

        options = ChromeOptions()
-        options.add_argument('--headless')
-        options.add_argument('--disable-gpu')
-        options.add_argument('--no-sandbox')
-        options.add_argument('--disable-dev-shm-usage')
-        options.add_experimental_option('prefs', {
-            'download.default_directory': download_path,
-            'download.prompt_for_download': False,
-            'download.directory_upgrade': True,
-            'safebrowsing.enabled': True
-        })
+        options.add_argument("--headless")
+        options.add_argument("--disable-gpu")
+        options.add_argument("--no-sandbox")
+        options.add_argument("--disable-dev-shm-usage")
+        options.add_experimental_option("prefs", {"download.default_directory": download_path, "download.prompt_for_download": False, "download.directory_upgrade": True, "safebrowsing.enabled": True})
        driver = Chrome(options=options)
        driver.get(url)
        res_headers = [r.response.headers for r in driver.requests if r and r.response]
@@ -580,51 +553,41 @@ def parse():

        r = re.search(r"filename=\"([^\"]+)\"", str(res_headers))
        if not r or not r.group(1):
-            return get_json_result(
-                data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
+            return get_json_result(data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
        f = File(r.group(1), os.path.join(download_path, r.group(1)))
        txt = FileService.parse_docs([f], current_user.id)
        return get_json_result(data=txt)

-    if 'file' not in request.files:
-        return get_json_result(
-            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
+    if "file" not in request.files:
+        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)

-    file_objs = request.files.getlist('file')
+    file_objs = request.files.getlist("file")
    txt = FileService.parse_docs(file_objs, current_user.id)

    return get_json_result(data=txt)

-@manager.route('/set_meta', methods=['POST'])  # noqa: F821
+@manager.route("/set_meta", methods=["POST"])  # noqa: F821
@login_required
@validate_request("doc_id", "meta")
def set_meta():
    req = request.json
    if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            message='No authorization.',
-            code=settings.RetCode.AUTHENTICATION_ERROR
-        )
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
        meta = json.loads(req["meta"])
    except Exception as e:
-        return get_json_result(
-            data=False, message=f'Json syntax error: {e}', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message=f"Json syntax error: {e}", code=settings.RetCode.ARGUMENT_ERROR)
    if not isinstance(meta, dict):
-        return get_json_result(
-            data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(message="Document not found!")

-        if not DocumentService.update_by_id(
-                req["doc_id"], {"meta_fields": meta}):
-            return get_data_error_result(
-                message="Database error (meta updates)!")
+        if not DocumentService.update_by_id(req["doc_id"], {"meta_fields": meta}):
+            return get_data_error_result(message="Database error (meta updates)!")

        return get_json_result(data=True)
    except Exception as e:

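A detail worth calling out: the handler expects "meta" as a JSON-encoded string, not a nested object, since it runs json.loads on it and rejects anything that does not decode to a dict. A hypothetical call, with the endpoint path and auth cookie assumed for illustration:

import json
import requests

meta = {"author": "Alice", "year": "2024"}
requests.post(
    "http://localhost:9380/v1/document/set_meta",  # assumed path
    json={"doc_id": "<doc-id>", "meta": json.dumps(meta)},  # meta must be a JSON string
    cookies={"session": "<login-session>"},
)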
@@ -38,8 +38,12 @@ def convert():
    file2documents = []

    try:
+        files = FileService.get_by_ids(file_ids)
+        files_set = dict({file.id: file for file in files})
        for file_id in file_ids:
-            e, file = FileService.get_by_id(file_id)
+            file = files_set[file_id]
            if not file:
                return get_data_error_result(message="File not found!")
            file_ids_list = [file_id]
            if file.type == FileType.FOLDER.value:
                file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])
@@ -86,6 +90,7 @@ def convert():
                "file_id": id,
                "document_id": doc.id,
            })
+
            file2documents.append(file2document.to_json())
        return get_json_result(data=file2documents)
    except Exception as e:

@@ -55,20 +55,17 @@ def upload():
                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
    file_res = []
    try:
-        for file_obj in file_objs:
-            e, file = FileService.get_by_id(pf_id)
+        e, pf_folder = FileService.get_by_id(pf_id)
        if not e:
-            return get_data_error_result(
-                message="Can't find this folder!")
+            return get_data_error_result( message="Can't find this folder!")
+        for file_obj in file_objs:
            MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
            if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
-                return get_data_error_result(
-                    message="Exceed the maximum file number of a free user!")
+                return get_data_error_result( message="Exceed the maximum file number of a free user!")

            # split file name path
            if not file_obj.filename:
-                e, file = FileService.get_by_id(pf_id)
-                file_obj_names = [file.name, file_obj.filename]
+                file_obj_names = [pf_folder.name, file_obj.filename]
            else:
                full_path = '/' + file_obj.filename
                file_obj_names = full_path.split('/')
@@ -184,7 +181,7 @@ def list_files():
        current_user.id, pf_id, page_number, items_per_page, orderby, desc, keywords)

    parent_folder = FileService.get_parent_folder(pf_id)
-    if not FileService.get_parent_folder(pf_id):
+    if not parent_folder:
        return get_json_result(message="File not found!")

    return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
@@ -260,6 +257,7 @@ def rm():
            STORAGE_IMPL.rm(file.parent_id, file.location)
            FileService.delete_folder_by_pf_id(current_user.id, file_id)
        else:
+            STORAGE_IMPL.rm(file.parent_id, file.location)
            if not FileService.delete(file):
                return get_data_error_result(
                    message="Database error (File removal)!")
@@ -358,9 +356,14 @@ def move():
    try:
        file_ids = req["src_file_ids"]
        parent_id = req["dest_file_id"]
+        files = FileService.get_by_ids(file_ids)
+        files_dict = {}
+        for file in files:
+            files_dict[file.id] = file
+
        for file_id in file_ids:
-            e, file = FileService.get_by_id(file_id)
-            if not e:
+            file = files_dict[file_id]
+            if not file:
                return get_data_error_result(message="File or Folder not found!")
            if not file.tenant_id:
                return get_data_error_result(message="Tenant not found!")

@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 import json
 import logging
 import os

 from flask import request
@@ -59,6 +58,7 @@ def create():
        status=StatusEnum.VALID.value)
    try:
        req["id"] = get_uuid()
        req["name"] = dataset_name
        req["tenant_id"] = current_user.id
+        req["created_by"] = current_user.id
        e, t = TenantService.get_by_id(current_user.id)
@@ -74,7 +74,7 @@ def create():

@manager.route('/update', methods=['post'])  # noqa: F821
@login_required
-@validate_request("kb_id", "name", "description", "permission", "parser_id")
+@validate_request("kb_id", "name", "description", "parser_id")
@not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
def update():
    req = request.json
@@ -100,7 +100,7 @@ def update():
    if req.get("parser_id", "") == "tag" and os.environ.get('DOC_ENGINE', "elasticsearch") == "infinity":
        return get_json_result(
            data=False,
-            message='The chunk method Tag has not been supported by Infinity yet.',
+            message='The chunking method Tag has not been supported by Infinity yet.',
            code=settings.RetCode.OPERATING_ERROR
        )

@@ -153,30 +153,44 @@ def detail():
        if not kb:
            return get_data_error_result(
                message="Can't find this knowledgebase!")
+        kb["size"] = DocumentService.get_total_size_by_kb_id(kb_id=kb["id"],keywords="", run_status=[], types=[])
        return get_json_result(data=kb)
    except Exception as e:
        return server_error_response(e)


-@manager.route('/list', methods=['GET'])  # noqa: F821
+@manager.route('/list', methods=['POST'])  # noqa: F821
@login_required
def list_kbs():
    keywords = request.args.get("keywords", "")
-    page_number = int(request.args.get("page", 1))
-    items_per_page = int(request.args.get("page_size", 150))
+    page_number = int(request.args.get("page", 0))
+    items_per_page = int(request.args.get("page_size", 0))
    parser_id = request.args.get("parser_id")
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
+
+    req = request.get_json()
+    owner_ids = req.get("owner_ids", [])
    try:
+        if not owner_ids:
            tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
+            tenants = [m["tenant_id"] for m in tenants]
            kbs, total = KnowledgebaseService.get_by_tenant_ids(
-                [m["tenant_id"] for m in tenants], current_user.id, page_number,
+                tenants, current_user.id, page_number,
                items_per_page, orderby, desc, keywords, parser_id)
+        else:
+            tenants = owner_ids
+            kbs, total = KnowledgebaseService.get_by_tenant_ids(
+                tenants, current_user.id, 0,
+                0, orderby, desc, keywords, parser_id)
+            kbs = [kb for kb in kbs if kb["tenant_id"] in tenants]
+            if page_number and items_per_page:
+                kbs = kbs[(page_number-1)*items_per_page:page_number*items_per_page]
+            total = len(kbs)
        return get_json_result(data={"kbs": kbs, "total": total})
    except Exception as e:
        return server_error_response(e)

@manager.route('/rm', methods=['post'])  # noqa: F821
@login_required
@validate_request("kb_id")
@@ -300,11 +314,12 @@ def knowledge_graph(kb_id):
        "kb_id": [kb_id],
        "knowledge_graph_kwd": ["graph"]
    }

    obj = {"graph": {}, "mind_map": {}}
+    try:
+        if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
+            return get_json_result(data=obj)
+        sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
+    except Exception as e:
+        logging.exception(e)
    if not len(sres.ids):
        return get_json_result(data=obj)

    for id in sres.ids[:1]:
@@ -319,5 +334,21 @@ def knowledge_graph(kb_id):
    if "nodes" in obj["graph"]:
        obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
    if "edges" in obj["graph"]:
-        obj["graph"]["edges"] = sorted(obj["graph"]["edges"], key=lambda x: x.get("weight", 0), reverse=True)[:128]
+        node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
+        filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
+        obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
    return get_json_result(data=obj)

+
+@manager.route('/<kb_id>/knowledge_graph', methods=['DELETE'])  # noqa: F821
+@login_required
+def delete_knowledge_graph(kb_id):
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+    _, kb = KnowledgebaseService.get_by_id(kb_id)
+    settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), kb_id)
+
+    return get_json_result(data=True)

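The added edge hygiene drops self-loops and edges that point at nodes pruned by the pagerank cut before the weight cut-off is applied. A toy restatement of the same filter on hand-made data:

# Self-loops and dangling edges are removed before sorting by weight.
nodes = [{"id": "a", "pagerank": 0.9}, {"id": "b", "pagerank": 0.5}]
edges = [
    {"source": "a", "target": "a", "weight": 9},  # self-loop: dropped
    {"source": "a", "target": "z", "weight": 8},  # "z" was pruned: dropped
    {"source": "a", "target": "b", "weight": 7},  # kept
]
node_id_set = {n["id"] for n in nodes}
filtered = [e for e in edges if e["source"] != e["target"] and e["source"] in node_id_set and e["target"] in node_id_set]
assert filtered == [{"source": "a", "target": "b", "weight": 7}]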
api/apps/langfuse_app.py (new file, 97 lines)
@@ -0,0 +1,97 @@
+#
+# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from flask import request
+from flask_login import current_user, login_required
+from langfuse import Langfuse
+
+from api.db.db_models import DB
+from api.db.services.langfuse_service import TenantLangfuseService
+from api.utils.api_utils import get_error_data_result, get_json_result, server_error_response, validate_request
+
+
+@manager.route("/api_key", methods=["POST", "PUT"])  # noqa: F821
+@login_required
+@validate_request("secret_key", "public_key", "host")
+def set_api_key():
+    req = request.get_json()
+    secret_key = req.get("secret_key", "")
+    public_key = req.get("public_key", "")
+    host = req.get("host", "")
+    if not all([secret_key, public_key, host]):
+        return get_error_data_result(message="Missing required fields")
+
+    langfuse_keys = dict(
+        tenant_id=current_user.id,
+        secret_key=secret_key,
+        public_key=public_key,
+        host=host,
+    )
+
+    langfuse = Langfuse(public_key=langfuse_keys["public_key"], secret_key=langfuse_keys["secret_key"], host=langfuse_keys["host"])
+    if not langfuse.auth_check():
+        return get_error_data_result(message="Invalid Langfuse keys")
+
+    langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
+    with DB.atomic():
+        try:
+            if not langfuse_entry:
+                TenantLangfuseService.save(**langfuse_keys)
+            else:
+                TenantLangfuseService.update_by_tenant(tenant_id=current_user.id, langfuse_keys=langfuse_keys)
+            return get_json_result(data=langfuse_keys)
+        except Exception as e:
+            server_error_response(e)
+
+
+@manager.route("/api_key", methods=["GET"])  # noqa: F821
+@login_required
+@validate_request()
+def get_api_key():
+    langfuse_entry = TenantLangfuseService.filter_by_tenant_with_info(tenant_id=current_user.id)
+    if not langfuse_entry:
+        return get_json_result(message="Have not record any Langfuse keys.")
+
+    langfuse = Langfuse(public_key=langfuse_entry["public_key"], secret_key=langfuse_entry["secret_key"], host=langfuse_entry["host"])
+    try:
+        if not langfuse.auth_check():
+            return get_error_data_result(message="Invalid Langfuse keys loaded")
+    except langfuse.api.core.api_error.ApiError as api_err:
+        return get_json_result(message=f"Error from Langfuse: {api_err}")
+    except Exception as e:
+        server_error_response(e)
+
+    langfuse_entry["project_id"] = langfuse.api.projects.get().dict()["data"][0]["id"]
+    langfuse_entry["project_name"] = langfuse.api.projects.get().dict()["data"][0]["name"]
+
+    return get_json_result(data=langfuse_entry)
+
+
+@manager.route("/api_key", methods=["DELETE"])  # noqa: F821
+@login_required
+@validate_request()
+def delete_api_key():
+    langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
+    if not langfuse_entry:
+        return get_json_result(message="Have not record any Langfuse keys.")
+
+    with DB.atomic():
+        try:
+            TenantLangfuseService.delete_model(langfuse_entry)
+            return get_json_result(data=True)
+        except Exception as e:
+            server_error_response(e)

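A round-trip through the three new endpoints might look like the sketch below. The "/v1/langfuse" prefix, base URL, and session cookie are assumptions for illustration; only the "/api_key" routes and their methods come from the diff.

import requests

base = "http://localhost:9380/v1/langfuse"  # assumed mount point
cookies = {"session": "<login-session>"}
keys = {"secret_key": "sk-...", "public_key": "pk-...", "host": "https://cloud.langfuse.com"}
requests.post(f"{base}/api_key", json=keys, cookies=cookies)    # create or update; keys are auth-checked first
print(requests.get(f"{base}/api_key", cookies=cookies).json())  # echoes keys plus Langfuse project id/name
requests.delete(f"{base}/api_key", cookies=cookies)             # removes the stored record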
@ -61,6 +61,7 @@ def set_api_key():
|
||||
msg = ""
|
||||
for llm in LLMService.query(fid=factory):
|
||||
if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
|
||||
assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
|
||||
mdl = EmbeddingModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
@ -71,6 +72,7 @@ def set_api_key():
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
|
||||
elif not chat_passed and llm.model_type == LLMType.CHAT.value:
|
||||
assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
|
||||
mdl = ChatModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
@ -83,6 +85,7 @@ def set_api_key():
|
||||
msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
|
||||
e)
|
||||
elif not rerank_passed and llm.model_type == LLMType.RERANK:
|
||||
assert factory in RerankModel, f"Re-rank model from {factory} is not supported yet."
|
||||
mdl = RerankModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
@@ -135,6 +138,8 @@ def set_api_key():
def add_llm():
    req = request.json
    factory = req["llm_factory"]
    api_key = req.get("api_key", "x")
    llm_name = req.get("llm_name")

    def apikey_json(keys):
        nonlocal req
@@ -143,7 +148,6 @@ def add_llm():
    if factory == "VolcEngine":
        # For VolcEngine, due to its special authentication method
        # Assemble ark_api_key endpoint_id into api_key
        llm_name = req["llm_name"]
        api_key = apikey_json(["ark_api_key", "endpoint_id"])

    elif factory == "Tencent Hunyuan":
@@ -152,52 +156,43 @@ def add_llm():

    elif factory == "Tencent Cloud":
        req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
        return set_api_key()

    elif factory == "Bedrock":
        # For Bedrock, due to its special authentication method
        # Assemble bedrock_ak, bedrock_sk, bedrock_region
        llm_name = req["llm_name"]
        api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])

    elif factory == "LocalAI":
        llm_name = req["llm_name"] + "___LocalAI"
        api_key = "xxxxxxxxxxxxxxx"
        llm_name += "___LocalAI"

    elif factory == "HuggingFace":
        llm_name = req["llm_name"] + "___HuggingFace"
        api_key = "xxxxxxxxxxxxxxx"
        llm_name += "___HuggingFace"

    elif factory == "OpenAI-API-Compatible":
        llm_name = req["llm_name"] + "___OpenAI-API"
        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
        llm_name += "___OpenAI-API"

    elif factory == "VLLM":
        llm_name += "___VLLM"

    elif factory == "XunFei Spark":
        llm_name = req["llm_name"]
        if req["model_type"] == "chat":
            api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
            api_key = req.get("spark_api_password", "")
        elif req["model_type"] == "tts":
            api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])

    elif factory == "BaiduYiyan":
        llm_name = req["llm_name"]
        api_key = apikey_json(["yiyan_ak", "yiyan_sk"])

    elif factory == "Fish Audio":
        llm_name = req["llm_name"]
        api_key = apikey_json(["fish_audio_ak", "fish_audio_refid"])

    elif factory == "Google Cloud":
        llm_name = req["llm_name"]
        api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])

    elif factory == "Azure-OpenAI":
        llm_name = req["llm_name"]
        api_key = apikey_json(["api_key", "api_version"])

    else:
        llm_name = req["llm_name"]
        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

    llm = {
        "tenant_id": current_user.id,
        "llm_factory": factory,
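For factories with multi-part credentials, apikey_json packs the named request fields into one JSON string that is stored as the api_key. The helper's body is elided by the hunk above, so the following is a minimal sketch of its apparent behavior, with a sample payload standing in for the real request:

import json

def apikey_json(keys):
    # Sketch only: gather the named fields from the request payload and
    # serialize them into a single JSON api_key string.
    req = {"ark_api_key": "ak-123", "endpoint_id": "ep-456"}  # sample payload
    return json.dumps({k: req.get(k, "") for k in keys})

print(apikey_json(["ark_api_key", "endpoint_id"]))
# {"ark_api_key": "ak-123", "endpoint_id": "ep-456"}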
@@ -209,66 +204,74 @@ def add_llm():
    }

    msg = ""
    mdl_nm = llm["llm_name"].split("___")[0]
    if llm["model_type"] == LLMType.EMBEDDING.value:
        assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
        mdl = EmbeddingModel[factory](
            key=llm['api_key'],
            model_name=llm["llm_name"],
            model_name=mdl_nm,
            base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
            msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
        mdl = ChatModel[factory](
            key=llm['api_key'],
            model_name=llm["llm_name"],
            model_name=mdl_nm,
            base_url=llm["api_base"]
        )
        try:
            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                "temperature": 0.9})
            if not tc:
            if not tc and m.find("**ERROR**:") >= 0:
                raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(
            msg += f"\nFail to access model({mdl_nm})." + str(
                e)
    elif llm["model_type"] == LLMType.RERANK:
        assert factory in RerankModel, f"Re-rank model from {factory} is not supported yet."
        try:
            mdl = RerankModel[factory](
                key=llm["api_key"],
                model_name=llm["llm_name"],
                model_name=mdl_nm,
                base_url=llm["api_base"]
            )
            try:
                arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
                if len(arr) == 0:
                    raise Exception("Not known.")
            except KeyError:
                msg += f"{factory} does not support this model({mdl_nm})"
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(
            msg += f"\nFail to access model({mdl_nm})." + str(
                e)
    elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
        assert factory in CvModel, f"Image to text model from {factory} is not supported yet."
        mdl = CvModel[factory](
            key=llm["api_key"],
            model_name=llm["llm_name"],
            model_name=mdl_nm,
            base_url=llm["api_base"]
        )
        try:
            with open(os.path.join(get_project_base_directory(), "web/src/assets/yay.jpg"), "rb") as f:
                m, tc = mdl.describe(f.read())
                if not tc:
                if not m and not tc:
                    raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
            msg += f"\nFail to access model({mdl_nm})." + str(e)
    elif llm["model_type"] == LLMType.TTS:
        assert factory in TTSModel, f"TTS model from {factory} is not supported yet."
        mdl = TTSModel[factory](
            key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
            key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
        )
        try:
            for resp in mdl.tts("Hello~ Ragflower!"):
                pass
        except RuntimeError as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
            msg += f"\nFail to access model({mdl_nm})." + str(e)
    else:
        # TODO: check other type of models
        pass
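The `___` suffix appended in the previous hunk is how the provider tag travels with the model name; `split("___")[0]` recovers the bare model name before a client is constructed, which is exactly what the mdl_nm line at the top of this hunk does:

llm_name = "my-model___OpenAI-API"
mdl_nm = llm_name.split("___")[0]
print(mdl_nm)  # my-model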
@@ -343,8 +346,6 @@ def list_app():

    llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
    for o in objs:
        if not o.api_key:
            continue
        if o.llm_name + "@" + o.llm_factory in llm_set:
            continue
        llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})
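The `llm_name@fid` key makes the dedup factory-aware: an identical model name from a different provider is kept, while an exact duplicate is skipped. A two-line illustration:

llms = [{"llm_name": "gpt-4o", "fid": "OpenAI"}]
llm_set = {m["llm_name"] + "@" + m["fid"] for m in llms}
print("gpt-4o@OpenAI" in llm_set)        # True  -> duplicate, skipped
print("gpt-4o@Azure-OpenAI" in llm_set)  # False -> appended to the list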
api/apps/plugin_app.py (new file, 12 lines)
@@ -0,0 +1,12 @@
from flask import Response
from flask_login import login_required
from api.utils.api_utils import get_json_result
from plugin import GlobalPluginManager

@manager.route('/llm_tools', methods=['GET'])  # noqa: F821
@login_required
def llm_tools() -> Response:
    tools = GlobalPluginManager.get_llm_tools()
    tools_metadata = [t.get_metadata() for t in tools]

    return get_json_result(data=tools_metadata)
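A client-side sketch of the new endpoint. The host, port, and URL prefix depend on how this blueprint is mounted and are placeholders here; the route requires a logged-in session because of @login_required:

import requests

# Placeholder URL; the endpoint returns the metadata of every registered
# LLM tool plugin as JSON.
resp = requests.get(
    "http://localhost:9380/v1/plugin/llm_tools",  # hypothetical path
    cookies={"session": "<session-cookie>"},      # placeholder credential
)
print(resp.json()["data"])  # list of tool metadata dicts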
@@ -14,8 +14,14 @@
# limitations under the License.
#

import json
import time
from typing import Any, cast
from api.db.services.canvas_service import UserCanvasService
from api.utils.api_utils import get_error_data_result, token_required
from api.db.services.user_canvas_version import UserCanvasVersionService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_data_error_result, get_error_data_result, get_json_result, token_required
from api.utils.api_utils import get_result
from flask import request

@@ -37,3 +43,86 @@ def list_agents(tenant_id):
        desc = True
    canvas = UserCanvasService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,title)
    return get_result(data=canvas)


@manager.route("/agents", methods=["POST"])  # noqa: F821
@token_required
def create_agent(tenant_id: str):
    req: dict[str, Any] = cast(dict[str, Any], request.json)
    req["user_id"] = tenant_id

    if req.get("dsl") is not None:
        if not isinstance(req["dsl"], str):
            req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)

        req["dsl"] = json.loads(req["dsl"])
    else:
        return get_json_result(data=False, message="No DSL data in request.", code=RetCode.ARGUMENT_ERROR)

    if req.get("title") is not None:
        req["title"] = req["title"].strip()
    else:
        return get_json_result(data=False, message="No title in request.", code=RetCode.ARGUMENT_ERROR)

    if UserCanvasService.query(user_id=tenant_id, title=req["title"]):
        return get_data_error_result(message=f"Agent with title {req['title']} already exists.")

    agent_id = get_uuid()
    req["id"] = agent_id

    if not UserCanvasService.save(**req):
        return get_data_error_result(message="Fail to create agent.")

    UserCanvasVersionService.insert(
        user_canvas_id=agent_id,
        title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
        dsl=req["dsl"]
    )

    return get_json_result(data=True)


@manager.route("/agents/<agent_id>", methods=["PUT"])  # noqa: F821
@token_required
def update_agent(tenant_id: str, agent_id: str):
    req: dict[str, Any] = {k: v for k, v in cast(dict[str, Any], request.json).items() if v is not None}
    req["user_id"] = tenant_id

    if req.get("dsl") is not None:
        if not isinstance(req["dsl"], str):
            req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)

        req["dsl"] = json.loads(req["dsl"])

    if req.get("title") is not None:
        req["title"] = req["title"].strip()

    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_json_result(
            data=False, message="Only owner of canvas authorized for this operation.",
            code=RetCode.OPERATING_ERROR)

    UserCanvasService.update_by_id(agent_id, req)

    if req.get("dsl") is not None:
        UserCanvasVersionService.insert(
            user_canvas_id=agent_id,
            title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
            dsl=req["dsl"]
        )

        UserCanvasVersionService.delete_all_versions(agent_id)

    return get_json_result(data=True)


@manager.route("/agents/<agent_id>", methods=["DELETE"])  # noqa: F821
@token_required
def delete_agent(tenant_id: str, agent_id: str):
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_json_result(
            data=False, message="Only owner of canvas authorized for this operation.",
            code=RetCode.OPERATING_ERROR)

    UserCanvasService.delete_by_id(agent_id)
    return get_json_result(data=True)
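A sketch of driving the new create endpoint. The base URL and token are placeholders; `dsl` may be sent as an object or as a JSON string, since the handler normalizes both forms:

import requests

payload = {
    "title": "demo-agent",
    "dsl": {"components": {}, "graph": {}},  # object form; a JSON string also works
}
resp = requests.post(
    "http://localhost:9380/api/v1/agents",          # hypothetical base URL
    headers={"Authorization": "Bearer <API_KEY>"},  # placeholder token
    json=payload,
)
print(resp.json())  # {"code": 0, "data": true} on success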
@@ -13,7 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging

from flask import request

from api import settings
from api.db import StatusEnum
from api.db.services.dialog_service import DialogService
@@ -21,17 +24,14 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import TenantService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result
from api.utils.api_utils import check_duplicate_ids, get_error_data_result, get_result, token_required


@manager.route('/chats', methods=['POST'])  # noqa: F821
@manager.route("/chats", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id):
    req = request.json
    ids = req.get("dataset_ids")
    if not ids:
        return get_error_data_result(message="`dataset_ids` is required")
    ids = [i for i in req.get("dataset_ids", []) if i]
    for kb_id in ids:
        kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
        if not kbs:
@@ -40,18 +40,21 @@ def create(tenant_id):
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    kbs = KnowledgebaseService.get_by_ids(ids)
    embd_count = list(set([kb.embd_id for kb in kbs]))
    if len(embd_count) != 1:
        return get_result(message='Datasets use different embedding models."',
                          code=settings.RetCode.AUTHENTICATION_ERROR)

    kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
    embd_count = list(set(embd_ids))
    if len(embd_count) > 1:
        return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
    req["kb_ids"] = ids
    # llm
    llm = req.get("llm")
    if llm:
        if "model_name" in llm:
            req["llm_id"] = llm.pop("model_name")
        if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
        if req.get("llm_id") is not None:
            llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
            if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
        req["llm_setting"] = req.pop("llm")
    e, tenant = TenantService.get_by_id(tenant_id)
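split_model_name_and_factory undoes the `model@Factory` naming convention so the lookup above can match on both columns. The real helper lives on TenantLLMService; this is an illustrative reimplementation of the expected behavior:

def split_model_name_and_factory(name):
    # Illustrative only: a trailing "@Factory" tag is split off; a bare
    # name yields no factory.
    if "@" in name:
        model, factory = name.rsplit("@", 1)
        return model, factory
    return name, None

print(split_model_name_and_factory("deepseek-chat@DeepSeek"))  # ('deepseek-chat', 'DeepSeek')
print(split_model_name_and_factory("deepseek-chat"))           # ('deepseek-chat', None)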
@@ -59,13 +62,8 @@ def create(tenant_id):
        return get_error_data_result(message="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
    key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
@@ -83,9 +81,7 @@ def create(tenant_id):
    req["rerank_id"] = req.get("rerank_id", "")
    if req.get("rerank_id"):
        value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
                                                                                     llm_name=req.get("rerank_id"),
                                                                                     model_type="rerank"):
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
    if not req.get("llm_id"):
        req["llm_id"] = tenant.llm_id
@@ -104,27 +100,24 @@ def create(tenant_id):
        {knowledge}
        The above is the knowledge base.""",
        "prologue": "Hi! I'm your assistant, what can I do for you?",
        "parameters": [
            {"key": "knowledge", "optional": False}
        ],
        "parameters": [{"key": "knowledge", "optional": False}],
        "empty_response": "Sorry! No relevant content was found in the knowledge base!",
        "quote": True,
        "tts": False,
        "refine_multiturn": True
        "refine_multiturn": True,
    }
    key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
    if "prompt_config" not in req:
        req['prompt_config'] = {}
        req["prompt_config"] = {}
    for key in key_list_2:
        temp = req['prompt_config'].get(key)
        if (not temp and key == 'system') or (key not in req["prompt_config"]):
            req['prompt_config'][key] = default_prompt[key]
    for p in req['prompt_config']["parameters"]:
        temp = req["prompt_config"].get(key)
        if (not temp and key == "system") or (key not in req["prompt_config"]):
            req["prompt_config"][key] = default_prompt[key]
    for p in req["prompt_config"]["parameters"]:
        if p["optional"]:
            continue
        if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
            return get_error_data_result(
                message="Parameter '{}' is not used".format(p["key"]))
        if req["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
            return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
    # save
    if not DialogService.save(**req):
        return get_error_data_result(message="Fail to new a chat!")
@@ -139,10 +132,7 @@ def create(tenant_id):
        renamed_dict[new_key] = value
    res["prompt"] = renamed_dict
    del res["prompt_config"]
    new_dict = {"similarity_threshold": res["similarity_threshold"],
                "keywords_similarity_weight": 1-res["vector_similarity_weight"],
                "top_n": res["top_n"],
                "rerank_model": res['rerank_id']}
    new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
    res["prompt"].update(new_dict)
    for key in key_list:
        del res[key]
@@ -154,19 +144,16 @@ def create(tenant_id):
    return get_result(data=res)
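key_mapping translates the public API field names into the internal dialog schema before saving (and back again when building the response). The loop body is elided by the hunk above, so the rename step below is an assumption:

key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote"}
prompt = {"opener": "Hi there!", "show_quote": True}  # names used by the public API
for new_key, old_key in key_mapping.items():
    if old_key in prompt:
        prompt[new_key] = prompt.pop(old_key)  # assumed body of the elided loop
print(prompt)  # {'prologue': 'Hi there!', 'quote': True}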
@manager.route('/chats/<chat_id>', methods=['PUT'])  # noqa: F821
@manager.route("/chats/<chat_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, chat_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message='You do not own the chat')
        return get_error_data_result(message="You do not own the chat")
    req = request.json
    ids = req.get("dataset_ids")
    if "show_quotation" in req:
        req["do_refer"] = req.pop("show_quotation")
    if "dataset_ids" in req:
        if not ids:
            return get_error_data_result("`dataset_ids` can't be empty")
    if ids:
    if ids is not None:
        for kb_id in ids:
            kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
            if not kbs:
@@ -175,12 +162,12 @@ def update(tenant_id, chat_id):
            kb = kbs[0]
            if kb.chunk_num == 0:
                return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")

        kbs = KnowledgebaseService.get_by_ids(ids)
        embd_count = list(set([kb.embd_id for kb in kbs]))
        embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
        embd_count = list(set(embd_ids))
        if len(embd_count) != 1:
            return get_result(
                message='Datasets use different embedding models."',
                code=settings.RetCode.AUTHENTICATION_ERROR)
            return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
        req["kb_ids"] = ids
    llm = req.get("llm")
    if llm:
@@ -194,13 +181,8 @@ def update(tenant_id, chat_id):
        return get_error_data_result(message="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
    key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
@@ -213,17 +195,13 @@ def update(tenant_id, chat_id):
    res = res.to_json()
    if req.get("rerank_id"):
        value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
                                                                                     llm_name=req.get("rerank_id"),
                                                                                     model_type="rerank"):
        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
    if "name" in req:
        if not req.get("name"):
            return get_error_data_result(message="`name` is not empty.")
        if req["name"].lower() != res["name"].lower() \
                and len(
            DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
            return get_error_data_result(message="Duplicated chat name in updating dataset.")
            return get_error_data_result(message="`name` cannot be empty.")
        if req["name"].lower() != res["name"].lower() and len(DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
            return get_error_data_result(message="Duplicated chat name in updating chat.")
    if "prompt_config" in req:
        res["prompt_config"].update(req["prompt_config"])
        for p in res["prompt_config"]["parameters"]:
@@ -245,9 +223,11 @@ def update(tenant_id, chat_id):
    return get_result()

@manager.route('/chats', methods=['DELETE'])  # noqa: F821
@manager.route("/chats", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
    errors = []
    success_count = 0
    req = request.json
    if not req:
        ids = None
@@ -260,15 +240,33 @@ def delete(tenant_id):
            id_list.append(dia.id)
    else:
        id_list = ids
    for id in id_list:

    unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "assistant")

    for id in unique_id_list:
        if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
            return get_error_data_result(message=f"You don't own the chat {id}")
            errors.append(f"Assistant({id}) not found.")
            continue
        temp_dict = {"status": StatusEnum.INVALID.value}
        DialogService.update_by_id(id, temp_dict)
        success_count += 1

    if errors:
        if success_count > 0:
            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} chats with {len(errors)} errors")
        else:
            return get_error_data_result(message="; ".join(errors))

    if duplicate_messages:
        if success_count > 0:
            return get_result(message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
        else:
            return get_error_data_result(message=";".join(duplicate_messages))

    return get_result()
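check_duplicate_ids splits the incoming list into unique IDs plus one message per duplicate, which is what lets delete() report partial success instead of failing outright. The real helper is in api.utils.api_utils; this is an illustrative reimplementation:

def check_duplicate_ids(ids, label="item"):
    # Illustrative only: keep the first occurrence, report the rest.
    seen, unique, messages = set(), [], []
    for i in ids:
        if i in seen:
            messages.append(f"Duplicate {label} ids: {i}")
        else:
            seen.add(i)
            unique.append(i)
    return unique, messages

print(check_duplicate_ids(["a", "b", "a"], "assistant"))
# (['a', 'b'], ['Duplicate assistant ids: a'])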
@manager.route('/chats', methods=['GET'])  # noqa: F821
@manager.route("/chats", methods=["GET"])  # noqa: F821
@token_required
def list_chat(tenant_id):
    id = request.args.get("id")
@@ -288,13 +286,15 @@ def list_chat(tenant_id):
    if not chats:
        return get_result(data=[])
    list_assts = []
    key_mapping = {"parameters": "variables",
    key_mapping = {
        "parameters": "variables",
        "prologue": "opener",
        "quote": "show_quote",
        "system": "prompt",
        "rerank_id": "rerank_model",
        "vector_similarity_weight": "keywords_similarity_weight",
        "do_refer": "show_quotation"}
        "do_refer": "show_quotation",
    }
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    for res in chats:
        renamed_dict = {}
@@ -303,10 +303,7 @@ def list_chat(tenant_id):
            renamed_dict[new_key] = value
        res["prompt"] = renamed_dict
        del res["prompt_config"]
        new_dict = {"similarity_threshold": res["similarity_threshold"],
                    "keywords_similarity_weight": 1-res["vector_similarity_weight"],
                    "top_n": res["top_n"],
                    "rerank_model": res['rerank_id']}
        new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
        res["prompt"].update(new_dict)
        for key in key_list:
            del res[key]
@@ -316,7 +313,8 @@ def list_chat(tenant_id):
        for kb_id in res["kb_ids"]:
            kb = KnowledgebaseService.query(id=kb_id)
            if not kb:
                return get_error_data_result(message=f"Don't exist the kb {kb_id}")
                logging.warning(f"The kb {kb_id} does not exist.")
                continue
            kb_list.append(kb[0].to_json())
        del res["kb_ids"]
        res["datasets"] = kb_list

@@ -14,23 +14,39 @@
# limitations under the License.
#

import logging

from flask import request
from api.db import StatusEnum, FileSource
from peewee import OperationalError

from api.db import FileSource, StatusEnum
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService, LLMService
from api.db.services.user_service import TenantService
from api import settings
from api.utils import get_uuid
from api.utils.api_utils import (
    get_result,
    token_required,
    deep_merge,
    get_error_argument_result,
    get_error_data_result,
    valid,
    get_error_operating_result,
    get_error_permission_result,
    get_parser_config,
    get_result,
    remap_dictionary_keys,
    token_required,
    verify_embedding_availability,
)
from api.utils.validation_utils import (
    CreateDatasetReq,
    DeleteDatasetReq,
    ListDatasetReq,
    UpdateDatasetReq,
    validate_and_parse_json_request,
    validate_and_parse_request_args,
)

@@ -62,20 +78,28 @@ def create(tenant_id):
            name:
              type: string
              description: Name of the dataset.
            avatar:
              type: string
              description: Base64 encoding of the avatar.
            description:
              type: string
              description: Description of the dataset.
            embedding_model:
              type: string
              description: Embedding model Name.
            permission:
              type: string
              enum: ['me', 'team']
              description: Dataset permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Language of the dataset.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email", "tag"
              enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
                     "picture", "presentation", "qa", "table", "tag"
                     ]
              description: Chunking method.
            pagerank:
              type: integer
              description: Set page rank.
            parser_config:
              type: object
              description: Parser configuration.
@@ -88,108 +112,59 @@ def create(tenant_id):
            data:
              type: object
    """
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
        "tag"
    ]
    check_validation = valid(
        permission,
        valid_permission,
        language,
        valid_language,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "tenant_id" in req:
        return get_error_data_result(message="`tenant_id` must not be provided")
    if "chunk_count" in req or "document_count" in req:
        return get_error_data_result(
            message="`chunk_count` or `document_count` must not be provided"
        )
    if "name" not in req:
        return get_error_data_result(message="`name` is not empty!")
    # Field name transformations during model dump:
    # | Original        | Dump Output |
    # |-----------------|-------------|
    # | embedding_model | embd_id     |
    # | chunk_method    | parser_id   |
    req, err = validate_and_parse_json_request(request, CreateDatasetReq)
    if err is not None:
        return get_error_argument_result(err)

    try:
        if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
    req["id"] = get_uuid()
    req["name"] = req["name"].strip()
    if req["name"] == "":
        return get_error_data_result(message="`name` is not empty string!")
    if KnowledgebaseService.query(
        name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
    ):
        return get_error_data_result(
            message="Duplicated dataset name in creating dataset."
        )
    req["tenant_id"] = req["created_by"] = tenant_id
    if not req.get("embedding_model"):
        req["embedding_model"] = t.embd_id
    req["tenant_id"] = tenant_id
    req["created_by"] = tenant_id

    try:
        ok, t = TenantService.get_by_id(tenant_id)
        if not ok:
            return get_error_permission_result(message="Tenant not found")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    if not req.get("embd_id"):
        req["embd_id"] = t.embd_id
    else:
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model")):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"))
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    mapped_keys = {
        new_key: req[old_key]
        for new_key, old_key in key_mapping.items()
        if old_key in req
    }
    req.update(mapped_keys)
    ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
    if not ok:
        return err

    try:
        if not KnowledgebaseService.save(**req):
            return get_error_data_result(message="Create dataset error.(Database error)")
        renamed_data = {}
        e, k = KnowledgebaseService.get_by_id(req["id"])
        for key, value in k.to_dict().items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        return get_result(data=renamed_data)
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    try:
        ok, k = KnowledgebaseService.get_by_id(req["id"])
        if not ok:
            return get_error_data_result(message="Dataset created failed")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    response_data = remap_dictionary_keys(k.to_dict())
    return get_result(data=response_data)
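remap_dictionary_keys replaces the inline key_mapping loops: it renames internal column names to the public API field names described in the comment table above. An illustrative reimplementation under that assumption:

def remap_dictionary_keys(d, mapping=None):
    # Illustrative only: chunk_num -> chunk_count, doc_num -> document_count,
    # parser_id -> chunk_method, embd_id -> embedding_model.
    mapping = mapping or {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    return {mapping.get(k, k): v for k, v in d.items()}

print(remap_dictionary_keys({"embd_id": "BAAI/bge-large-zh-v1.5", "doc_num": 3}))
# {'embedding_model': 'BAAI/bge-large-zh-v1.5', 'document_count': 3}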
@manager.route("/datasets", methods=["DELETE"]) # noqa: F821
|
||||
@ -214,39 +189,60 @@ def delete(tenant_id):
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- ids
|
||||
properties:
|
||||
ids:
|
||||
type: array
|
||||
type: array or null
|
||||
items:
|
||||
type: string
|
||||
description: List of dataset IDs to delete.
|
||||
description: |
|
||||
Specifies the datasets to delete:
|
||||
- If `null`, all datasets will be deleted.
|
||||
- If an array of IDs, only the specified datasets will be deleted.
|
||||
- If an empty array, no datasets will be deleted.
|
||||
responses:
|
||||
200:
|
||||
description: Successful operation.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
req = request.json
|
||||
if not req:
|
||||
ids = None
|
||||
else:
|
||||
ids = req.get("ids")
|
||||
if not ids:
|
||||
id_list = []
|
||||
req, err = validate_and_parse_json_request(request, DeleteDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
kb_id_instance_pairs = []
|
||||
if req["ids"] is None:
|
||||
try:
|
||||
kbs = KnowledgebaseService.query(tenant_id=tenant_id)
|
||||
for kb in kbs:
|
||||
id_list.append(kb.id)
|
||||
kb_id_instance_pairs.append((kb.id, kb))
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
else:
|
||||
id_list = ids
|
||||
for id in id_list:
|
||||
kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(message=f"You don't own the dataset {id}")
|
||||
for doc in DocumentService.query(kb_id=id):
|
||||
error_kb_ids = []
|
||||
for kb_id in req["ids"]:
|
||||
try:
|
||||
kb = KnowledgebaseService.get_or_none(id=kb_id, tenant_id=tenant_id)
|
||||
if kb is None:
|
||||
error_kb_ids.append(kb_id)
|
||||
continue
|
||||
kb_id_instance_pairs.append((kb_id, kb))
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
if len(error_kb_ids) > 0:
|
||||
return get_error_permission_result(message=f"""User '{tenant_id}' lacks permission for datasets: '{", ".join(error_kb_ids)}'""")
|
||||
|
||||
errors = []
|
||||
success_count = 0
|
||||
for kb_id, kb in kb_id_instance_pairs:
|
||||
try:
|
||||
for doc in DocumentService.query(kb_id=kb_id):
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_error_data_result(
|
||||
message="Remove document error.(Database error)"
|
||||
)
|
||||
errors.append(f"Remove document '{doc.id}' error for dataset '{kb_id}'")
|
||||
continue
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[
|
||||
@ -255,11 +251,23 @@ def delete(tenant_id):
|
||||
]
|
||||
)
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
|
||||
if not KnowledgebaseService.delete_by_id(id):
|
||||
return get_error_data_result(message="Delete dataset error.(Database error)")
|
||||
return get_result(code=settings.RetCode.SUCCESS)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kb.name])
|
||||
if not KnowledgebaseService.delete_by_id(kb_id):
|
||||
errors.append(f"Delete dataset error for {kb_id}")
|
||||
continue
|
||||
success_count += 1
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
if not errors:
|
||||
return get_result()
|
||||
|
||||
error_message = f"Successfully deleted {success_count} datasets, {len(errors)} failed. Details: {'; '.join(errors)[:128]}..."
|
||||
if success_count == 0:
|
||||
return get_error_data_result(message=error_message)
|
||||
|
||||
return get_result(data={"success_count": success_count, "errors": errors[:5]}, message=error_message)
|
||||
|
||||
|
||||
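The three request shapes the reworked delete endpoint accepts, per the schema above; host and token are placeholders:

import requests

url = "http://localhost:9380/api/v1/datasets"    # hypothetical base URL
headers = {"Authorization": "Bearer <API_KEY>"}  # placeholder token

requests.delete(url, headers=headers, json={"ids": None})            # delete all datasets
requests.delete(url, headers=headers, json={"ids": ["kb1", "kb2"]})  # delete only these two
requests.delete(url, headers=headers, json={"ids": []})              # delete nothing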
@manager.route("/datasets/<dataset_id>", methods=["PUT"]) # noqa: F821
|
||||
@ -293,20 +301,28 @@ def update(tenant_id, dataset_id):
|
||||
name:
|
||||
type: string
|
||||
description: New name of the dataset.
|
||||
avatar:
|
||||
type: string
|
||||
description: Updated base64 encoding of the avatar.
|
||||
description:
|
||||
type: string
|
||||
description: Updated description of the dataset.
|
||||
embedding_model:
|
||||
type: string
|
||||
description: Updated embedding model Name.
|
||||
permission:
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Updated permission.
|
||||
language:
|
||||
type: string
|
||||
enum: ['Chinese', 'English']
|
||||
description: Updated language.
|
||||
description: Updated dataset permission.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
|
||||
"presentation", "picture", "one", "knowledge_graph", "email", "tag"
|
||||
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
|
||||
"picture", "presentation", "qa", "table", "tag"
|
||||
]
|
||||
description: Updated chunking method.
|
||||
pagerank:
|
||||
type: integer
|
||||
description: Updated page rank.
|
||||
parser_config:
|
||||
type: object
|
||||
description: Updated parser configuration.
|
||||
@ -316,126 +332,65 @@ def update(tenant_id, dataset_id):
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||
return get_error_data_result(message="You don't own the dataset")
|
||||
req = request.json
|
||||
e, t = TenantService.get_by_id(tenant_id)
|
||||
invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
|
||||
if any(key in req for key in invalid_keys):
|
||||
return get_error_data_result(message="The input parameters are invalid.")
|
||||
permission = req.get("permission")
|
||||
language = req.get("language")
|
||||
chunk_method = req.get("chunk_method")
|
||||
parser_config = req.get("parser_config")
|
||||
valid_permission = ["me", "team"]
|
||||
valid_language = ["Chinese", "English"]
|
||||
valid_chunk_method = [
|
||||
"naive",
|
||||
"manual",
|
||||
"qa",
|
||||
"table",
|
||||
"paper",
|
||||
"book",
|
||||
"laws",
|
||||
"presentation",
|
||||
"picture",
|
||||
"one",
|
||||
"knowledge_graph",
|
||||
"email",
|
||||
"tag"
|
||||
]
|
||||
check_validation = valid(
|
||||
permission,
|
||||
valid_permission,
|
||||
language,
|
||||
valid_language,
|
||||
chunk_method,
|
||||
valid_chunk_method,
|
||||
)
|
||||
if check_validation:
|
||||
return check_validation
|
||||
if "tenant_id" in req:
|
||||
if req["tenant_id"] != tenant_id:
|
||||
return get_error_data_result(message="Can't change `tenant_id`.")
|
||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if "parser_config" in req:
|
||||
temp_dict = kb.parser_config
|
||||
temp_dict.update(req["parser_config"])
|
||||
req["parser_config"] = temp_dict
|
||||
if "chunk_count" in req:
|
||||
if req["chunk_count"] != kb.chunk_num:
|
||||
return get_error_data_result(message="Can't change `chunk_count`.")
|
||||
req.pop("chunk_count")
|
||||
if "document_count" in req:
|
||||
if req["document_count"] != kb.doc_num:
|
||||
return get_error_data_result(message="Can't change `document_count`.")
|
||||
req.pop("document_count")
|
||||
if "chunk_method" in req:
|
||||
if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
|
||||
return get_error_data_result(
|
||||
message="If `chunk_count` is not 0, `chunk_method` is not changeable."
|
||||
)
|
||||
req["parser_id"] = req.pop("chunk_method")
|
||||
if req["parser_id"] != kb.parser_id:
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(chunk_method, parser_config)
|
||||
if "embedding_model" in req:
|
||||
if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
|
||||
return get_error_data_result(
|
||||
message="If `chunk_count` is not 0, `embedding_model` is not changeable."
|
||||
)
|
||||
if not req.get("embedding_model"):
|
||||
return get_error_data_result("`embedding_model` can't be empty")
|
||||
valid_embedding_models = [
|
||||
"BAAI/bge-large-zh-v1.5",
|
||||
"BAAI/bge-base-en-v1.5",
|
||||
"BAAI/bge-large-en-v1.5",
|
||||
"BAAI/bge-small-en-v1.5",
|
||||
"BAAI/bge-small-zh-v1.5",
|
||||
"jinaai/jina-embeddings-v2-base-en",
|
||||
"jinaai/jina-embeddings-v2-small-en",
|
||||
"nomic-ai/nomic-embed-text-v1.5",
|
||||
"sentence-transformers/all-MiniLM-L6-v2",
|
||||
"text-embedding-v2",
|
||||
"text-embedding-v3",
|
||||
"maidalun1020/bce-embedding-base_v1",
|
||||
]
|
||||
embd_model = LLMService.query(
|
||||
llm_name=req["embedding_model"], model_type="embedding"
|
||||
)
|
||||
if embd_model:
|
||||
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding",llm_name=req.get("embedding_model"),):
|
||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
||||
if not embd_model:
|
||||
embd_model=TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model"))
|
||||
# Field name transformations during model dump:
|
||||
# | Original | Dump Output |
|
||||
# |----------------|-------------|
|
||||
# | embedding_model| embd_id |
|
||||
# | chunk_method | parser_id |
|
||||
extras = {"dataset_id": dataset_id}
|
||||
req, err = validate_and_parse_json_request(request, UpdateDatasetReq, extras=extras, exclude_unset=True)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
if not embd_model:
|
||||
return get_error_data_result(
|
||||
f"`embedding_model` {req.get('embedding_model')} doesn't exist"
|
||||
)
|
||||
req["embd_id"] = req.pop("embedding_model")
|
||||
if "name" in req:
|
||||
req["name"] = req["name"].strip()
|
||||
if (
|
||||
req["name"].lower() != kb.name.lower()
|
||||
and len(
|
||||
KnowledgebaseService.query(
|
||||
name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
|
||||
)
|
||||
)
|
||||
> 0
|
||||
):
|
||||
return get_error_data_result(
|
||||
message="Duplicated dataset name in updating dataset."
|
||||
)
|
||||
if not req:
|
||||
return get_error_argument_result(message="No properties were modified")
|
||||
|
||||
try:
|
||||
kb = KnowledgebaseService.get_or_none(id=dataset_id, tenant_id=tenant_id)
|
||||
if kb is None:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{dataset_id}'")
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
if req.get("parser_config"):
|
||||
req["parser_config"] = deep_merge(kb.parser_config, req["parser_config"])
|
||||
|
||||
if (chunk_method := req.get("parser_id")) and chunk_method != kb.parser_id:
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(chunk_method, None)
|
||||
elif "parser_config" in req and not req["parser_config"]:
|
||||
del req["parser_config"]
|
||||
|
||||
if "name" in req and req["name"].lower() != kb.name.lower():
|
||||
try:
|
||||
exists = KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
if exists:
|
||||
return get_error_data_result(message=f"Dataset name '{req['name']}' already exists")
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
if "embd_id" in req:
|
||||
if kb.chunk_num != 0 and req["embd_id"] != kb.embd_id:
|
||||
return get_error_data_result(message=f"When chunk_num ({kb.chunk_num}) > 0, embedding_model must remain {kb.embd_id}")
|
||||
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
|
||||
if not ok:
|
||||
return err
|
||||
|
||||
try:
|
||||
if not KnowledgebaseService.update_by_id(kb.id, req):
|
||||
return get_error_data_result(message="Update dataset error.(Database error)")
|
||||
return get_result(code=settings.RetCode.SUCCESS)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
return get_result()
|
||||
|
||||
|
||||
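deep_merge keeps nested parser_config keys that the request leaves out, unlike dict.update, which would replace whole subtrees. An illustrative reimplementation with a made-up config:

def deep_merge(base, patch):
    # Illustrative only: recursively fold patch into a copy of base.
    merged = dict(base)
    for k, v in patch.items():
        if isinstance(v, dict) and isinstance(merged.get(k), dict):
            merged[k] = deep_merge(merged[k], v)
        else:
            merged[k] = v
    return merged

base = {"chunk_token_num": 128, "raptor": {"use_raptor": False, "threshold": 0.1}}
patch = {"raptor": {"use_raptor": True}}
print(deep_merge(base, patch))
# {'chunk_token_num': 128, 'raptor': {'use_raptor': True, 'threshold': 0.1}}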
@manager.route("/datasets", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list(tenant_id):
|
||||
def list_datasets(tenant_id):
|
||||
"""
|
||||
List datasets.
|
||||
---
|
||||
@ -464,7 +419,7 @@ def list(tenant_id):
|
||||
name: page_size
|
||||
type: integer
|
||||
required: false
|
||||
default: 1024
|
||||
default: 30
|
||||
description: Number of items per page.
|
||||
- in: query
|
||||
name: orderby
|
||||
@ -491,45 +446,46 @@ def list(tenant_id):
|
||||
items:
|
||||
type: object
|
||||
"""
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
if id:
|
||||
kbs = KnowledgebaseService.get_kb_by_id(id,tenant_id)
|
||||
args, err = validate_and_parse_request_args(request, ListDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
kb_id = request.args.get("id")
|
||||
name = args.get("name")
|
||||
if kb_id:
|
||||
try:
|
||||
kbs = KnowledgebaseService.get_kb_by_id(kb_id, tenant_id)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {id}")
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{kb_id}'")
|
||||
if name:
|
||||
kbs = KnowledgebaseService.get_kb_by_name(name,tenant_id)
|
||||
try:
|
||||
kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {name}")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{name}'")
|
||||
|
||||
try:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
|
||||
kbs = KnowledgebaseService.get_list(
|
||||
[m["tenant_id"] for m in tenants],
|
||||
tenant_id,
|
||||
page_number,
|
||||
items_per_page,
|
||||
orderby,
|
||||
desc,
|
||||
id,
|
||||
args["page"],
|
||||
args["page_size"],
|
||||
args["orderby"],
|
||||
args["desc"],
|
||||
kb_id,
|
||||
name,
|
||||
)
|
||||
renamed_list = []
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
response_data_list = []
|
||||
for kb in kbs:
|
||||
key_mapping = {
|
||||
"chunk_num": "chunk_count",
|
||||
"doc_num": "document_count",
|
||||
"parser_id": "chunk_method",
|
||||
"embd_id": "embedding_model",
|
||||
}
|
||||
renamed_data = {}
|
||||
for key, value in kb.items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_data[new_key] = value
|
||||
renamed_list.append(renamed_data)
|
||||
return get_result(data=renamed_list)
|
||||
response_data_list.append(remap_dictionary_keys(kb))
|
||||
return get_result(data=response_data_list)
|
||||
|
||||
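Listing datasets with the validated query arguments; the URL, token, and the exact accepted spelling of `desc` are placeholders here:

import requests

resp = requests.get(
    "http://localhost:9380/api/v1/datasets",        # hypothetical base URL
    headers={"Authorization": "Bearer <API_KEY>"},  # placeholder token
    params={"page": 1, "page_size": 30, "orderby": "create_time", "desc": "true"},
)
print(resp.json()["data"])  # dataset dicts with remapped field names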
@@ -16,11 +16,11 @@
from flask import request, jsonify

from api.db import LLMType
from api.db.services.dialog_service import label_question
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api import settings
from api.utils.api_utils import validate_request, build_error_result, apikey_required
from rag.app.tag import label_question


@manager.route('/dify/retrieval', methods=['POST'])  # noqa: F821

@@ -16,7 +16,6 @@
import pathlib
import datetime

from api.db.services.dialog_service import keyword_extraction, label_question
from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import rag_tokenizer
from api.db import LLMType, ParserType
@@ -37,8 +36,10 @@ from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import construct_json_result, get_parser_config
from api.utils.api_utils import construct_json_result, get_parser_config, check_duplicate_ids
from rag.nlp import search
from rag.prompts import keyword_extraction
from rag.app.tag import label_question
from rag.utils import rmSpace
from rag.utils.storage_factory import STORAGE_IMPL

@ -66,6 +67,7 @@ class Chunk(BaseModel):
|
||||
raise ValueError("Each sublist in positions must have a length of 5")
|
||||
return value
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def upload(dataset_id, tenant_id):
|
||||
@ -135,6 +137,10 @@ def upload(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR
|
||||
)
|
||||
if len(file_obj.filename.encode("utf-8")) >= 128:
|
||||
return get_result(
|
||||
message="File name should be less than 128 bytes.", code=settings.RetCode.ARGUMENT_ERROR
|
||||
)
|
||||
'''
|
||||
# total size
|
||||
total_size = 0
|
||||
@ -216,6 +222,9 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
chunk_method:
|
||||
type: string
|
||||
description: Chunking method.
|
||||
enabled:
|
||||
type: boolean
|
||||
description: Document status.
|
||||
responses:
|
||||
200:
|
||||
description: Document updated successfully.
|
||||
@ -225,6 +234,10 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
req = request.json
|
||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||
return get_error_data_result(message="You don't own the dataset.")
|
||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
if not e:
|
||||
return get_error_data_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
|
||||
if not doc:
|
||||
return get_error_data_result(message="The dataset doesn't own the document.")
|
||||
@ -239,7 +252,17 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if req["progress"] != doc.progress:
|
||||
return get_error_data_result(message="Can't change `progress`.")
|
||||
|
||||
if "meta_fields" in req:
|
||||
if not isinstance(req["meta_fields"], dict):
|
||||
return get_error_data_result(message="meta_fields must be a dictionary")
|
||||
DocumentService.update_meta_fields(document_id, req["meta_fields"])
|
||||
|
||||
if "name" in req and req["name"] != doc.name:
|
||||
if len(req["name"].encode("utf-8")) >= 128:
|
||||
return get_result(
|
||||
message="The name should be less than 128 bytes.",
|
||||
code=settings.RetCode.ARGUMENT_ERROR,
|
||||
)
|
||||
if (
|
||||
pathlib.Path(req["name"].lower()).suffix
|
||||
!= pathlib.Path(doc.name.lower()).suffix
|
||||
@ -260,6 +283,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if informs:
|
||||
e, file = FileService.get_by_id(informs[0].file_id)
|
||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
||||
|
||||
if "parser_config" in req:
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if "chunk_method" in req:
|
||||
@ -315,9 +339,25 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
return get_error_data_result(message="Document not found!")
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
|
||||
|
||||
if "enabled" in req:
|
||||
status = int(req["enabled"])
|
||||
if doc.status != req["enabled"]:
|
||||
try:
|
||||
if not DocumentService.update_by_id(
|
||||
doc.id, {"status": str(status)}):
|
||||
return get_error_data_result(
|
||||
message="Database error (Document update)!")
|
||||
|
||||
settings.docStoreConn.update({"doc_id": doc.id}, {"available_int": status},
|
||||
search.index_name(kb.tenant_id), doc.kb_id)
|
||||
return get_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
return get_result()
|
||||
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def download(tenant_id, dataset_id, document_id):
|
||||
@ -356,6 +396,10 @@ def download(tenant_id, dataset_id, document_id):
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
if not document_id:
|
||||
return get_error_data_result(
|
||||
message="Specify document_id please."
|
||||
)
|
||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||
return get_error_data_result(message=f"You do not own the dataset {dataset_id}.")
|
||||
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
|
||||
@ -472,10 +516,12 @@ def list_docs(dataset_id, tenant_id):
|
||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
if not DocumentService.query(id=id, kb_id=dataset_id):
|
||||
|
||||
if id and not DocumentService.query(id=id, kb_id=dataset_id):
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if not DocumentService.query(name=name, kb_id=dataset_id):
|
||||
if name and not DocumentService.query(name=name, kb_id=dataset_id):
|
||||
return get_error_data_result(message=f"You don't own the document {name}.")
|
||||
|
||||
page = int(request.args.get("page", 1))
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_size = int(request.args.get("page_size", 30))
|
||||
@ -569,15 +615,22 @@ def delete(tenant_id, dataset_id):
|
||||
doc_list.append(doc.id)
|
||||
else:
|
||||
doc_list = doc_ids
|
||||
|
||||
unique_doc_ids, duplicate_messages = check_duplicate_ids(doc_list, "document")
|
||||
doc_list = unique_doc_ids
|
||||
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, tenant_id)
|
||||
errors = ""
|
||||
not_found = []
|
||||
success_count = 0
|
||||
for doc_id in doc_list:
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_error_data_result(message="Document not found!")
|
||||
not_found.append(doc_id)
|
||||
continue
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_error_data_result(message="Tenant not found!")
|
||||
@ -599,12 +652,22 @@ def delete(tenant_id, dataset_id):
|
||||
File2DocumentService.delete_by_document_id(doc_id)
|
||||
|
||||
STORAGE_IMPL.rm(b, n)
|
||||
success_count += 1
|
||||
except Exception as e:
|
||||
errors += str(e)
|
||||
|
||||
if not_found:
|
||||
return get_result(message=f"Documents not found: {not_found}", code=settings.RetCode.DATA_ERROR)
|
||||
|
||||
if errors:
|
||||
return get_result(message=errors, code=settings.RetCode.SERVER_ERROR)
|
||||
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
|
||||
return get_result()

@@ -652,18 +715,24 @@ def parse(tenant_id, dataset_id):
req = request.json
if not req.get("document_ids"):
return get_error_data_result("`document_ids` is required")
for id in req["document_ids"]:
doc_list = req.get("document_ids")
unique_doc_ids, duplicate_messages = check_duplicate_ids(doc_list, "document")
doc_list = unique_doc_ids

not_found = []
success_count = 0
for id in doc_list:
doc = DocumentService.query(id=id, kb_id=dataset_id)
if not doc:
not_found.append(id)
continue
if not doc:
return get_error_data_result(message=f"You don't own the document {id}.")
if doc[0].progress != 0.0:
if 0.0 < doc[0].progress < 1.0:
return get_error_data_result(
"Can't stop parsing document with progress at 0 or 100"
"Can't parse document that is currently being processed"
)
info = {"run": "1", "progress": 0}
info["progress_msg"] = ""
info["chunk_num"] = 0
info["token_num"] = 0
info = {"run": "1", "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
DocumentService.update_by_id(id, info)
settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), dataset_id)
TaskService.filter_delete([Task.doc_id == id])
@@ -671,7 +740,16 @@ def parse(tenant_id, dataset_id):
doc = doc.to_dict()
doc["tenant_id"] = tenant_id
bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
queue_tasks(doc, bucket, name)
queue_tasks(doc, bucket, name, 0)
success_count += 1
if not_found:
return get_result(message=f"Documents not found: {not_found}", code=settings.RetCode.DATA_ERROR)
if duplicate_messages:
if success_count > 0:
return get_result(message=f"Partially parsed {success_count} documents with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
else:
return get_error_data_result(message=";".join(duplicate_messages))

return get_result()
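
From a client's perspective this change means duplicate ids no longer abort the whole parse request; they surface in the errors payload instead. An illustrative call (the route and host are assumptions based on the RAGFlow HTTP API layout; adjust to your deployment):

import requests

resp = requests.post(
    "http://ragflow_address/api/v1/datasets/<dataset_id>/chunks",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"document_ids": ["doc_1", "doc_1", "doc_2"]},
)
print(resp.json())  # e.g. "Partially parsed 2 documents with 1 errors"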


@@ -717,9 +795,15 @@ def stop_parsing(tenant_id, dataset_id):
if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
req = request.json

if not req.get("document_ids"):
return get_error_data_result("`document_ids` is required")
for id in req["document_ids"]:
doc_list = req.get("document_ids")
unique_doc_ids, duplicate_messages = check_duplicate_ids(doc_list, "document")
doc_list = unique_doc_ids

success_count = 0
for id in doc_list:
doc = DocumentService.query(id=id, kb_id=dataset_id)
if not doc:
return get_error_data_result(message=f"You don't own the document {id}.")
@@ -729,7 +813,13 @@ def stop_parsing(tenant_id, dataset_id):
)
info = {"run": "2", "progress": 0, "chunk_num": 0}
DocumentService.update_by_id(id, info)
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
success_count += 1
if duplicate_messages:
if success_count > 0:
return get_result(message=f"Partially stopped {success_count} documents with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
else:
return get_error_data_result(message=";".join(duplicate_messages))
return get_result()


@@ -766,6 +856,12 @@ def list_chunks(tenant_id, dataset_id, document_id):
required: false
default: 30
description: Number of items per page.
- in: query
name: id
type: string
required: false
default: ""
description: Chunk Id.
- in: header
name: Authorization
type: string
@@ -850,6 +946,8 @@ def list_chunks(tenant_id, dataset_id, document_id):
res = {"total": 0, "chunks": [], "doc": renamed_doc}
if req.get("id"):
chunk = settings.docStoreConn.get(req.get("id"), search.index_name(tenant_id), [dataset_id])
if not chunk:
return get_result(message=f"Chunk not found: {dataset_id}/{req.get('id')}", code=settings.RetCode.NOT_FOUND)
k = []
for n in chunk.keys():
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
@@ -867,7 +965,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
"important_keywords":chunk.get("important_kwd",[]),
"questions":chunk.get("question_kwd",[]),
"dataset_id":chunk.get("kb_id",chunk.get("dataset_id")),
"image_id":chunk["img_id"],
"image_id":chunk.get("img_id", ""),
"available":bool(chunk.get("available_int",1)),
"positions":chunk.get("position_int",[]),
}
@@ -892,7 +990,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
"questions": sres.field[id].get("question_kwd", []),
"dataset_id": sres.field[id].get("kb_id", sres.field[id].get("dataset_id")),
"image_id": sres.field[id].get("img_id", ""),
"available": bool(sres.field[id].get("available_int", 1)),
"available": bool(int(sres.field[id].get("available_int", "1"))),
"positions": sres.field[id].get("position_int",[]),
}
res["chunks"].append(d)
@@ -977,7 +1075,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
)
doc = doc[0]
req = request.json
if not req.get("content"):
if not str(req.get("content", "")).strip():
return get_error_data_result(message="`content` is required")
if "important_keywords" in req:
if not isinstance(req["important_keywords"], list):
@@ -1000,7 +1098,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
d["important_tks"] = rag_tokenizer.tokenize(
" ".join(req.get("important_keywords", []))
)
d["question_kwd"] = req.get("questions", [])
d["question_kwd"] = [str(q).strip() for q in req.get("questions", []) if str(q).strip()]
d["question_tks"] = rag_tokenizer.tokenize(
"\n".join(req.get("questions", []))
)
@@ -1089,15 +1187,23 @@ def rm_chunk(tenant_id, dataset_id, document_id):
"""
if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
docs = DocumentService.get_by_ids([document_id])
if not docs:
raise LookupError(f"Can't find the document with ID {document_id}!")
req = request.json
condition = {"doc_id": document_id}
if "chunk_ids" in req:
condition["id"] = req["chunk_ids"]
unique_chunk_ids, duplicate_messages = check_duplicate_ids(req["chunk_ids"], "chunk")
condition["id"] = unique_chunk_ids
chunk_number = settings.docStoreConn.delete(condition, search.index_name(tenant_id), dataset_id)
if chunk_number != 0:
DocumentService.decrement_chunk_num(document_id, dataset_id, 1, chunk_number, 0)
if "chunk_ids" in req and chunk_number != len(req["chunk_ids"]):
return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(req['chunk_ids'])}")
if "chunk_ids" in req and chunk_number != len(unique_chunk_ids):
if len(unique_chunk_ids) == 0:
return get_result(message=f"deleted {chunk_number} chunks")
return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(unique_chunk_ids)}")
if duplicate_messages:
return get_result(message=f"Partially deleted {chunk_number} chunks with {len(duplicate_messages)} errors", data={"success_count": chunk_number, "errors": duplicate_messages},)
return get_result(message=f"deleted {chunk_number} chunks")


@@ -1185,7 +1291,7 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
if "questions" in req:
if not isinstance(req["questions"], list):
return get_error_data_result("`questions` should be a list")
d["question_kwd"] = req.get("questions")
d["question_kwd"] = [str(q).strip() for q in req.get("questions", []) if str(q).strip()]
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["questions"]))
if "available" in req:
d["available_int"] = int(req["available"])
@@ -1301,7 +1407,7 @@ def retrieval_test(tenant_id):
if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
return get_error_data_result(f"You don't own the dataset {id}.")
kbs = KnowledgebaseService.get_by_ids(kb_ids)
embd_nms = list(set([kb.embd_id for kb in kbs]))
embd_nms = list(set([TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs])) # remove vendor suffix for comparison
if len(embd_nms) != 1:
return get_result(
message='Datasets use different embedding models.',
@@ -1330,6 +1436,7 @@ def retrieval_test(tenant_id):
else:
highlight = True
try:
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
if not e:
return get_error_data_result(message="Dataset not found!")
@@ -1346,7 +1453,7 @@ def retrieval_test(tenant_id):
ranks = settings.retrievaler.retrieval(
question,
embd_mdl,
kb.tenant_id,
tenant_ids,
kb_ids,
page,
size,

@@ -13,30 +13,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import json
from api.db import LLMType
from flask import request, Response
import re
import time

import tiktoken
from flask import Response, jsonify, request
from api.db.services.conversation_service import ConversationService, iframe_completion
from api.db.services.conversation_service import completion as rag_completion
from api.db.services.canvas_service import completion as agent_completion
from api.db.services.dialog_service import ask
from api.db.services.canvas_service import completion as agent_completion, completionOpenAI
from agent.canvas import Canvas
from api.db import StatusEnum
from api.db import LLMType, StatusEnum
from api.db.db_models import APIToken
from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService
from api.db.services.dialog_service import DialogService
from api.db.services.dialog_service import DialogService, ask, chat
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_result, token_required
from api.utils.api_utils import get_result, token_required, get_data_openai, get_error_data_result, validate_request, check_duplicate_ids
from api.db.services.llm_service import LLMBundle


@manager.route('/chats/<chat_id>/sessions', methods=['POST']) # noqa: F821
@manager.route("/chats/<chat_id>/sessions", methods=["POST"]) # noqa: F821
@token_required
def create(tenant_id, chat_id):
req = request.json
@@ -49,7 +49,7 @@ def create(tenant_id, chat_id):
"dialog_id": req["dialog_id"],
"name": req.get("name", "New session"),
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
"user_id": req.get("user_id", "")
"user_id": req.get("user_id", ""),
}
if not conv.get("name"):
return get_error_data_result(message="`name` can not be empty.")
@@ -58,23 +58,25 @@ def create(tenant_id, chat_id):
if not e:
return get_error_data_result(message="Fail to create a session!")
conv = conv.to_dict()
conv['messages'] = conv.pop("message")
conv["messages"] = conv.pop("message")
conv["chat_id"] = conv.pop("dialog_id")
del conv["reference"]
return get_result(data=conv)


@manager.route('/agents/<agent_id>/sessions', methods=['POST']) # noqa: F821
@manager.route("/agents/<agent_id>/sessions", methods=["POST"]) # noqa: F821
@token_required
def create_agent_session(tenant_id, agent_id):
req = request.json
if not request.is_json:
req = request.form
files = request.files
user_id = request.args.get("user_id", "")
e, cvs = UserCanvasService.get_by_id(agent_id)
if not e:
return get_error_data_result("Agent not found.")

if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
return get_error_data_result("You cannot access the agent.")

if not isinstance(cvs.dsl, str):
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

@@ -84,33 +86,45 @@ def create_agent_session(tenant_id, agent_id):
if query:
for ele in query:
if not ele["optional"]:
if not req.get(ele["key"]):
return get_error_data_result(f"`{ele['key']}` is required")
if ele["type"] == "file":
if files is None or not files.get(ele["key"]):
return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
upload_file = files.get(ele["key"])
file_content = FileService.parse_docs([upload_file], user_id)
file_name = upload_file.filename
ele["value"] = file_name + "\n" + file_content
else:
if req is None or not req.get(ele["key"]):
return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
ele["value"] = req[ele["key"]]
if ele["optional"]:
if req.get(ele["key"]):
ele["value"] = req[ele['key']]
else:
if ele["type"] == "file":
if files is not None and files.get(ele["key"]):
upload_file = files.get(ele["key"])
file_content = FileService.parse_docs([upload_file], user_id)
file_name = upload_file.filename
ele["value"] = file_name + "\n" + file_content
else:
if "value" in ele:
ele.pop("value")
else:
if req is not None and req.get(ele["key"]):
ele["value"] = req[ele["key"]]
else:
if "value" in ele:
ele.pop("value")

for ans in canvas.run(stream=False):
pass

cvs.dsl = json.loads(str(canvas))
conv = {
"id": get_uuid(),
"dialog_id": cvs.id,
"user_id": req.get("user_id", "") if isinstance(req, dict) else "",
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
"source": "agent",
"dsl": cvs.dsl
}
conv = {"id": get_uuid(), "dialog_id": cvs.id, "user_id": user_id, "message": [{"role": "assistant", "content": canvas.get_prologue()}], "source": "agent", "dsl": cvs.dsl}
API4ConversationService.save(**conv)
conv["agent_id"] = conv.pop("dialog_id")
return get_result(data=conv)


@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT']) # noqa: F821
@manager.route("/chats/<chat_id>/sessions/<session_id>", methods=["PUT"]) # noqa: F821
@token_required
def update(tenant_id, chat_id, session_id):
req = request.json
@@ -132,12 +146,14 @@ def update(tenant_id, chat_id, session_id):
return get_result()


@manager.route('/chats/<chat_id>/completions', methods=['POST']) # noqa: F821
@manager.route("/chats/<chat_id>/completions", methods=["POST"]) # noqa: F821
@token_required
def chat_completion(tenant_id, chat_id):
req = request.json
if not req or not req.get("session_id"):
if not req:
req = {"question": ""}
if not req.get("session_id"):
req["question"] = ""
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
return get_error_data_result(f"You don't own the chat {chat_id}")
if req.get("session_id"):
@@ -159,7 +175,227 @@ def chat_completion(tenant_id, chat_id):
return get_result(data=answer)


@manager.route('/agents/<agent_id>/completions', methods=['POST']) # noqa: F821
@manager.route("/chats_openai/<chat_id>/chat/completions", methods=["POST"]) # noqa: F821
@validate_request("model", "messages") # noqa: F821
@token_required
def chat_completion_openai_like(tenant_id, chat_id):
"""
OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.

This function allows users to interact with a model and receive responses based on a series of historical messages.
If `stream` is set to True (the default), the response is streamed in chunks, mimicking the OpenAI-style API.
If `stream` is explicitly set to False, the response is returned as a single complete answer.
Example usage:

curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $RAGFLOW_API_KEY" \
-d '{
"model": "model",
"messages": [{"role": "user", "content": "Say this is a test!"}],
"stream": true
}'

Alternatively, you can use Python's `OpenAI` client:

from openai import OpenAI

model = "model"
client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")

completion = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who are you?"},
{"role": "assistant", "content": "I am an AI assistant named..."},
{"role": "user", "content": "Can you tell me how to install neovim"},
],
stream=True
)

stream = True
if stream:
for chunk in completion:
print(chunk)
else:
print(completion.choices[0].message.content)
"""
req = request.json

messages = req.get("messages", [])
# To prevent empty [] input
if len(messages) < 1:
return get_error_data_result("You have to provide messages.")
if messages[-1]["role"] != "user":
return get_error_data_result("The last message of this conversation is not from the user.")

prompt = messages[-1]["content"]
# Treat context tokens as reasoning tokens
context_token_used = sum(len(message["content"]) for message in messages)

dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
if not dia:
return get_error_data_result(f"You don't own the chat {chat_id}")
dia = dia[0]

# Filter out system messages and any assistant messages that precede the first user message
msg = []
for m in messages:
if m["role"] == "system":
continue
if m["role"] == "assistant" and not msg:
continue
msg.append(m)

# tools = get_tools()
# toolcall_session = SimpleFunctionCallServer()
tools = None
toolcall_session = None

if req.get("stream", True):
# The value for the usage field on all chunks except for the last one will be null.
# The usage field on the last chunk contains token usage statistics for the entire request.
# The choices field on the last chunk will always be an empty array [].
def streamed_response_generator(chat_id, dia, msg):
token_used = 0
answer_cache = ""
reasoning_cache = ""
response = {
"id": f"chatcmpl-{chat_id}",
"choices": [{"delta": {"content": "", "role": "assistant", "function_call": None, "tool_calls": None, "reasoning_content": ""}, "finish_reason": None, "index": 0, "logprobs": None}],
"created": int(time.time()),
"model": "model",
"object": "chat.completion.chunk",
"system_fingerprint": "",
"usage": None,
}

try:
for ans in chat(dia, msg, True, toolcall_session=toolcall_session, tools=tools):
answer = ans["answer"]

reasoning_match = re.search(r"<think>(.*?)</think>", answer, flags=re.DOTALL)
if reasoning_match:
reasoning_part = reasoning_match.group(1)
content_part = answer[reasoning_match.end() :]
else:
reasoning_part = ""
content_part = answer

reasoning_incremental = ""
if reasoning_part:
if reasoning_part.startswith(reasoning_cache):
reasoning_incremental = reasoning_part.replace(reasoning_cache, "", 1)
else:
reasoning_incremental = reasoning_part
reasoning_cache = reasoning_part

content_incremental = ""
if content_part:
if content_part.startswith(answer_cache):
content_incremental = content_part.replace(answer_cache, "", 1)
else:
content_incremental = content_part
answer_cache = content_part

token_used += len(reasoning_incremental) + len(content_incremental)

if not any([reasoning_incremental, content_incremental]):
continue

if reasoning_incremental:
response["choices"][0]["delta"]["reasoning_content"] = reasoning_incremental
else:
response["choices"][0]["delta"]["reasoning_content"] = None

if content_incremental:
response["choices"][0]["delta"]["content"] = content_incremental
else:
response["choices"][0]["delta"]["content"] = None

yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
except Exception as e:
response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"

# The last chunk
response["choices"][0]["delta"]["content"] = None
response["choices"][0]["delta"]["reasoning_content"] = None
response["choices"][0]["finish_reason"] = "stop"
response["usage"] = {"prompt_tokens": len(prompt), "completion_tokens": token_used, "total_tokens": len(prompt) + token_used}
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
yield "data:[DONE]\n\n"

resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
resp.headers.add_header("Cache-control", "no-cache")
resp.headers.add_header("Connection", "keep-alive")
resp.headers.add_header("X-Accel-Buffering", "no")
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
return resp
else:
answer = None
for ans in chat(dia, msg, False, toolcall_session=toolcall_session, tools=tools):
# focus answer content only
answer = ans
break
content = answer["answer"]

response = {
"id": f"chatcmpl-{chat_id}",
"object": "chat.completion",
"created": int(time.time()),
"model": req.get("model", ""),
"usage": {
"prompt_tokens": len(prompt),
"completion_tokens": len(content),
"total_tokens": len(prompt) + len(content),
"completion_tokens_details": {
"reasoning_tokens": context_token_used,
"accepted_prediction_tokens": len(content),
"rejected_prediction_tokens": 0,  # 0 for simplicity
},
},
"choices": [{"message": {"role": "assistant", "content": content}, "logprobs": None, "finish_reason": "stop", "index": 0}],
}
return jsonify(response)
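
A minimal way to consume the streaming variant without the OpenAI SDK (host and key are placeholders; the SSE framing matches the `data:` lines yielded above):

import json
import requests

with requests.post(
    "http://ragflow_address/api/v1/chats_openai/<chat_id>/chat/completions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"model": "model", "messages": [{"role": "user", "content": "Hi"}], "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        payload = line[len("data:"):]
        if payload == "[DONE]":
            break
        delta = json.loads(payload)["choices"][0]["delta"]
        print(delta.get("content") or "", end="")  # None on reasoning-only and final chunks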

@manager.route('/agents_openai/<agent_id>/chat/completions', methods=['POST'])  # noqa: F821
@validate_request("model", "messages")  # noqa: F821
@token_required
def agents_completion_openai_compatibility(tenant_id, agent_id):
req = request.json
tiktokenenc = tiktoken.get_encoding("cl100k_base")
messages = req.get("messages", [])
if not messages:
return get_error_data_result("You must provide at least one message.")
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
return get_error_data_result(f"You don't own the agent {agent_id}")

filtered_messages = [m for m in messages if m["role"] in ["user", "assistant"]]
prompt_tokens = sum(len(tiktokenenc.encode(m["content"])) for m in filtered_messages)
if not filtered_messages:
return jsonify(get_data_openai(
id=agent_id,
content="No valid messages found (user or assistant).",
finish_reason="stop",
model=req.get("model", ""),
completion_tokens=len(tiktokenenc.encode("No valid messages found (user or assistant).")),
prompt_tokens=prompt_tokens,
))

# Get the last user message as the question
question = next((m["content"] for m in reversed(messages) if m["role"] == "user"), "")

if req.get("stream", True):
return Response(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", ""), stream=True), mimetype="text/event-stream")
else:
# For non-streaming, just return the response directly
response = next(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", ""), stream=False))
return jsonify(response)
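
This endpoint mirrors the chats_openai layout, so the same OpenAI client pattern applies; passing a session id through the body's `id` field is an inference from `req.get("id", "")` above, not something the diff documents:

from openai import OpenAI

client = OpenAI(api_key="ragflow-api-key", base_url="http://ragflow_address/api/v1/agents_openai/<agent_id>")
completion = client.chat.completions.create(
    model="model",
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
    extra_body={"id": "<session_id>"},  # optional session reuse; assumed from the handler above
)
print(completion.choices[0].message.content)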


@manager.route("/agents/<agent_id>/completions", methods=["POST"]) # noqa: F821
@token_required
def agent_completions(tenant_id, agent_id):
req = request.json
@@ -170,12 +406,20 @@ def agent_completions(tenant_id, agent_id):
dsl = cvs[0].dsl
if not isinstance(dsl, str):
dsl = json.dumps(dsl)
#canvas = Canvas(dsl, tenant_id)
#if canvas.get_preset_param():
# req["question"] = ""

conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
if not conv:
return get_error_data_result(f"You don't own the session {req['session_id']}")
# If an update to UserCanvas is detected, update the API4Conversation.dsl
sync_dsl = req.get("sync_dsl", False)
if sync_dsl is True and cvs[0].update_time > conv[0].update_time:
current_dsl = conv[0].dsl
new_dsl = json.loads(dsl)
state_fields = ["history", "messages", "path", "reference"]
states = {field: current_dsl.get(field, []) for field in state_fields}
current_dsl.update(new_dsl)
current_dsl.update(states)
API4ConversationService.update_by_id(req["session_id"], {"dsl": current_dsl})
else:
req["question"] = ""
if req.get("stream", True):
@@ -192,7 +436,7 @@ def agent_completions(tenant_id, agent_id):
return get_error_data_result(str(e))
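
Client-side, the new `sync_dsl` flag is opt-in per request; a sketch (host and ids are placeholders, the field names come from the handler above):

import requests

resp = requests.post(
    "http://ragflow_address/api/v1/agents/<agent_id>/completions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"question": "Hi", "session_id": "<session_id>", "sync_dsl": True, "stream": False},
)
print(resp.json())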


@manager.route('/chats/<chat_id>/sessions', methods=['GET']) # noqa: F821
@manager.route("/chats/<chat_id>/sessions", methods=["GET"]) # noqa: F821
@token_required
def list_session(tenant_id, chat_id):
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
@@ -211,7 +455,7 @@ def list_session(tenant_id, chat_id):
if not convs:
return get_result(data=[])
for conv in convs:
conv['messages'] = conv.pop("message")
conv["messages"] = conv.pop("message")
infos = conv["messages"]
for info in infos:
if "prompt" in info:
@@ -220,12 +464,11 @@ def list_session(tenant_id, chat_id):
if conv["reference"]:
messages = conv["messages"]
message_num = 0
chunk_num = 0
while message_num < len(messages):
if message_num != 0 and messages[message_num]["role"] != "user":
chunk_list = []
if "chunks" in conv["reference"][chunk_num]:
chunks = conv["reference"][chunk_num]["chunks"]
if "chunks" in conv["reference"][message_num]:
chunks = conv["reference"][message_num]["chunks"]
for chunk in chunks:
new_chunk = {
"id": chunk.get("chunk_id", chunk.get("id")),
@@ -238,14 +481,13 @@ def list_session(tenant_id, chat_id):
}

chunk_list.append(new_chunk)
chunk_num += 1
messages[message_num]["reference"] = chunk_list
message_num += 1
del conv["reference"]
return get_result(data=convs)


@manager.route('/agents/<agent_id>/sessions', methods=['GET']) # noqa: F821
@manager.route("/agents/<agent_id>/sessions", methods=["GET"]) # noqa: F821
@token_required
def list_agent_session(tenant_id, agent_id):
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
@@ -259,11 +501,13 @@ def list_agent_session(tenant_id, agent_id):
desc = False
else:
desc = True
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id, user_id)
# dsl defaults to True in all cases except for False and false
include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id, user_id, include_dsl)
if not convs:
return get_result(data=[])
for conv in convs:
conv['messages'] = conv.pop("message")
conv["messages"] = conv.pop("message")
infos = conv["messages"]
for info in infos:
if "prompt" in info:
@@ -296,11 +540,14 @@ def list_agent_session(tenant_id, agent_id):
return get_result(data=convs)
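
Listing sessions without the heavy `dsl` blob is now a query-string toggle; any value other than "False"/"false" (including omitting the parameter) keeps it included. An illustrative request (host is a placeholder):

import requests

resp = requests.get(
    "http://ragflow_address/api/v1/agents/<agent_id>/sessions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    params={"dsl": "false"},
)
print(resp.json())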


@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"]) # noqa: F821
@manager.route("/chats/<chat_id>/sessions", methods=["DELETE"]) # noqa: F821
@token_required
def delete(tenant_id, chat_id):
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
return get_error_data_result(message="You don't own the chat")

errors = []
success_count = 0
req = request.json
convs = ConversationService.query(dialog_id=chat_id)
if not req:
@@ -314,15 +561,98 @@ def delete(tenant_id, chat_id):
conv_list.append(conv.id)
else:
conv_list = ids

unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
conv_list = unique_conv_ids

for id in conv_list:
conv = ConversationService.query(id=id, dialog_id=chat_id)
if not conv:
return get_error_data_result(message="The chat doesn't own the session")
errors.append(f"The chat doesn't own the session {id}")
continue
ConversationService.delete_by_id(id)
success_count += 1

if errors:
if success_count > 0:
return get_result(
data={"success_count": success_count, "errors": errors},
message=f"Partially deleted {success_count} sessions with {len(errors)} errors"
)
else:
return get_error_data_result(message="; ".join(errors))

if duplicate_messages:
if success_count > 0:
return get_result(
message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors",
data={"success_count": success_count, "errors": duplicate_messages}
)
else:
return get_error_data_result(message=";".join(duplicate_messages))

return get_result()


@manager.route('/sessions/ask', methods=['POST']) # noqa: F821
@manager.route("/agents/<agent_id>/sessions", methods=["DELETE"]) # noqa: F821
@token_required
def delete_agent_session(tenant_id, agent_id):
errors = []
success_count = 0
req = request.json
cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
if not cvs:
return get_error_data_result(f"You don't own the agent {agent_id}")

convs = API4ConversationService.query(dialog_id=agent_id)
if not convs:
return get_error_data_result(f"Agent {agent_id} has no sessions")

if not req:
ids = None
else:
ids = req.get("ids")

if not ids:
conv_list = []
for conv in convs:
conv_list.append(conv.id)
else:
conv_list = ids

unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
conv_list = unique_conv_ids

for session_id in conv_list:
conv = API4ConversationService.query(id=session_id, dialog_id=agent_id)
if not conv:
errors.append(f"The agent doesn't own the session {session_id}")
continue
API4ConversationService.delete_by_id(session_id)
success_count += 1

if errors:
if success_count > 0:
return get_result(
data={"success_count": success_count, "errors": errors},
message=f"Partially deleted {success_count} sessions with {len(errors)} errors"
)
else:
return get_error_data_result(message="; ".join(errors))

if duplicate_messages:
if success_count > 0:
return get_result(
message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors",
data={"success_count": success_count, "errors": duplicate_messages}
)
else:
return get_error_data_result(message=";".join(duplicate_messages))

return get_result()


@manager.route("/sessions/ask", methods=["POST"]) # noqa: F821
@token_required
def ask_about(tenant_id):
req = request.json
@@ -348,9 +678,7 @@ def ask_about(tenant_id):
for ans in ask(req["question"], req["kb_ids"], uid):
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
except Exception as e:
yield "data:" + json.dumps({"code": 500, "message": str(e),
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

resp = Response(stream(), mimetype="text/event-stream")
@@ -361,7 +689,7 @@ def ask_about(tenant_id):
return resp


@manager.route('/sessions/related_questions', methods=['POST']) # noqa: F821
@manager.route("/sessions/related_questions", methods=["POST"]) # noqa: F821
@token_required
def related_questions(tenant_id):
req = request.json
@@ -393,18 +721,27 @@ Reason:
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.

"""
ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
ans = chat_mdl.chat(
prompt,
[
{
"role": "user",
"content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
""",
}
],
{"temperature": 0.9},
)
return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])


@manager.route('/chatbots/<dialog_id>/completions', methods=['POST']) # noqa: F821
@manager.route("/chatbots/<dialog_id>/completions", methods=["POST"]) # noqa: F821
def chatbot_completions(dialog_id):
req = request.json

token = request.headers.get('Authorization').split()
token = request.headers.get("Authorization").split()
if len(token) != 2:
return get_error_data_result(message='Authorization is not valid!')
token = token[1]
@@ -427,11 +764,11 @@ def chatbot_completions(dialog_id):
return get_result(data=answer)


@manager.route('/agentbots/<agent_id>/completions', methods=['POST']) # noqa: F821
@manager.route("/agentbots/<agent_id>/completions", methods=["POST"]) # noqa: F821
def agent_bot_completions(agent_id):
req = request.json

token = request.headers.get('Authorization').split()
token = request.headers.get("Authorization").split()
if len(token) != 2:
return get_error_data_result(message='Authorization is not valid!')
token = token[1]

@@ -37,7 +37,6 @@ from timeit import default_timer as timer

from rag.utils.redis_conn import REDIS_CONN


@manager.route("/version", methods=["GET"]) # noqa: F821
@login_required
def version():
@@ -201,7 +200,7 @@ def new_token():
if not tenants:
return get_data_error_result(message="Tenant not found!")

tenant_id = tenants[0].tenant_id
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
obj = {
"tenant_id": tenant_id,
"token": generate_confirmation_token(tenant_id),
@@ -256,7 +255,7 @@ def token_list():
if not tenants:
return get_data_error_result(message="Tenant not found!")

tenant_id = tenants[0].tenant_id
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
objs = APITokenService.query(tenant_id=tenant_id)
objs = [o.to_dict() for o in objs]
for o in objs:
@@ -298,3 +297,25 @@ def rm(token):
[APIToken.tenant_id == current_user.id, APIToken.token == token]
)
return get_json_result(data=True)


@manager.route('/config', methods=['GET']) # noqa: F821
def get_config():
"""
Get system configuration.
---
tags:
- System
responses:
200:
description: Return system configuration
schema:
type: object
properties:
registerEnabled:
type: integer
description: Whether user registration is enabled (0 means disabled, 1 means enabled)
"""
return get_json_result(data={
"registerEnabled": settings.REGISTER_ENABLED
})

@@ -13,35 +13,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import logging
import re
from datetime import datetime

from flask import request, session, redirect
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_required, current_user, login_user, logout_user
from flask import redirect, request, session
from flask_login import current_user, login_required, login_user, logout_user
from werkzeug.security import check_password_hash, generate_password_hash

from api import settings
from api.apps.auth import get_auth_client
from api.db import FileType, UserTenantRole
from api.db.db_models import TenantLLM
from api.db.services.llm_service import TenantLLMService, LLMService
from api.utils.api_utils import (
server_error_response,
validate_request,
get_data_error_result,
)
from api.db.services.file_service import FileService
from api.db.services.llm_service import LLMService, TenantLLMService
from api.db.services.user_service import TenantService, UserService, UserTenantService
from api.utils import (
get_uuid,
get_format_time,
decrypt,
download_img,
current_timestamp,
datetime_format,
decrypt,
download_img,
get_format_time,
get_uuid,
)
from api.utils.api_utils import (
construct_response,
get_data_error_result,
get_json_result,
server_error_response,
validate_request,
)
from api.db import UserTenantRole, FileType
from api import settings
from api.db.services.user_service import UserService, TenantService, UserTenantService
from api.db.services.file_service import FileService
from api.utils.api_utils import get_json_result, construct_response


@manager.route("/login", methods=["POST", "GET"]) # noqa: F821
@@ -76,9 +78,7 @@ def login():
type: object
"""
if not request.json:
return get_json_result(
data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!"
)
return get_json_result(data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!")

email = request.json.get("email", "")
users = UserService.query(email=email)
@@ -93,9 +93,7 @@ def login():
try:
password = decrypt(password)
except BaseException:
return get_json_result(
data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password"
)
return get_json_result(data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password")

user = UserService.query_user(email, password)
if user:
@@ -115,9 +113,131 @@ def login():
)


@manager.route("/login/channels", methods=["GET"]) # noqa: F821
def get_login_channels():
"""
Get all supported authentication channels.
"""
try:
channels = []
for channel, config in settings.OAUTH_CONFIG.items():
channels.append(
{
"channel": channel,
"display_name": config.get("display_name", channel.title()),
"icon": config.get("icon", "sso"),
}
)
return get_json_result(data=channels)
except Exception as e:
logging.exception(e)
return get_json_result(data=[], message=f"Load channels failure, error: {str(e)}", code=settings.RetCode.EXCEPTION_ERROR)


@manager.route("/login/<channel>", methods=["GET"]) # noqa: F821
def oauth_login(channel):
channel_config = settings.OAUTH_CONFIG.get(channel)
if not channel_config:
raise ValueError(f"Invalid channel name: {channel}")
auth_cli = get_auth_client(channel_config)

state = get_uuid()
session["oauth_state"] = state
auth_url = auth_cli.get_authorization_url(state)
return redirect(auth_url)


@manager.route("/oauth/callback/<channel>", methods=["GET"]) # noqa: F821
def oauth_callback(channel):
"""
Handle the OAuth/OIDC callback for various channels dynamically.
"""
try:
channel_config = settings.OAUTH_CONFIG.get(channel)
if not channel_config:
raise ValueError(f"Invalid channel name: {channel}")
auth_cli = get_auth_client(channel_config)

# Check the state
state = request.args.get("state")
if not state or state != session.get("oauth_state"):
return redirect("/?error=invalid_state")
session.pop("oauth_state", None)

# Obtain the authorization code
code = request.args.get("code")
if not code:
return redirect("/?error=missing_code")

# Exchange authorization code for access token
token_info = auth_cli.exchange_code_for_token(code)
access_token = token_info.get("access_token")
if not access_token:
return redirect("/?error=token_failed")

id_token = token_info.get("id_token")

# Fetch user info
user_info = auth_cli.fetch_user_info(access_token, id_token=id_token)
if not user_info.email:
return redirect("/?error=email_missing")

# Login or register
users = UserService.query(email=user_info.email)
user_id = get_uuid()

if not users:
try:
try:
avatar = download_img(user_info.avatar_url)
except Exception as e:
logging.exception(e)
avatar = ""

users = user_register(
user_id,
{
"access_token": get_uuid(),
"email": user_info.email,
"avatar": avatar,
"nickname": user_info.nickname,
"login_channel": channel,
"last_login_time": get_format_time(),
"is_superuser": False,
},
)

if not users:
raise Exception(f"Failed to register {user_info.email}")
if len(users) > 1:
raise Exception(f"Same email: {user_info.email} exists!")

# Try to log in
user = users[0]
login_user(user)
return redirect(f"/?auth={user.get_id()}")

except Exception as e:
rollback_user_registration(user_id)
logging.exception(e)
return redirect(f"/?error={str(e)}")

# User exists, try to log in
user = users[0]
user.access_token = get_uuid()
login_user(user)
user.save()
return redirect(f"/?auth={user.get_id()}")
except Exception as e:
logging.exception(e)
return redirect(f"/?error={str(e)}")


@manager.route("/github_callback", methods=["GET"]) # noqa: F821
def github_callback():
"""
**Deprecated**, Use `/oauth/callback/<channel>` instead.

GitHub OAuth callback endpoint.
---
tags:
@@ -309,9 +429,7 @@ def user_info_from_feishu(access_token):
"Content-Type": "application/json; charset=utf-8",
"Authorization": f"Bearer {access_token}",
}
res = requests.get(
"https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers
)
res = requests.get("https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers)
user_info = res.json()["data"]
user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
return user_info
@@ -321,17 +439,13 @@ def user_info_from_github(access_token):
import requests

headers = {"Accept": "application/json", "Authorization": f"token {access_token}"}
res = requests.get(
f"https://api.github.com/user?access_token={access_token}", headers=headers
)
res = requests.get(f"https://api.github.com/user?access_token={access_token}", headers=headers)
user_info = res.json()
email_info = requests.get(
f"https://api.github.com/user/emails?access_token={access_token}",
headers=headers,
).json()
user_info["email"] = next(
(email for email in email_info if email["primary"]), None
)["email"]
user_info["email"] = next((email for email in email_info if email["primary"]), None)["email"]
return user_info


@@ -391,9 +505,7 @@ def setting_user():
request_data = request.json
if request_data.get("password"):
new_password = request_data.get("new_password")
if not check_password_hash(
current_user.password, decrypt(request_data["password"])
):
if not check_password_hash(current_user.password, decrypt(request_data["password"])):
return get_json_result(
data=False,
code=settings.RetCode.AUTHENTICATION_ERROR,
@@ -424,9 +536,7 @@ def setting_user():
return get_json_result(data=True)
except Exception as e:
logging.exception(e)
return get_json_result(
data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR
)
return get_json_result(data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR)


@manager.route("/info", methods=["GET"]) # noqa: F821
@@ -518,7 +628,21 @@ def user_register(user_id, user):
"model_type": llm.model_type,
"api_key": settings.API_KEY,
"api_base": settings.LLM_BASE_URL,
"max_tokens": llm.max_tokens if llm.max_tokens else 8192
"max_tokens": llm.max_tokens if llm.max_tokens else 8192,
}
)
if settings.LIGHTEN != 1:
for buildin_embedding_model in settings.BUILTIN_EMBEDDING_MODELS:
mdlnm, fid = TenantLLMService.split_model_name_and_factory(buildin_embedding_model)
tenant_llm.append(
{
"tenant_id": user_id,
"llm_factory": fid,
"llm_name": mdlnm,
"model_type": "embedding",
"api_key": "",
"api_base": "",
"max_tokens": 1024 if buildin_embedding_model == "BAAI/bge-large-zh-v1.5@BAAI" else 512,
}
)

@@ -562,11 +686,19 @@ def user_add():
schema:
type: object
"""

if not settings.REGISTER_ENABLED:
return get_json_result(
data=False,
message="User registration is disabled!",
code=settings.RetCode.OPERATING_ERROR,
)

req = request.json
email_address = req["email"]

# Validate the email address
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$", email_address):
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", email_address):
return get_json_result(
data=False,
message=f"Invalid email address: {email_address}!",

@@ -49,6 +49,7 @@ class FileType(StrEnum):
FOLDER = 'folder'
OTHER = "other"

VALID_FILE_TYPES = {FileType.PDF, FileType.DOC, FileType.VISUAL, FileType.AURAL, FileType.VIRTUAL, FileType.FOLDER, FileType.OTHER}

class LLMType(StrEnum):
CHAT = 'chat'
@@ -73,6 +74,7 @@ class TaskStatus(StrEnum):
DONE = "3"
FAIL = "4"

VALID_TASK_STATUS = {TaskStatus.UNSTART, TaskStatus.RUNNING, TaskStatus.CANCEL, TaskStatus.DONE, TaskStatus.FAIL}
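
These two constant sets turn status and file-type validation into a set-membership test; a sketch of the intended use (the helper name is illustrative, not part of the diff):

def is_valid_task_status(value: str) -> bool:
    # Membership test against the canonical set instead of ad-hoc string comparisons.
    return value in VALID_TASK_STATUS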

class ParserType(StrEnum):
PRESENTATION = "presentation"

File diff suppressed because it is too large

@@ -103,16 +103,12 @@ def init_llm_factory():
except Exception:
pass

factory_llm_infos = json.load(
open(
os.path.join(get_project_base_directory(), "conf", "llm_factories.json"),
"r",
)
)
for factory_llm_info in factory_llm_infos["factory_llm_infos"]:
llm_infos = factory_llm_info.pop("llm")
factory_llm_infos = settings.FACTORY_LLM_INFOS
for factory_llm_info in factory_llm_infos:
info = deepcopy(factory_llm_info)
llm_infos = info.pop("llm")
try:
LLMFactoriesService.save(**factory_llm_info)
LLMFactoriesService.save(**info)
except Exception:
pass
LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
@@ -123,7 +119,7 @@ def init_llm_factory():
except Exception:
pass

LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
LLMFactoriesService.filter_delete([(LLMFactories.name == "Local") | (LLMFactories.name == "novita.ai")])
LLMService.filter_delete([LLM.fid == "Local"])
LLMService.filter_delete([LLM.llm_name == "qwen-vl-max"])
LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
@@ -152,7 +148,7 @@ def init_llm_factory():
pass
break
for kb_id in KnowledgebaseService.get_all_ids():
KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
KnowledgebaseService.update_document_number_in_init(kb_id=kb_id, doc_num=DocumentService.get_kb_doc_count(kb_id))
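
The deepcopy loop now reads `settings.FACTORY_LLM_INFOS`, which, judging from the removed `json.load` of `conf/llm_factories.json`, is presumably that file's `factory_llm_infos` array preloaded once at settings import. An assumed shape, mirroring the old JSON layout:

# Assumed structure only; the real data comes from conf/llm_factories.json.
FACTORY_LLM_INFOS = [
    {
        "name": "OpenAI",
        "tags": "LLM,TEXT EMBEDDING",
        "llm": [
            {"llm_name": "gpt-4o", "model_type": "chat", "max_tokens": 128000},
        ],
    },
]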


@@ -160,7 +156,7 @@ def add_graph_templates():
dir = os.path.join(get_project_base_directory(), "agent", "templates")
for fnm in os.listdir(dir):
try:
cnvs = json.load(open(os.path.join(dir, fnm), "r"))
cnvs = json.load(open(os.path.join(dir, fnm), "r", encoding="utf-8"))
try:
CanvasTemplateService.save(**cnvs)
except Exception:

@@ -43,8 +43,12 @@ class API4ConversationService(CommonService):
@DB.connection_context()
def get_list(cls, dialog_id, tenant_id,
page_number, items_per_page,
orderby, desc, id, user_id=None):
orderby, desc, id, user_id=None, include_dsl=True):
if include_dsl:
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
else:
fields = [field for field in cls.model._meta.fields.values() if field.name != 'dsl']
sessions = cls.model.select(*fields).where(cls.model.dialog_id == dialog_id)
if id:
sessions = sessions.where(cls.model.id == id)
if user_id:

@@ -18,13 +18,15 @@ import time
import traceback
from uuid import uuid4
from agent.canvas import Canvas
from api.db.db_models import DB, CanvasTemplate, UserCanvas, API4Conversation
from api.db import TenantPermission
from api.db.db_models import DB, CanvasTemplate, User, UserCanvas, API4Conversation
from api.db.services.api_service import API4ConversationService
from api.db.services.common_service import CommonService
from api.db.services.conversation_service import structure_answer
from api.utils import get_uuid

from api.utils.api_utils import get_data_openai
import tiktoken
from peewee import fn
class CanvasTemplateService(CommonService):
model = CanvasTemplate

@@ -51,6 +53,73 @@ class UserCanvasService(CommonService):

return list(agents.dicts())

@classmethod
@DB.connection_context()
def get_by_tenant_id(cls, pid):
try:

fields = [
cls.model.id,
cls.model.avatar,
cls.model.title,
cls.model.dsl,
cls.model.description,
cls.model.permission,
cls.model.update_time,
cls.model.user_id,
cls.model.create_time,
cls.model.create_date,
cls.model.update_date,
User.nickname,
User.avatar.alias('tenant_avatar'),
]
agents = cls.model.select(*fields) \
.join(User, on=(cls.model.user_id == User.id)) \
.where(cls.model.id == pid)
# obj = cls.model.query(id=pid)[0]
return True, agents.dicts()[0]
except Exception as e:
print(e)
return False, None

@classmethod
@DB.connection_context()
def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
page_number, items_per_page,
orderby, desc, keywords,
):
fields = [
cls.model.id,
cls.model.avatar,
cls.model.title,
cls.model.dsl,
cls.model.description,
cls.model.permission,
User.nickname,
User.avatar.alias('tenant_avatar'),
cls.model.update_time
]
if keywords:
agents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
((cls.model.user_id.in_(joined_tenant_ids) & (cls.model.permission ==
TenantPermission.TEAM.value)) | (
cls.model.user_id == user_id)),
(fn.LOWER(cls.model.title).contains(keywords.lower()))
)
else:
agents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
((cls.model.user_id.in_(joined_tenant_ids) & (cls.model.permission ==
TenantPermission.TEAM.value)) | (
cls.model.user_id == user_id))
)
if desc:
agents = agents.order_by(cls.model.getter_by(orderby).desc())
else:
agents = agents.order_by(cls.model.getter_by(orderby).asc())
count = agents.count()
agents = agents.paginate(page_number, items_per_page)
return list(agents.dicts()), count
|
||||
|
||||
|
def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
    e, cvs = UserCanvasService.get_by_id(agent_id)
@@ -86,20 +155,6 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
            "dsl": cvs.dsl
        }
        API4ConversationService.save(**conv)
        if query:
            yield "data:" + json.dumps({"code": 0,
                                        "message": "",
                                        "data": {
                                            "session_id": session_id,
                                            "answer": canvas.get_prologue(),
                                            "reference": [],
                                            "param": canvas.get_preset_param()
                                        }
                                        },
                                       ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
            return
        else:
            conv = API4Conversation(**conv)
    else:
        e, conv = API4ConversationService.get_by_id(session_id)
@@ -130,7 +185,7 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
                continue
            for k in ans.keys():
                final_ans[k] = ans[k]
            ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
            ans = {"answer": ans["content"], "reference": ans.get("reference", []), "param": canvas.get_preset_param()}
            ans = structure_answer(conv, ans, message_id, session_id)
            yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                       ensure_ascii=False) + "\n\n"
@@ -160,8 +215,211 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
            canvas.reference.append(final_ans["reference"])
        conv.dsl = json.loads(str(canvas))

        result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
        result = {"answer": final_ans["content"], "reference": final_ans.get("reference", []), "param": canvas.get_preset_param()}
        result = structure_answer(conv, result, message_id, session_id)
        API4ConversationService.append_message(conv.id, conv.to_dict())
        yield result
        break

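The generator above speaks server-sent events: each payload is a `data:`-prefixed JSON object terminated by a blank line. A minimal client-side sketch, assuming any HTTP client that can stream lines; the endpoint path is hypothetical (the real route lives in the API layer), and the exact envelope varies between the prologue and answer paths:

import json
import requests

with requests.post(
    "http://localhost:9380/api/v1/agents/<agent_id>/completions",  # hypothetical path
    json={"question": "hi", "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        payload = json.loads(line[len("data:"):])
        if payload.get("data") is True:  # end-of-stream sentinel, as yielded above
            break
        print(payload["data"].get("answer", ""))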
def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
    """Main function for OpenAI-compatible completions, structured similarly to the completion function."""
    tiktokenenc = tiktoken.get_encoding("cl100k_base")
    e, cvs = UserCanvasService.get_by_id(agent_id)

    if not e:
        yield get_data_openai(
            id=session_id,
            model=agent_id,
            content="**ERROR**: Agent not found."
        )
        return

    if cvs.user_id != tenant_id:
        yield get_data_openai(
            id=session_id,
            model=agent_id,
            content="**ERROR**: You do not own the agent"
        )
        return

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    canvas = Canvas(cvs.dsl, tenant_id)
    canvas.reset()
    message_id = str(uuid4())

    # Handle new session creation
    if not session_id:
        query = canvas.get_preset_param()
        if query:
            for ele in query:
                if not ele["optional"]:
                    if not kwargs.get(ele["key"]):
                        yield get_data_openai(
                            id=None,
                            model=agent_id,
                            content=f"`{ele['key']}` is required",
                            completion_tokens=len(tiktokenenc.encode(f"`{ele['key']}` is required")),
                            prompt_tokens=len(tiktokenenc.encode(question if question else ""))
                        )
                        return
                    ele["value"] = kwargs[ele["key"]]
                if ele["optional"]:
                    if kwargs.get(ele["key"]):
                        ele["value"] = kwargs[ele["key"]]
                    else:
                        if "value" in ele:
                            ele.pop("value")

        cvs.dsl = json.loads(str(canvas))
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": cvs.id,
            "user_id": kwargs.get("user_id", "") if isinstance(kwargs, dict) else "",
            "message": [{"role": "assistant", "content": canvas.get_prologue(), "created_at": time.time()}],
            "source": "agent",
            "dsl": cvs.dsl
        }
        API4ConversationService.save(**conv)
        conv = API4Conversation(**conv)

    # Handle existing session
    else:
        e, conv = API4ConversationService.get_by_id(session_id)
        if not e:
            yield get_data_openai(
                id=session_id,
                model=agent_id,
                content="**ERROR**: Session not found!"
            )
            return

        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
        canvas.messages.append({"role": "user", "content": question, "id": message_id})
        canvas.add_user_input(question)

        if not conv.message:
            conv.message = []
        conv.message.append({
            "role": "user",
            "content": question,
            "id": message_id
        })

        if not conv.reference:
            conv.reference = []
        conv.reference.append({"chunks": [], "doc_aggs": []})

    # Process request based on stream mode
    final_ans = {"reference": [], "content": ""}
    prompt_tokens = len(tiktokenenc.encode(str(question)))

    if stream:
        try:
            completion_tokens = 0
            for ans in canvas.run(stream=True):
                if ans.get("running_status"):
                    completion_tokens += len(tiktokenenc.encode(ans.get("content", "")))
                    yield "data: " + json.dumps(
                        get_data_openai(
                            id=session_id,
                            model=agent_id,
                            content=ans["content"],
                            object="chat.completion.chunk",
                            completion_tokens=completion_tokens,
                            prompt_tokens=prompt_tokens
                        ),
                        ensure_ascii=False
                    ) + "\n\n"
                    continue

                for k in ans.keys():
                    final_ans[k] = ans[k]

            completion_tokens += len(tiktokenenc.encode(final_ans.get("content", "")))
            yield "data: " + json.dumps(
                get_data_openai(
                    id=session_id,
                    model=agent_id,
                    content=final_ans["content"],
                    object="chat.completion.chunk",
                    finish_reason="stop",
                    completion_tokens=completion_tokens,
                    prompt_tokens=prompt_tokens
                ),
                ensure_ascii=False
            ) + "\n\n"

            # Update conversation
            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "created_at": time.time(), "id": message_id})
            canvas.history.append(("assistant", final_ans["content"]))
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.append_message(conv.id, conv.to_dict())

            yield "data: [DONE]\n\n"

        except Exception as e:
            traceback.print_exc()
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.append_message(conv.id, conv.to_dict())
            yield "data: " + json.dumps(
                get_data_openai(
                    id=session_id,
                    model=agent_id,
                    content="**ERROR**: " + str(e),
                    finish_reason="stop",
                    completion_tokens=len(tiktokenenc.encode("**ERROR**: " + str(e))),
                    prompt_tokens=prompt_tokens
                ),
                ensure_ascii=False
            ) + "\n\n"
            yield "data: [DONE]\n\n"

    else:  # Non-streaming mode
        try:
            all_answer_content = ""
            for answer in canvas.run(stream=False):
                if answer.get("running_status"):
                    continue

                final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
                final_ans["reference"] = answer.get("reference", [])
                all_answer_content += final_ans["content"]

            final_ans["content"] = all_answer_content

            # Update conversation
            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "created_at": time.time(), "id": message_id})
            canvas.history.append(("assistant", final_ans["content"]))
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.append_message(conv.id, conv.to_dict())

            # Return the response in OpenAI format
            yield get_data_openai(
                id=session_id,
                model=agent_id,
                content=final_ans["content"],
                finish_reason="stop",
                completion_tokens=len(tiktokenenc.encode(final_ans["content"])),
                prompt_tokens=prompt_tokens,
                param=canvas.get_preset_param()  # Added param info like in completion
            )

        except Exception as e:
            traceback.print_exc()
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.append_message(conv.id, conv.to_dict())
            yield get_data_openai(
                id=session_id,
                model=agent_id,
                content="**ERROR**: " + str(e),
                finish_reason="stop",
                completion_tokens=len(tiktokenenc.encode("**ERROR**: " + str(e))),
                prompt_tokens=prompt_tokens
            )

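Because the chunks above mimic the OpenAI chat-completion wire format, an off-the-shelf OpenAI client should be able to consume them. A hedged sketch — the base_url mount point and the delta field layout are assumptions, not confirmed by this diff:

from openai import OpenAI

client = OpenAI(
    api_key="<ragflow_api_key>",
    base_url="http://localhost:9380/api/v1",  # hypothetical OpenAI-compatible mount point
)
stream = client.chat.completions.create(
    model="<agent_id>",
    messages=[{"role": "user", "content": "hi"}],
    stream=True,
)
for chunk in stream:
    # Assumes get_data_openai fills choices[0].delta for "chat.completion.chunk" objects.
    print(chunk.choices[0].delta.content or "", end="")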
@@ -18,21 +18,60 @@ from datetime import datetime
import peewee

from api.db.db_models import DB
from api.utils import datetime_format, current_timestamp, get_uuid
from api.utils import current_timestamp, datetime_format, get_uuid


class CommonService:
    """Base service class that provides common database operations.

    This class serves as a foundation for all service classes in the application,
    implementing standard CRUD operations and common database query patterns.
    It uses the Peewee ORM for database interactions and provides a consistent
    interface for database operations across all derived service classes.

    Attributes:
        model: The Peewee model class that this service operates on. Must be set by subclasses.
    """

    model = None

    @classmethod
    @DB.connection_context()
    def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
        return cls.model.query(cols=cols, reverse=reverse,
                               order_by=order_by, **kwargs)
        """Execute a database query with optional column selection and ordering.

        This method provides a flexible way to query the database with various filters
        and sorting options. It supports column selection, sort order control, and
        additional filter conditions.

        Args:
            cols (list, optional): List of column names to select. If None, selects all columns.
            reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
            order_by (str, optional): Column name to sort results by.
            **kwargs: Additional filter conditions passed as keyword arguments.

        Returns:
            peewee.ModelSelect: A query result containing matching records.
        """
        return cls.model.query(cols=cols, reverse=reverse, order_by=order_by, **kwargs)
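The pattern the class docstring describes — a subclass supplies only `model` and inherits the whole CRUD surface — in a minimal sketch (the `Widget` model is invented for illustration):

class WidgetService(CommonService):
    model = Widget  # hypothetical Peewee model with id/create_time/update_time fields

# All inherited helpers are then available:
ok, widget = WidgetService.get_by_id("some-id")
rows = WidgetService.query(name="demo", order_by="create_time", reverse=True)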
    @classmethod
    @DB.connection_context()
    def get_all(cls, cols=None, reverse=None, order_by=None):
        """Retrieve all records from the database with optional column selection and ordering.

        This method fetches all records from the model's table with support for
        column selection and result ordering. If no order_by is specified and reverse
        is True, it defaults to ordering by create_time.

        Args:
            cols (list, optional): List of column names to select. If None, selects all columns.
            reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
            order_by (str, optional): Column name to sort results by. Defaults to 'create_time' if reverse is specified.

        Returns:
            peewee.ModelSelect: A query containing all matching records.
        """
        if cols:
            query_records = cls.model.select(*cols)
        else:
@@ -41,21 +80,44 @@ class CommonService:
        if not order_by or not hasattr(cls, order_by):
            order_by = "create_time"
        if reverse is True:
            query_records = query_records.order_by(
                cls.model.getter_by(order_by).desc())
            query_records = query_records.order_by(cls.model.getter_by(order_by).desc())
        elif reverse is False:
            query_records = query_records.order_by(
                cls.model.getter_by(order_by).asc())
            query_records = query_records.order_by(cls.model.getter_by(order_by).asc())
        return query_records

    @classmethod
    @DB.connection_context()
    def get(cls, **kwargs):
        """Get a single record matching the given criteria.

        This method retrieves a single record from the database that matches
        the specified filter conditions.

        Args:
            **kwargs: Filter conditions as keyword arguments.

        Returns:
            Model instance: Single matching record.

        Raises:
            peewee.DoesNotExist: If no matching record is found.
        """
        return cls.model.get(**kwargs)

    @classmethod
    @DB.connection_context()
    def get_or_none(cls, **kwargs):
        """Get a single record or None if not found.

        This method attempts to retrieve a single record matching the given criteria,
        returning None if no match is found instead of raising an exception.

        Args:
            **kwargs: Filter conditions as keyword arguments.

        Returns:
            Model instance or None: Matching record if found, None otherwise.
        """
        try:
            return cls.model.get(**kwargs)
        except peewee.DoesNotExist:
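The contract difference between the two accessors, sketched with the hypothetical service from above:

try:
    w = WidgetService.get(name="missing")       # raises peewee.DoesNotExist
except peewee.DoesNotExist:
    w = None

w = WidgetService.get_or_none(name="missing")   # returns None instead of raising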
@@ -64,14 +126,34 @@ class CommonService:
    @classmethod
    @DB.connection_context()
    def save(cls, **kwargs):
        # if "id" not in kwargs:
        #     kwargs["id"] = get_uuid()
        """Save a new record to database.

        This method creates a new record in the database with the provided field values,
        forcing an insert operation rather than an update.

        Args:
            **kwargs: Record field values as keyword arguments.

        Returns:
            Model instance: The created record object.
        """
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    @DB.connection_context()
    def insert(cls, **kwargs):
        """Insert a new record with automatic ID and timestamps.

        This method creates a new record with automatically generated ID and timestamp fields.
        It handles the creation of create_time, create_date, update_time, and update_date fields.

        Args:
            **kwargs: Record field values as keyword arguments.

        Returns:
            Model instance: The newly created record object.
        """
        if "id" not in kwargs:
            kwargs["id"] = get_uuid()
        kwargs["create_time"] = current_timestamp()
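The split between the two creators: `save` writes exactly the fields you pass (the caller owns the `id` and timestamps), while `insert` generates the ID and all four timestamp fields itself. A hedged sketch with the hypothetical service:

WidgetService.save(id=get_uuid(), name="explicit", create_time=current_timestamp())
w = WidgetService.insert(name="auto")  # id, create_time/date, update_time/date filled in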
@@ -84,26 +166,49 @@ class CommonService:
    @classmethod
    @DB.connection_context()
    def insert_many(cls, data_list, batch_size=100):
        """Insert multiple records in batches.

        This method efficiently inserts multiple records into the database using batch processing.
        It automatically sets creation timestamps for all records.

        Args:
            data_list (list): List of dictionaries containing record data to insert.
            batch_size (int, optional): Number of records to insert in each batch. Defaults to 100.
        """
        with DB.atomic():
            for d in data_list:
                d["create_time"] = current_timestamp()
                d["create_date"] = datetime_format(datetime.now())
            for i in range(0, len(data_list), batch_size):
                cls.model.insert_many(data_list[i:i + batch_size]).execute()
                cls.model.insert_many(data_list[i : i + batch_size]).execute()
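With the default batch_size of 100, a 250-row list becomes three INSERT statements inside a single transaction. A sketch:

rows = [{"id": get_uuid(), "name": f"w{i}"} for i in range(250)]
WidgetService.insert_many(rows)                 # batches of 100 + 100 + 50, atomically
WidgetService.insert_many(rows, batch_size=50)  # six smaller statements instead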
    @classmethod
    @DB.connection_context()
    def update_many_by_id(cls, data_list):
        """Update multiple records by their IDs.

        This method updates multiple records in the database, identified by their IDs.
        It automatically updates the update_time and update_date fields for each record.

        Args:
            data_list (list): List of dictionaries containing record data to update.
                Each dictionary must include an 'id' field.
        """
        with DB.atomic():
            for data in data_list:
                data["update_time"] = current_timestamp()
                data["update_date"] = datetime_format(datetime.now())
                cls.model.update(data).where(
                    cls.model.id == data["id"]).execute()
                cls.model.update(data).where(cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
    def update_by_id(cls, pid, data):
        # Update a single record by ID
        # Args:
        #     pid: Record ID
        #     data: Updated field values
        # Returns:
        #     Number of records updated
        data["update_time"] = current_timestamp()
        data["update_date"] = datetime_format(datetime.now())
        num = cls.model.update(data).where(cls.model.id == pid).execute()
@@ -112,15 +217,28 @@ class CommonService:
    @classmethod
    @DB.connection_context()
    def get_by_id(cls, pid):
        # Get a record by ID
        # Args:
        #     pid: Record ID
        # Returns:
        #     Tuple of (success, record)
        try:
            obj = cls.model.query(id=pid)[0]
            obj = cls.model.get_or_none(cls.model.id == pid)
            if obj:
                return True, obj
        except Exception:
            pass
        return False, None

    @classmethod
    @DB.connection_context()
    def get_by_ids(cls, pids, cols=None):
        # Get multiple records by their IDs
        # Args:
        #     pids: List of record IDs
        #     cols: List of columns to select
        # Returns:
        #     Query of matching records
        if cols:
            objs = cls.model.select(*cols)
        else:
@@ -130,11 +248,33 @@ class CommonService:
    @classmethod
    @DB.connection_context()
    def delete_by_id(cls, pid):
        # Delete a record by ID
        # Args:
        #     pid: Record ID
        # Returns:
        #     Number of records deleted
        return cls.model.delete().where(cls.model.id == pid).execute()

    @classmethod
    @DB.connection_context()
    def delete_by_ids(cls, pids):
        # Delete multiple records by their IDs
        # Args:
        #     pids: List of record IDs
        # Returns:
        #     Number of records deleted
        with DB.atomic():
            res = cls.model.delete().where(cls.model.id.in_(pids)).execute()
            return res

    @classmethod
    @DB.connection_context()
    def filter_delete(cls, filters):
        # Delete records matching given filters
        # Args:
        #     filters: List of filter conditions
        # Returns:
        #     Number of records deleted
        with DB.atomic():
            num = cls.model.delete().where(*filters).execute()
            return num
@@ -142,42 +282,51 @@ class CommonService:
    @classmethod
    @DB.connection_context()
    def filter_update(cls, filters, update_data):
        # Update records matching given filters
        # Args:
        #     filters: List of filter conditions
        #     update_data: Updated field values
        # Returns:
        #     Number of records updated
        with DB.atomic():
            return cls.model.update(update_data).where(*filters).execute()

    @staticmethod
    def cut_list(tar_list, n):
        # Split a list into chunks of size n
        # Args:
        #     tar_list: List to split
        #     n: Chunk size
        # Returns:
        #     List of tuples containing chunks
        length = len(tar_list)
        arr = range(length)
        result = [tuple(tar_list[x:(x + n)]) for x in arr[::n]]
        result = [tuple(tar_list[x : (x + n)]) for x in arr[::n]]
        return result
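What cut_list produces, concretely:

CommonService.cut_list([1, 2, 3, 4, 5], 2)
# -> [(1, 2), (3, 4), (5,)]

It exists to keep SQL IN clauses short; filter_scope_list below feeds it 20 values per query.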
    @classmethod
    @DB.connection_context()
    def filter_scope_list(cls, in_key, in_filters_list,
                          filters=None, cols=None):
    def filter_scope_list(cls, in_key, in_filters_list, filters=None, cols=None):
        # Get records matching IN clause filters with optional column selection
        # Args:
        #     in_key: Field name for IN clause
        #     in_filters_list: List of values for IN clause
        #     filters: Additional filter conditions
        #     cols: List of columns to select
        # Returns:
        #     List of matching records
        in_filters_tuple_list = cls.cut_list(in_filters_list, 20)
        if not filters:
            filters = []
        res_list = []
        if cols:
            for i in in_filters_tuple_list:
                query_records = cls.model.select(
                    *
                    cols).where(
                    getattr(
                        cls.model,
                        in_key).in_(i),
                    *
                    filters)
                query_records = cls.model.select(*cols).where(getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
                    res_list.extend([query_record for query_record in query_records])
        else:
            for i in in_filters_tuple_list:
                query_records = cls.model.select().where(
                    getattr(cls.model, in_key).in_(i), *filters)
                query_records = cls.model.select().where(getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
                    res_list.extend([query_record for query_record in query_records])
        return res_list

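A usage sketch of the IN-batching above: a thousand IDs become fifty queries of at most 20 values each, so no single statement carries an oversized IN list (model and filter are hypothetical):

ids = [f"id-{i}" for i in range(1000)]
rows = WidgetService.filter_scope_list(
    "id", ids,
    filters=[Widget.status == "1"],
    cols=[Widget.id, Widget.name],
)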
@@ -23,6 +23,8 @@ from api.db.services.dialog_service import DialogService, chat
from api.utils import get_uuid
import json

from rag.prompts import chunks_format


class ConversationService(CommonService):
    model = Conversation
@@ -53,18 +55,7 @@ def structure_answer(conv, ans, message_id, session_id):
        reference = {}
        ans["reference"] = {}

    def get_value(d, k1, k2):
        return d.get(k1, d.get(k2))

    chunk_list = [{
        "id": get_value(chunk, "chunk_id", "id"),
        "content": get_value(chunk, "content", "content_with_weight"),
        "document_id": get_value(chunk, "doc_id", "document_id"),
        "document_name": get_value(chunk, "docnm_kwd", "document_name"),
        "dataset_id": get_value(chunk, "kb_id", "dataset_id"),
        "image_id": get_value(chunk, "image_id", "img_id"),
        "positions": get_value(chunk, "positions", "position_int"),
    } for chunk in reference.get("chunks", [])]
    chunk_list = chunks_format(reference)

    reference["chunks"] = chunk_list
    ans["id"] = message_id
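The inline mapping that chunks_format replaces documents the intended normalization: engine-level field names fall back to API-level ones. The same idea on a single chunk, values invented:

chunk = {"chunk_id": "c1", "content_with_weight": "some text", "doc_id": "d1"}
normalized = {
    "id": chunk.get("chunk_id", chunk.get("id")),
    "content": chunk.get("content", chunk.get("content_with_weight")),
    "document_id": chunk.get("doc_id", chunk.get("document_id")),
}
# -> {"id": "c1", "content": "some text", "document_id": "d1"}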
@@ -13,48 +13,79 @@
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import binascii
import os
import json
import json_repair
import logging
import re
from collections import defaultdict
import time
from copy import deepcopy
from datetime import datetime
from functools import partial
from timeit import default_timer as timer
import datetime
from datetime import timedelta
from api.db import LLMType, ParserType, StatusEnum
from api.db.db_models import Dialog, DB
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle

from langfuse import Langfuse

from agentic_reasoning import DeepResearcher
from api import settings
from graphrag.utils import get_tags_from_cache, set_tags_to_cache
from api.db import LLMType, ParserType, StatusEnum
from api.db.db_models import DB, Dialog
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.langfuse_service import TenantLangfuseService
from api.db.services.llm_service import LLMBundle, TenantLLMService
from api.utils import current_timestamp, datetime_format
from rag.app.resume import forbidden_select_fields4resume
from rag.app.tag import label_question
from rag.nlp.search import index_name
from rag.settings import TAG_FLD
from rag.utils import rmSpace, num_tokens_from_string, encoder
from api.utils.file_utils import get_project_base_directory
from rag.prompts import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, llm_id2llm_type, message_fit_in
from rag.utils import num_tokens_from_string, rmSpace
from rag.utils.tavily_conn import Tavily


class DialogService(CommonService):
    model = Dialog

    @classmethod
    def save(cls, **kwargs):
        """Save a new record to database.

        This method creates a new record in the database with the provided field values,
        forcing an insert operation rather than an update.

        Args:
            **kwargs: Record field values as keyword arguments.

        Returns:
            Model instance: The created record object.
        """
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    def update_many_by_id(cls, data_list):
        """Update multiple records by their IDs.

        This method updates multiple records in the database, identified by their IDs.
        It automatically updates the update_time and update_date fields for each record.

        Args:
            data_list (list): List of dictionaries containing record data to update.
                Each dictionary must include an 'id' field.
        """
        with DB.atomic():
            for data in data_list:
                data["update_time"] = current_timestamp()
                data["update_date"] = datetime_format(datetime.now())
                cls.model.update(data).where(cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
    def get_list(cls, tenant_id,
                 page_number, items_per_page, orderby, desc, id, name):
    def get_list(cls, tenant_id, page_number, items_per_page, orderby, desc, id, name):
        chats = cls.model.select()
        if id:
            chats = chats.where(cls.model.id == id)
        if name:
            chats = chats.where(cls.model.name == name)
        chats = chats.where(
            (cls.model.tenant_id == tenant_id)
            & (cls.model.status == StatusEnum.VALID.value)
        )
        chats = chats.where((cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value))
        if desc:
            chats = chats.order_by(cls.model.getter_by(orderby).desc())
        else:
@@ -65,131 +96,65 @@ class DialogService(CommonService):
        return list(chats.dicts())


def message_fit_in(msg, max_length=4000):
    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append(
                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    if len(msg) > 1:
        msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0]["content"])
    ll2 = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + ll2) > 0.8:
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[:max_length - ll2])
        msg[0]["content"] = m
        return max_length, msg

    m = msg_[1]["content"]
    m = encoder.decode(encoder.encode(m)[:max_length - ll2])
    msg[1]["content"] = m
    return max_length, msg


def llm_id2llm_type(llm_id):
    llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
    fnm = os.path.join(get_project_base_directory(), "conf")
    llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
    for llm_factory in llm_factories["factory_llm_infos"]:
        for llm in llm_factory["llm"]:
            if llm_id == llm["llm_name"]:
                return llm["model_type"].strip(",")[-1]


def kb_prompt(kbinfos, max_tokens):
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    used_token_count = 0
    chunks_num = 0
    for i, c in enumerate(knowledges):
        used_token_count += num_tokens_from_string(c)
        chunks_num += 1
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            break

    docs = DocumentService.get_by_ids([ck["doc_id"] for ck in kbinfos["chunks"][:chunks_num]])
    docs = {d.id: d.meta_fields for d in docs}

    doc2chunks = defaultdict(lambda: {"chunks": [], "meta": []})
    for ck in kbinfos["chunks"][:chunks_num]:
        doc2chunks[ck["docnm_kwd"]]["chunks"].append(ck["content_with_weight"])
        doc2chunks[ck["docnm_kwd"]]["meta"] = docs.get(ck["doc_id"], {})

    knowledges = []
    for nm, cks_meta in doc2chunks.items():
        txt = f"Document: {nm} \n"
        for k, v in cks_meta["meta"].items():
            txt += f"{k}: {v}\n"
        txt += "Relevant fragments as following:\n"
        for i, chunk in enumerate(cks_meta["chunks"], 1):
            txt += f"{i}. {chunk}\n"
        knowledges.append(txt)
    return knowledges


def label_question(question, kbs):
    tags = None
    tag_kb_ids = []
    for kb in kbs:
        if kb.parser_config.get("tag_kb_ids"):
            tag_kb_ids.extend(kb.parser_config["tag_kb_ids"])
    if tag_kb_ids:
        all_tags = get_tags_from_cache(tag_kb_ids)
        if not all_tags:
            all_tags = settings.retrievaler.all_tags_in_portion(kb.tenant_id, tag_kb_ids)
            set_tags_to_cache(all_tags, tag_kb_ids)
        else:
            all_tags = json.loads(all_tags)
        tag_kbs = KnowledgebaseService.get_by_ids(tag_kb_ids)
        tags = settings.retrievaler.tag_query(question,
                                              list(set([kb.tenant_id for kb in tag_kbs])),
                                              tag_kb_ids,
                                              all_tags,
                                              kb.parser_config.get("topn_tags", 3)
                                              )
    return tags


def chat_solo(dialog, messages, stream=True):
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"]
    if stream:
        last_ans = ""
        delta_ans = ""
        for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
            answer = ans
            delta_ans = ans[len(last_ans) :]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
            delta_ans = ""
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
    else:
        answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}

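chat_solo's streaming branch only emits once the pending delta reaches roughly 16 tokens, batching tiny model increments into fewer events, and the trailing `if delta_ans` flushes whatever is left. The pattern in isolation, with a character count standing in for the token count:

last_ans = ""
snapshots = ["Hello", "Hello there,", "Hello there, how are you?", "Hello there, how are you? Bye"]
for ans in snapshots:  # cumulative answers, like chat_streamly yields
    delta_ans = ans[len(last_ans):]
    if len(delta_ans) < 10:  # stand-in for num_tokens_from_string(delta_ans) < 16
        continue             # too small: hold until more text accumulates
    last_ans = ans
    print("emit:", ans)
delta_ans = snapshots[-1][len(last_ans):]
if delta_ans:
    print("final flush:", snapshots[-1])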
def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    if not dialog.kb_ids:
        for ans in chat_solo(dialog, messages, stream):
            yield ans
        return

    chat_start_ts = timer()

    # Get llm model name and model provider name
    llm_id, model_provider = TenantLLMService.split_model_name_and_factory(dialog.llm_id)

    # Get llm model instance by model and provider name
    llm = LLMService.query(llm_name=llm_id) if not model_provider else LLMService.query(llm_name=llm_id, fid=model_provider)

    if not llm:
        # Model name is provided by tenant, but not system built-in
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not model_provider else \
            TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=model_provider)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 8192
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        max_tokens = llm[0].max_tokens
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    max_tokens = llm_model_config.get("max_tokens", 8192)

    check_llm_ts = timer()

    langfuse_tracer = None
    langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=dialog.tenant_id)
    if langfuse_keys:
        langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
        if langfuse.auth_check():
            langfuse_tracer = langfuse
            langfuse.trace = langfuse_tracer.trace(name=f"{dialog.name}-{llm_model_config['llm_name']}")

    check_langfuse_tracer_ts = timer()

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    if len(embedding_list) != 1:
@@ -204,9 +169,6 @@ def chat(dialog, messages, stream=True, **kwargs):
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]
        for m in messages[:-1]:
            if "doc_ids" in m:
                attachments.extend(m["doc_ids"])

    create_retriever_ts = timer()

@@ -220,6 +182,9 @@ def chat(dialog, messages, stream=True, **kwargs):
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
    toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
    if toolcall_session and tools:
        chat_mdl.bind_tools(toolcall_session, tools)

    bind_llm_ts = timer()

@@ -242,14 +207,16 @@ def chat(dialog, messages, stream=True, **kwargs):
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")
            prompt_config["system"] = prompt_config["system"].replace("{%s}" % p["key"], " ")

    if len(questions) > 1 and prompt_config.get("refine_multiturn"):
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]

    if prompt_config.get("cross_languages"):
        questions = [cross_languages(dialog.tenant_id, dialog.llm_id, questions[0], prompt_config["cross_languages"])]

    refine_question_ts = timer()

    rerank_mdl = None
@@ -258,9 +225,11 @@ def chat(dialog, messages, stream=True, **kwargs):

    bind_reranker_ts = timer()
    generate_keyword_ts = bind_reranker_ts
    thought = ""
    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
        knowledges = []
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
@@ -268,67 +237,136 @@ def chat(dialog, messages, stream=True, **kwargs):

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))

        kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
        knowledges = []
        if prompt_config.get("reasoning", False):
            reasoner = DeepResearcher(
                chat_mdl,
                prompt_config,
                partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3),
            )

            for think in reasoner.thinking(kbinfos, " ".join(questions)):
                if isinstance(think, str):
                    thought = think
                    knowledges = [t for t in think.split("\n") if t]
                elif stream:
                    yield think
        else:
            kbinfos = retriever.retrieval(
                " ".join(questions),
                embd_mdl,
                tenant_ids,
                dialog.kb_ids,
                1,
                dialog.top_n,
                dialog.similarity_threshold,
                dialog.vector_similarity_weight,
                doc_ids=attachments,
                top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl,
                rank_feature=label_question(" ".join(questions), kbs)
                top=dialog.top_k,
                aggs=False,
                rerank_mdl=rerank_mdl,
                rank_feature=label_question(" ".join(questions), kbs),
            )
            if prompt_config.get("tavily_api_key"):
                tav = Tavily(prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(" ".join(questions))
                kbinfos["chunks"].extend(tav_res["chunks"])
                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
            if prompt_config.get("use_kg"):
                ck = settings.kg_retrievaler.retrieval(" ".join(questions),
                                                       tenant_ids,
                                                       dialog.kb_ids,
                                                       embd_mdl,
                                                       LLMBundle(dialog.tenant_id, LLMType.CHAT))
                ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl, LLMBundle(dialog.tenant_id, LLMType.CHAT))
                if ck["content_with_weight"]:
                    kbinfos["chunks"].insert(0, ck)

        retrieval_ts = timer()

        knowledges = kb_prompt(kbinfos, max_tokens)
        logging.debug(
            "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

        logging.debug("{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    retrieval_ts = timer()
    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    prompt4citation = ""
    if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
        prompt4citation = citation_prompt()
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.95))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]
    prompt += "\n\n### Query:\n%s" % " ".join(questions)

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)
        gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)

    def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: set):
        max_index = len(kbinfos["chunks"])

        def safe_add(i):
            if 0 <= i < max_index:
                idx.add(i)
                return True
            return False

        def find_and_replace(pattern, group_index=1, repl=lambda i: f"##{i}$$", flags=0):
            nonlocal answer
            for match in re.finditer(pattern, answer, flags=flags):
                try:
                    i = int(match.group(group_index))
                    if safe_add(i):
                        answer = answer.replace(match.group(0), repl(i))
                except Exception:
                    continue

        find_and_replace(r"\(\s*ID:\s*(\d+)\s*\)")  # (ID: 12)
        find_and_replace(r"ID[: ]+(\d+)")  # ID: 12, ID 12
        find_and_replace(r"\$\$(\d+)\$\$")  # $$12$$
        find_and_replace(r"\$\[(\d+)\]\$")  # $[12]$
        find_and_replace(r"\$\$(\d+)\${2,}")  # $$12$$$$
        find_and_replace(r"\$(\d+)\$")  # $12$
        find_and_replace(r"(#{2,})(\d+)(\${2,})", group_index=2)  # 2+ # and 2+ $
        find_and_replace(r"(#{2,})(\d+)(#{1,})", group_index=2)  # 2+ # and 1+ #
        find_and_replace(r"##(\d+)#{2,}")  # ##12###
        find_and_replace(r"【(\d+)】")  # 【12】
        find_and_replace(r"ref\s*(\d+)", flags=re.IGNORECASE)  # ref12, ref 12, REF 12

        return answer, idx

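A quick check of what repair_bad_citation_formats normalizes: stray citation spellings collapse to the canonical ##N$$ marker, while out-of-range indices are left untouched and excluded from idx. With, say, five retrieved chunks:

answer = "Pandas loads CSVs (ID: 2) and Excel files $$7$$; see also ref 1."
# repair_bad_citation_formats(answer, kbinfos, set()) would yield roughly:
# "Pandas loads CSVs ##2$$ and Excel files $$7$$; see also ##1$$"
# Index 7 >= len(kbinfos["chunks"]), so that marker stays as-is and is not added to idx.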
    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts

        finish_chat_ts = timer()
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer

        refs = []
        ans = answer.split("</think>")
        think = ""
        if len(ans) == 2:
            think = ans[0] + "</think>"
            answer = ans[1]

        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retriever.insert_citations(answer,
                                                     [ck["content_ltks"]
                                                      for ck in kbinfos["chunks"]],
                                                     [ck["vector"]
                                                      for ck in kbinfos["chunks"]],
            answer = re.sub(r"##[ij]\$\$", "", answer, flags=re.DOTALL)
            idx = set([])
            if not re.search(r"##[0-9]+\$\$", answer):
                answer, idx = retriever.insert_citations(
                    answer,
                    [ck["content_ltks"] for ck in kbinfos["chunks"]],
                    [ck["vector"] for ck in kbinfos["chunks"]],
                    embd_mdl,
                    tkweight=1 - dialog.vector_similarity_weight,
                    vtweight=dialog.vector_similarity_weight)
                    vtweight=dialog.vector_similarity_weight,
                )
            else:
                for match in re.finditer(r"##([0-9]+)\$\$", answer):
                    i = int(match.group(1))
                    if i < len(kbinfos["chunks"]):
                        idx.add(i)

            answer, idx = repair_bad_citation_formats(answer, kbinfos, idx)

            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs
@@ -344,7 +382,8 @@ def chat(dialog, messages, stream=True, **kwargs):

        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_llm_ts) * 1000
        check_langfuse_tracer_cost = (check_langfuse_tracer_ts - check_llm_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_langfuse_tracer_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
@@ -353,27 +392,59 @@ def chat(dialog, messages, stream=True, **kwargs):
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
        return {"answer": answer, "reference": refs, "prompt": re.sub(r"\n", "  \n", prompt)}
        tk_num = num_tokens_from_string(think + answer)
        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        prompt = (
            f"{prompt}\n\n"
            "## Time elapsed:\n"
            f"  - Total: {total_time_cost:.1f}ms\n"
            f"  - Check LLM: {check_llm_time_cost:.1f}ms\n"
            f"  - Check Langfuse tracer: {check_langfuse_tracer_cost:.1f}ms\n"
            f"  - Create retriever: {create_retriever_time_cost:.1f}ms\n"
            f"  - Bind embedding: {bind_embedding_time_cost:.1f}ms\n"
            f"  - Bind LLM: {bind_llm_time_cost:.1f}ms\n"
            f"  - Multi-turn optimization: {refine_question_time_cost:.1f}ms\n"
            f"  - Bind reranker: {bind_reranker_time_cost:.1f}ms\n"
            f"  - Generate keyword: {generate_keyword_time_cost:.1f}ms\n"
            f"  - Retrieval: {retrieval_time_cost:.1f}ms\n"
            f"  - Generate answer: {generate_result_time_cost:.1f}ms\n\n"
            "## Token usage:\n"
            f"  - Generated tokens(approximately): {tk_num}\n"
            f"  - Token speed: {int(tk_num / (generate_result_time_cost / 1000.0))}/s"
        )

        langfuse_output = "\n" + re.sub(r"^.*?(### Query:.*)", r"\1", prompt, flags=re.DOTALL)
        langfuse_output = {"time_elapsed:": re.sub(r"\n", "  \n", langfuse_output), "created_at": time.time()}

        # Add a condition check to call the end method only if langfuse_tracer exists
        if langfuse_tracer and "langfuse_generation" in locals():
            langfuse_generation.end(output=langfuse_output)

        return {"answer": think + answer, "reference": refs, "prompt": re.sub(r"\n", "  \n", prompt), "created_at": time.time()}

    if langfuse_tracer:
        langfuse_generation = langfuse_tracer.trace.generation(name="chat", model=llm_model_config["llm_name"], input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg})

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
        for ans in chat_mdl.chat_streamly(prompt + prompt4citation, msg[1:], gen_conf):
            if thought:
                ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans):]
            delta_ans = ans[len(last_ans) :]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans):]
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans) :]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
            yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(thought + answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        logging.debug("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        answer = chat_mdl.chat(prompt + prompt4citation, msg[1:], gen_conf)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
@@ -389,26 +460,22 @@ Table of database fields are as follows:
Questions are as follows:
{}
Please write the SQL, only SQL, without any other explanations or text.
""".format(
    index_name(tenant_id),
    "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
    question
)
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question)
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
            "temperature": 0.06})
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.06})
        sql = re.sub(r"^.*</think>", "", sql, flags=re.DOTALL)
        logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
        sql = re.sub(r"([;；]|```).*", "", sql)
        if sql[:len("select ")] != "select ":
        if sql[: len("select ")] != "select ":
            return None, None
        if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
            if sql[:len("select *")] != "select *":
            if sql[: len("select *")] != "select *":
                sql = "select doc_id,docnm_kwd," + sql[6:]
            else:
                flds = []
@@ -445,11 +512,7 @@ Please write the SQL, only SQL, without any other explanations or text.
{}

Please correct the error and write SQL again, only SQL, without any other explanations or text.
""".format(
    index_name(tenant_id),
    "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
    question, sql, tbl["error"]
)
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question, sql, tbl["error"])
        tbl, sql = get_table()
        logging.debug("TRY it again: {}".format(sql))

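The regex pipeline in get_table defensively normalizes whatever the model returns — chatter, code fences, and trailing semicolons all get stripped. A sketch of a messy response passing through it (steps mirrored from the function above, lowercasing applied once up front):

import re

raw = "Sure! Here is the SQL:\n```sql\nSELECT doc_id, docnm_kwd FROM t WHERE a=1;\n```"
sql = re.sub(r"[\r\n]+", " ", raw.lower())   # flatten to one line
sql = re.sub(r".*select ", "select ", sql)   # drop any preamble before the query
sql = re.sub(r" +", " ", sql)                # collapse runs of whitespace
sql = re.sub(r"([;；]|```).*", "", sql)       # cut at the first semicolon or fence
# -> "select doc_id, docnm_kwd from t where a=1"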
||||
@ -457,24 +520,18 @@ Please write the SQL, only SQL, without any other explanations or text.
|
||||
if tbl.get("error") or len(tbl["rows"]) == 0:
|
||||
return None
|
||||
|
||||
docid_idx = set([ii for ii, c in enumerate(
|
||||
tbl["columns"]) if c["name"] == "doc_id"])
|
||||
doc_name_idx = set([ii for ii, c in enumerate(
|
||||
tbl["columns"]) if c["name"] == "docnm_kwd"])
|
||||
column_idx = [ii for ii in range(
|
||||
len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
|
||||
docid_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "doc_id"])
|
||||
doc_name_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
|
||||
column_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
|
||||
|
||||
# compose Markdown table
|
||||
columns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
|
||||
tbl["columns"][i]["name"])) for i in
|
||||
column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
|
||||
columns = (
|
||||
"|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
|
||||
)
|
||||
|
||||
line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + \
|
||||
("|------|" if docid_idx and docid_idx else "")
|
||||
line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and docid_idx else "")
|
||||
|
||||
rows = ["|" +
|
||||
"|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") +
|
||||
"|" for r in tbl["rows"]]
|
||||
rows = ["|" + "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") + "|" for r in tbl["rows"]]
|
||||
rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
|
||||
if quota:
|
||||
rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
|
||||
@ -484,11 +541,7 @@ Please write the SQL, only SQL, without any other explanations or text.
|
||||
|
||||
if not docid_idx or not doc_name_idx:
|
||||
logging.warning("SQL missing field: " + sql)
|
||||
return {
|
||||
"answer": "\n".join([columns, line, rows]),
|
||||
"reference": {"chunks": [], "doc_aggs": []},
|
||||
"prompt": sys_prompt
|
||||
}
|
||||
return {"answer": "\n".join([columns, line, rows]), "reference": {"chunks": [], "doc_aggs": []}, "prompt": sys_prompt}
|
||||
|
||||
docid_idx = list(docid_idx)[0]
|
||||
doc_name_idx = list(doc_name_idx)[0]
|
||||
@ -499,179 +552,14 @@ Please write the SQL, only SQL, without any other explanations or text.
|
||||
doc_aggs[r[docid_idx]]["count"] += 1
|
||||
return {
|
||||
"answer": "\n".join([columns, line, rows]),
|
||||
"reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
|
||||
"doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
|
||||
doc_aggs.items()]},
|
||||
"prompt": sys_prompt
|
||||
"reference": {
|
||||
"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
|
||||
"doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()],
|
||||
},
|
||||
"prompt": sys_prompt,
|
||||
}
|
||||
|
||||
|
||||
def relevant(tenant_id, llm_id, question, contents: list):
|
||||
if llm_id2llm_type(llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
||||
prompt = """
|
||||
You are a grader assessing relevance of a retrieved document to a user question.
|
||||
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
|
||||
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
|
||||
Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
|
||||
No other words needed except 'yes' or 'no'.
|
||||
"""
|
||||
if not contents:
|
||||
return False
|
||||
contents = "Documents: \n" + " - ".join(contents)
|
||||
contents = f"Question: {question}\n" + contents
|
||||
if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
|
||||
contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
|
||||
if ans.lower().find("yes") >= 0:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def rewrite(tenant_id, llm_id, question):
|
||||
if llm_id2llm_type(llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
||||
prompt = """
|
||||
You are an expert at query expansion to generate a paraphrasing of a question.
|
||||
I can't retrieval relevant information from the knowledge base by using user's question directly.
|
||||
You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
|
||||
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
|
||||
changing the way of expression, translating the original question into another language (English/Chinese), etc.
|
||||
And return 5 versions of question and one is from translation.
|
||||
Just list the question. No other words are needed.
|
||||
"""
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
|
||||
return ans
|
||||
|
||||
|
||||
def keyword_extraction(chat_mdl, content, topn=3):
|
||||
prompt = f"""
|
||||
Role: You're a text analyzer.
|
||||
Task: extract the most important keywords/phrases of a given piece of text content.
|
||||
Requirements:
|
||||
- Summarize the text content, and give top {topn} important keywords/phrases.
|
||||
- The keywords MUST be in language of the given piece of text content.
|
||||
- The keywords are delimited by ENGLISH COMMA.
|
||||
- Keywords ONLY in output.
|
||||
|
||||
### Text Content
|
||||
{content}
|
||||
|
||||
"""
|
||||
msg = [
|
||||
{"role": "system", "content": prompt},
|
||||
{"role": "user", "content": "Output: "}
|
||||
]
|
||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
|
||||
if isinstance(kwd, tuple):
|
||||
kwd = kwd[0]
|
||||
if kwd.find("**ERROR**") >= 0:
|
||||
return ""
|
||||
return kwd
|
||||
|
||||
|
||||
def question_proposal(chat_mdl, content, topn=3):
|
||||
prompt = f"""
|
||||
Role: You're a text analyzer.
|
||||
Task: propose {topn} questions about a given piece of text content.
|
||||
Requirements:
|
||||
- Understand and summarize the text content, and propose top {topn} important questions.
|
||||
- The questions SHOULD NOT have overlapping meanings.
|
||||
- The questions SHOULD cover the main content of the text as much as possible.
|
||||
- The questions MUST be in language of the given piece of text content.
|
||||
- One question per line.
|
||||
- Question ONLY in output.
|
||||
|
||||
### Text Content
|
||||
{content}
|
||||
|
||||
"""
|
||||
msg = [
|
||||
{"role": "system", "content": prompt},
|
||||
{"role": "user", "content": "Output: "}
|
||||
]
|
||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
|
||||
if isinstance(kwd, tuple):
|
||||
kwd = kwd[0]
|
||||
if kwd.find("**ERROR**") >= 0:
|
||||
return ""
|
||||
return kwd
|
||||
|
||||
|
||||
def full_question(tenant_id, llm_id, messages):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    conv = []
    for m in messages:
        if m["role"] not in ["user", "assistant"]:
            continue
        conv.append("{}: {}".format(m["role"].upper(), m["content"]))
    conv = "\n".join(conv)
    today = datetime.date.today().isoformat()
    yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
    tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
    prompt = f"""
Role: A helpful assistant

Task and steps:
1. Generate a full user question that would follow the conversation.
2. If the user's question involves a relative date, you need to convert it into an absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.

Requirements & Restrictions:
- The text generated MUST be in the same language as the original user's question.
- If the user's latest question is already complete, don't do anything, just return the original question.
- DON'T generate anything except a refined question.

######################
-Examples-
######################

# Example 1
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
###############
Output: What's the name of Donald Trump's mother?

------------
# Example 2
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
ASSISTANT: Mary Trump.
USER: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?

------------
# Example 3
## Conversation
USER: What's the weather today in London?
ASSISTANT: Cloudy.
USER: What about tomorrow in Rochester?
###############
Output: What's the weather in Rochester on {tomorrow}?
######################

# Real Data
## Conversation
{conv}
###############
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
    return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]

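A sketch of the input shape full_question expects and the kind of output the prompt aims for (the IDs and the sample answer are placeholders):

# Roles other than user/assistant are skipped when the conversation is flattened.
messages = [
    {"role": "system", "content": "ignored by full_question"},
    {"role": "user", "content": "What's the weather today in London?"},
    {"role": "assistant", "content": "Cloudy."},
    {"role": "user", "content": "And tomorrow in Rochester?"},
]
# full_question("tenant-id-placeholder", "llm-id-placeholder", messages) should yield
# something like "What's the weather in Rochester on 2025-05-21?", with the relative
# date resolved against {today}; on "**ERROR**" it falls back to the last user message.
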
def tts(tts_mdl, text):
    if not tts_mdl or not text:
        return
@@ -692,10 +580,7 @@ def ask(question, kb_ids, tenant_id):
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length
    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids,
                                  1, 12, 0.1, 0.3, aggs=False,
                                  rank_feature=label_question(question, kbs)
                                  )
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False, rank_feature=label_question(question, kbs))
    knowledges = kb_prompt(kbinfos, max_tokens)
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
@@ -717,17 +602,9 @@ def ask(question, kb_ids, tenant_id):

    def decorate_answer(answer):
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retriever.insert_citations(answer,
                                                 [ck["content_ltks"]
                                                  for ck in kbinfos["chunks"]],
                                                 [ck["vector"]
                                                  for ck in kbinfos["chunks"]],
                                                 embd_mdl,
                                                 tkweight=0.7,
                                                 vtweight=0.3)
        answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]], embd_mdl, tkweight=0.7, vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
@@ -738,6 +615,7 @@ def ask(question, kb_ids, tenant_id):

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        refs["chunks"] = chunks_format(refs)
        return {"answer": answer, "reference": refs}

    answer = ""
@@ -745,64 +623,3 @@ def ask(question, kb_ids, tenant_id):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)

def content_tagging(chat_mdl, content, all_tags, examples, topn=3):
    prompt = f"""
Role: You're a text analyzer.

Task: Tag (put some labels on) a given piece of text content based on the examples and the entire tag set.

Steps:
- Comprehend the tag/label set.
- Comprehend the examples, each of which consists of a piece of text content and its assigned tags with relevance scores, in JSON format.
- Summarize the text content, and tag it with the top {topn} most relevant tags from the tag/label set along with their corresponding relevance scores.

Requirements:
- The tags MUST be from the tag set.
- The output MUST be in JSON format only: the key is the tag and the value is its relevance score.
- The relevance score must range from 1 to 10.
- Tags ONLY in output.

# TAG SET
{", ".join(all_tags)}

"""
    for i, ex in enumerate(examples):
        prompt += """
# Example {}
### Text Content
{}

Output:
{}

""".format(i, ex["content"], json.dumps(ex[TAG_FLD], indent=2, ensure_ascii=False))

    prompt += f"""
# Real Data
### Text Content
{content}

"""
    msg = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": "Output: "}
    ]
    _, msg = message_fit_in(msg, chat_mdl.max_length)
    kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.5})
    if isinstance(kwd, tuple):
        kwd = kwd[0]
    if kwd.find("**ERROR**") >= 0:
        raise Exception(kwd)

    try:
        return json_repair.loads(kwd)
    except json_repair.JSONDecodeError:
        try:
            result = kwd.replace(prompt[:-1], '').replace('user', '').replace('model', '').strip()
            result = '{' + result.split('{')[1].split('}')[0] + '}'
            return json_repair.loads(result)
        except Exception as e:
            logging.exception(f"JSON parsing error: {result} -> {e}")
            raise e

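A sketch of a well-formed tagging response and the parsing path above (the tag values are illustrative):

import json_repair

# Example of the JSON shape content_tagging expects back: tag -> relevance score (1-10).
example_output = '{"machine learning": 9, "vector search": 7, "RAG": 6}'
tags = json_repair.loads(example_output)
assert tags["machine learning"] == 9
# json_repair also tolerates mildly malformed JSON, which is why it is used over json.loads.
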
@@ -13,9 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import xxhash
import json
import logging
import random
import re
from concurrent.futures import ThreadPoolExecutor
@@ -23,23 +22,22 @@ from copy import deepcopy
from datetime import datetime
from io import BytesIO

import trio
import xxhash
from peewee import fn

from api.db.db_utils import bulk_insert_into_db
from api import settings
from api.utils import current_timestamp, get_format_time, get_uuid
from graphrag.general.mind_map_extractor import MindMapExtractor
from rag.settings import SVR_QUEUE_NAME
from rag.utils.storage_factory import STORAGE_IMPL
from rag.nlp import search, rag_tokenizer

from api.db import FileType, TaskStatus, ParserType, LLMType
from api.db.db_models import DB, Knowledgebase, Tenant, Task, UserTenant
from api.db.db_models import Document
from api.db import FileType, LLMType, ParserType, StatusEnum, TaskStatus, UserTenantRole
from api.db.db_models import DB, Document, Knowledgebase, Task, Tenant, UserTenant
from api.db.db_utils import bulk_insert_into_db
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db import StatusEnum
from api.utils import current_timestamp, get_format_time, get_uuid
from rag.nlp import rag_tokenizer, search
from rag.settings import get_svr_queue_name
from rag.utils.redis_conn import REDIS_CONN
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.doc_store_conn import OrderByExpr


class DocumentService(CommonService):
@@ -73,7 +71,7 @@ class DocumentService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_by_kb_id(cls, kb_id, page_number, items_per_page,
                     orderby, desc, keywords):
                     orderby, desc, keywords, run_status, types):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == kb_id),
@@ -81,24 +79,66 @@ class DocumentService(CommonService):
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)

        if run_status:
            docs = docs.where(cls.model.run.in_(run_status))
        if types:
            docs = docs.where(cls.model.type.in_(types))

        count = docs.count()
        if desc:
            docs = docs.order_by(cls.model.getter_by(orderby).desc())
        else:
            docs = docs.order_by(cls.model.getter_by(orderby).asc())

        if page_number and items_per_page:
            docs = docs.paginate(page_number, items_per_page)

        return list(docs.dicts()), count

    @classmethod
    @DB.connection_context()
    def count_by_kb_id(cls, kb_id, keywords, run_status, types):
        if keywords:
            docs = cls.model.select().where(
                (cls.model.kb_id == kb_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower()))
            )
        else:
            docs = cls.model.select().where(cls.model.kb_id == kb_id)

        if run_status:
            docs = docs.where(cls.model.run.in_(run_status))
        if types:
            docs = docs.where(cls.model.type.in_(types))

        count = docs.count()

        return count

    @classmethod
    @DB.connection_context()
    def get_total_size_by_kb_id(cls, kb_id, keywords="", run_status=[], types=[]):
        query = cls.model.select(fn.COALESCE(fn.SUM(cls.model.size), 0)).where(
            cls.model.kb_id == kb_id
        )

        if keywords:
            query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
        if run_status:
            query = query.where(cls.model.run.in_(run_status))
        if types:
            query = query.where(cls.model.type.in_(types))

        return int(query.scalar()) or 0

    @classmethod
    @DB.connection_context()
    def insert(cls, doc):
        if not cls.save(**doc):
            raise RuntimeError("Database error (Document)!")
        e, kb = KnowledgebaseService.get_by_id(doc["kb_id"])
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num + 1}):
        if not KnowledgebaseService.atomic_increase_doc_num_by_id(doc["kb_id"]):
            raise RuntimeError("Database error (Knowledgebase)!")
        return Document(**doc)

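With the new run_status/types filters, a caller can page through a subset of documents; a hedged usage sketch (the kb_id is a placeholder):

# Page 1 (15 items) of failed or still-running PDF documents, newest first.
docs, total = DocumentService.get_by_kb_id(
    "kb-id-placeholder", 1, 15, "create_time", True, "",
    [TaskStatus.RUNNING.value, TaskStatus.FAIL.value],
    [FileType.PDF.value],
)
size = DocumentService.get_total_size_by_kb_id("kb-id-placeholder", types=[FileType.PDF.value])
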
@@ -108,13 +148,17 @@ class DocumentService(CommonService):
        cls.clear_chunk_num(doc.id)
        try:
            settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "source_id": doc.id},
            graph_source = settings.docStoreConn.getFields(
                settings.docStoreConn.search(["source_id"], [], {"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]}, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [doc.kb_id]), ["source_id"]
            )
            if len(graph_source) > 0 and doc.id in list(graph_source.values())[0]["source_id"]:
                settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "source_id": doc.id},
                                             {"remove": {"source_id": doc.id}},
                                             search.index_name(tenant_id), doc.kb_id)
                settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
                                             {"removed_kwd": "Y"},
                                             search.index_name(tenant_id), doc.kb_id)
                settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "must_not": {"exists": "source_id"}},
                settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "must_not": {"exists": "source_id"}},
                                             search.index_name(tenant_id), doc.kb_id)
        except Exception:
            pass
@@ -267,11 +311,18 @@ class DocumentService(CommonService):
    @classmethod
    @DB.connection_context()
    def accessible4deletion(cls, doc_id, user_id):
        docs = cls.model.select(
            cls.model.id).join(
        docs = cls.model.select(cls.model.id
        ).join(
            Knowledgebase, on=(
                Knowledgebase.id == cls.model.kb_id)
        ).where(cls.model.id == doc_id, Knowledgebase.created_by == user_id).paginate(0, 1)
        ).join(
            UserTenant, on=(
                (UserTenant.tenant_id == Knowledgebase.created_by) & (UserTenant.user_id == user_id))
        ).where(
            cls.model.id == doc_id,
            UserTenant.status == StatusEnum.VALID.value,
            ((UserTenant.role == UserTenantRole.NORMAL) | (UserTenant.role == UserTenantRole.OWNER))
        ).paginate(0, 1)
        docs = docs.dicts()
        if not docs:
            return False
@@ -326,6 +377,15 @@ class DocumentService(CommonService):
            return
        return doc_id[0]["id"]

    @classmethod
    @DB.connection_context()
    def get_doc_ids_by_doc_names(cls, doc_names):
        if not doc_names:
            return []

        query = cls.model.select(cls.model.id).where(cls.model.name.in_(doc_names))
        return list(query.scalars().iterator())

    @classmethod
    @DB.connection_context()
    def get_thumbnails(cls, docids):
@@ -336,6 +396,8 @@ class DocumentService(CommonService):
    @classmethod
    @DB.connection_context()
    def update_parser_config(cls, id, config):
        if not config:
            return
        e, d = cls.get_by_id(id)
        if not e:
            raise LookupError(f"Document({id}) not found.")
@@ -373,15 +435,14 @@ class DocumentService(CommonService):
            "process_begin_at": get_format_time()
        })

    @classmethod
    @DB.connection_context()
    def update_meta_fields(cls, doc_id, meta_fields):
        return cls.update_by_id(doc_id, {"meta_fields": meta_fields})

    @classmethod
    @DB.connection_context()
    def update_progress(cls):
        MSG = {
            "raptor": "Start RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval).",
            "graphrag": "Start Graph Extraction",
            "graph_resolution": "Start Graph Resolution",
            "graph_community": "Start Graph Community Reports Generation"
        }
        docs = cls.get_unfinished_docs()
        for d in docs:
            try:
@@ -392,37 +453,33 @@ class DocumentService(CommonService):
                prg = 0
                finished = True
                bad = 0
                has_raptor = False
                has_graphrag = False
                e, doc = DocumentService.get_by_id(d["id"])
                status = doc.run  # TaskStatus.RUNNING.value
                priority = 0
                for t in tsks:
                    if 0 <= t.progress < 1:
                        finished = False
                    prg += t.progress if t.progress >= 0 else 0
                    if t.progress_msg not in msg:
                        msg.append(t.progress_msg)
                    if t.progress == -1:
                        bad += 1
                    prg += t.progress if t.progress >= 0 else 0
                    msg.append(t.progress_msg)
                    if t.task_type == "raptor":
                        has_raptor = True
                    elif t.task_type == "graphrag":
                        has_graphrag = True
                    priority = max(priority, t.priority)
                prg /= len(tsks)
                if finished and bad:
                    prg = -1
                    status = TaskStatus.FAIL.value
                elif finished:
                    m = "\n".join(sorted(msg))
                    if d["parser_config"].get("raptor", {}).get("use_raptor") and m.find(MSG["raptor"]) < 0:
                        queue_raptor_o_graphrag_tasks(d, "raptor", MSG["raptor"])
                    if d["parser_config"].get("raptor", {}).get("use_raptor") and not has_raptor:
                        queue_raptor_o_graphrag_tasks(d, "raptor", priority)
                        prg = 0.98 * len(tsks) / (len(tsks) + 1)
                    elif d["parser_config"].get("graphrag", {}).get("use_graphrag") and m.find(MSG["graphrag"]) < 0:
                        queue_raptor_o_graphrag_tasks(d, "graphrag", MSG["graphrag"])
                        prg = 0.98 * len(tsks) / (len(tsks) + 1)
                    elif d["parser_config"].get("graphrag", {}).get("use_graphrag") \
                            and d["parser_config"].get("graphrag", {}).get("resolution") \
                            and m.find(MSG["graph_resolution"]) < 0:
                        queue_raptor_o_graphrag_tasks(d, "graph_resolution", MSG["graph_resolution"])
                        prg = 0.98 * len(tsks) / (len(tsks) + 1)
                    elif d["parser_config"].get("graphrag", {}).get("use_graphrag") \
                            and d["parser_config"].get("graphrag", {}).get("community") \
                            and m.find(MSG["graph_community"]) < 0:
                        queue_raptor_o_graphrag_tasks(d, "graph_community", MSG["graph_community"])
                    elif d["parser_config"].get("graphrag", {}).get("use_graphrag") and not has_graphrag:
                        queue_raptor_o_graphrag_tasks(d, "graphrag", priority)
                        prg = 0.98 * len(tsks) / (len(tsks) + 1)
                    else:
                        status = TaskStatus.DONE.value
@@ -459,7 +516,7 @@ class DocumentService(CommonService):
        return False

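The 0.98 cap above keeps a document's progress strictly below 100% while the follow-up RAPTOR/GraphRAG task is still queued; a quick worked example:

# With 3 finished chunking tasks and 1 follow-up task just queued:
tsks = 3
prg = 0.98 * tsks / (tsks + 1)  # 0.735 -> reported as 73.5%, never 100%
# Progress only reaches 1.0 once the queued task itself finishes.
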
def queue_raptor_o_graphrag_tasks(doc, ty, msg):
def queue_raptor_o_graphrag_tasks(doc, ty, priority):
    chunking_config = DocumentService.get_chunking_config(doc["id"])
    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):
@@ -472,7 +529,8 @@ def queue_raptor_o_graphrag_tasks(doc, ty, msg):
        "doc_id": doc["id"],
        "from_page": 100000000,
        "to_page": 100000000,
        "progress_msg": datetime.now().strftime("%H:%M:%S") + " " + msg
        "task_type": ty,
        "progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty
    }

    task = new_task()
@@ -481,18 +539,17 @@ def queue_raptor_o_graphrag_tasks(doc, ty, msg):
    hasher.update(ty.encode("utf-8"))
    task["digest"] = hasher.hexdigest()
    bulk_insert_into_db(Task, [task], True)
    task["task_type"] = ty
    assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
    assert REDIS_CONN.queue_product(get_svr_queue_name(priority), message=task), "Can't access Redis. Please check the Redis' status."

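The digest is what lets identical follow-up jobs be recognized: hashing the sorted chunking config plus the task type yields the same value for the same work. A self-contained sketch of the idea (the helper name is hypothetical):

import xxhash

def task_digest(chunking_config: dict, task_type: str) -> str:
    # Same config + same task type -> same digest, so duplicates are detectable.
    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):
        hasher.update(str(chunking_config[field]).encode("utf-8"))
    hasher.update(task_type.encode("utf-8"))
    return hasher.hexdigest()

assert task_digest({"chunk_token_num": 128}, "raptor") == task_digest({"chunk_token_num": 128}, "raptor")
assert task_digest({"chunk_token_num": 128}, "raptor") != task_digest({"chunk_token_num": 128}, "graphrag")
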
def doc_upload_and_parse(conversation_id, file_objs, user_id):
    from rag.app import presentation, picture, naive, audio, email
    from api.db.services.api_service import API4ConversationService
    from api.db.services.conversation_service import ConversationService
    from api.db.services.dialog_service import DialogService
    from api.db.services.file_service import FileService
    from api.db.services.llm_service import LLMBundle
    from api.db.services.user_service import TenantService
    from api.db.services.api_service import API4ConversationService
    from api.db.services.conversation_service import ConversationService
    from rag.app import audio, email, naive, picture, presentation

    e, conv = ConversationService.get_by_id(conversation_id)
    if not e:
@@ -500,6 +557,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
    assert e, "Conversation not found!"

    e, dia = DialogService.get_by_id(conv.dialog_id)
    if not dia.kb_ids:
        raise LookupError("No knowledge base associated with this conversation. "
                          "Please add a knowledge base before uploading documents")
    kb_id = dia.kb_ids[0]
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
@@ -588,10 +648,11 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
        cks = [c for c in docs if c["doc_id"] == doc_id]

        if parser_ids[doc_id] != ParserType.PICTURE.value:
            from graphrag.general.mind_map_extractor import MindMapExtractor
            mindmap = MindMapExtractor(llm_bdl)
            try:
                mind_map = json.dumps(mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
                                      ensure_ascii=False, indent=2)
                mind_map = trio.run(mindmap, [c["content_with_weight"] for c in docs if c["doc_id"] == doc_id])
                mind_map = json.dumps(mind_map.output, ensure_ascii=False, indent=2)
                if len(mind_map) < 32:
                    raise Exception("Few content: " + mind_map)
                cks.append({

@@ -14,44 +14,46 @@
# limitations under the License.
#
import logging
import re
import os
import re
from concurrent.futures import ThreadPoolExecutor

from flask_login import current_user
from peewee import fn

from api.db import FileType, KNOWLEDGEBASE_FOLDER_NAME, FileSource, ParserType
from api.db.db_models import DB, File2Document, Knowledgebase
from api.db.db_models import File, Document
from api.db import KNOWLEDGEBASE_FOLDER_NAME, FileSource, FileType, ParserType
from api.db.db_models import DB, Document, File, File2Document, Knowledgebase
from api.db.services import duplicate_name
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.utils import get_uuid
from api.utils.file_utils import filename_type, thumbnail_img
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img
from rag.utils.storage_factory import STORAGE_IMPL


class FileService(CommonService):
    # Service class for managing file operations and storage
    model = File

    @classmethod
    @DB.connection_context()
    def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page,
                     orderby, desc, keywords):
    def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page, orderby, desc, keywords):
        # Get files by parent folder ID with pagination and filtering
        # Args:
        #     tenant_id: ID of the tenant
        #     pf_id: Parent folder ID
        #     page_number: Page number for pagination
        #     items_per_page: Number of items per page
        #     orderby: Field to order by
        #     desc: Boolean indicating descending order
        #     keywords: Search keywords
        # Returns:
        #     Tuple of (file_list, total_count)
        if keywords:
            files = cls.model.select().where(
                (cls.model.tenant_id == tenant_id),
                (cls.model.parent_id == pf_id),
                (fn.LOWER(cls.model.name).contains(keywords.lower())),
                ~(cls.model.id == pf_id)
            )
            files = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == pf_id), (fn.LOWER(cls.model.name).contains(keywords.lower())), ~(cls.model.id == pf_id))
        else:
            files = cls.model.select().where((cls.model.tenant_id == tenant_id),
                                             (cls.model.parent_id == pf_id),
                                             ~(cls.model.id == pf_id)
                                             )
            files = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == pf_id), ~(cls.model.id == pf_id))
        count = files.count()
        if desc:
            files = files.order_by(cls.model.getter_by(orderby).desc())
@@ -64,37 +66,54 @@ class FileService(CommonService):
        for file in res_files:
            if file["type"] == FileType.FOLDER.value:
                file["size"] = cls.get_folder_size(file["id"])
                file['kbs_info'] = []
                children = list(cls.model.select().where(
                file["kbs_info"] = []
                children = list(
                    cls.model.select()
                    .where(
                        (cls.model.tenant_id == tenant_id),
                        (cls.model.parent_id == file["id"]),
                        ~(cls.model.id == file["id"]),
                ).dicts())
                    )
                    .dicts()
                )
                file["has_child_folder"] = any(value["type"] == FileType.FOLDER.value for value in children)
                continue
            kbs_info = cls.get_kb_id_by_file_id(file['id'])
            file['kbs_info'] = kbs_info
            kbs_info = cls.get_kb_id_by_file_id(file["id"])
            file["kbs_info"] = kbs_info

        return res_files, count

    @classmethod
    @DB.connection_context()
    def get_kb_id_by_file_id(cls, file_id):
        kbs = (cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
        # Get knowledge base IDs associated with a file
        # Args:
        #     file_id: File ID
        # Returns:
        #     List of dictionaries containing knowledge base IDs and names
        kbs = (
            cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
            .join(File2Document, on=(File2Document.file_id == file_id))
            .join(Document, on=(File2Document.document_id == Document.id))
            .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
            .where(cls.model.id == file_id))
            .where(cls.model.id == file_id)
        )
        if not kbs:
            return []
        kbs_info_list = []
        for kb in list(kbs.dicts()):
            kbs_info_list.append({"kb_id": kb['id'], "kb_name": kb['name']})
            kbs_info_list.append({"kb_id": kb["id"], "kb_name": kb["name"]})
        return kbs_info_list

    @classmethod
    @DB.connection_context()
    def get_by_pf_id_name(cls, id, name):
        # Get file by parent folder ID and name
        # Args:
        #     id: Parent folder ID
        #     name: File name
        # Returns:
        #     File object or None if not found
        file = cls.model.select().where((cls.model.parent_id == id) & (cls.model.name == name))
        if file.count():
            e, file = cls.get_by_id(file[0].id)
@@ -106,6 +125,14 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_id_list_by_id(cls, id, name, count, res):
        # Recursively get list of file IDs by traversing folder structure
        # Args:
        #     id: Starting folder ID
        #     name: List of folder names to traverse
        #     count: Current depth in traversal
        #     res: List to store results
        # Returns:
        #     List of file IDs
        if count < len(name):
            file = cls.get_by_pf_id_name(id, name[count])
            if file:
@@ -119,6 +146,12 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_all_innermost_file_ids(cls, folder_id, result_ids):
        # Get IDs of all files in the deepest level of folders
        # Args:
        #     folder_id: Starting folder ID
        #     result_ids: List to store results
        # Returns:
        #     List of file IDs
        subfolders = cls.model.select().where(cls.model.parent_id == folder_id)
        if subfolders.exists():
            for subfolder in subfolders:
@@ -130,24 +163,30 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def create_folder(cls, file, parent_id, name, count):
        # Recursively create folder structure
        # Args:
        #     file: Current file object
        #     parent_id: Parent folder ID
        #     name: List of folder names to create
        #     count: Current depth in creation
        # Returns:
        #     Created file object
        if count > len(name) - 2:
            return file
        else:
            file = cls.insert({
                "id": get_uuid(),
                "parent_id": parent_id,
                "tenant_id": current_user.id,
                "created_by": current_user.id,
                "name": name[count],
                "location": "",
                "size": 0,
                "type": FileType.FOLDER.value
            })
            file = cls.insert(
                {"id": get_uuid(), "parent_id": parent_id, "tenant_id": current_user.id, "created_by": current_user.id, "name": name[count], "location": "", "size": 0, "type": FileType.FOLDER.value}
            )
            return cls.create_folder(file, file.id, name, count + 1)

    @classmethod
    @DB.connection_context()
    def is_parent_folder_exist(cls, parent_id):
        # Check if parent folder exists
        # Args:
        #     parent_id: Parent folder ID
        # Returns:
        #     Boolean indicating if folder exists
        parent_files = cls.model.select().where(cls.model.id == parent_id)
        if parent_files.count():
            return True
@@ -157,9 +196,12 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_root_folder(cls, tenant_id):
        for file in cls.model.select().where((cls.model.tenant_id == tenant_id),
                                             (cls.model.parent_id == cls.model.id)
                                             ):
        # Get or create root folder for tenant
        # Args:
        #     tenant_id: Tenant ID
        # Returns:
        #     Root folder dictionary
        for file in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
            return file.to_dict()

        file_id = get_uuid()
@@ -179,17 +221,29 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_kb_folder(cls, tenant_id):
        for root in cls.model.select().where(
                (cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
            for folder in cls.model.select().where(
                    (cls.model.tenant_id == tenant_id), (cls.model.parent_id == root.id),
                    (cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)):
        # Get knowledge base folder for tenant
        # Args:
        #     tenant_id: Tenant ID
        # Returns:
        #     Knowledge base folder dictionary
        for root in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
            for folder in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == root.id), (cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)):
                return folder.to_dict()
        assert False, "Can't find the KB folder. Database init error."

    @classmethod
    @DB.connection_context()
    def new_a_file_from_kb(cls, tenant_id, name, parent_id, ty=FileType.FOLDER.value, size=0, location=""):
        # Create a new file from knowledge base
        # Args:
        #     tenant_id: Tenant ID
        #     name: File name
        #     parent_id: Parent folder ID
        #     ty: File type
        #     size: File size
        #     location: File location
        # Returns:
        #     Created file dictionary
        for file in cls.query(tenant_id=tenant_id, parent_id=parent_id, name=name):
            return file.to_dict()
        file = {
@@ -201,7 +255,7 @@ class FileService(CommonService):
            "type": ty,
            "size": size,
            "location": location,
            "source_type": FileSource.KNOWLEDGEBASE
            "source_type": FileSource.KNOWLEDGEBASE,
        }
        cls.save(**file)
        return file
@@ -209,12 +263,15 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def init_knowledgebase_docs(cls, root_id, tenant_id):
        for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)\
                                          & (cls.model.parent_id == root_id)):
        # Initialize knowledge base documents
        # Args:
        #     root_id: Root folder ID
        #     tenant_id: Tenant ID
        for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME) & (cls.model.parent_id == root_id)):
            return
        folder = cls.new_a_file_from_kb(tenant_id, KNOWLEDGEBASE_FOLDER_NAME, root_id)

        for kb in Knowledgebase.select(*[Knowledgebase.id, Knowledgebase.name]).where(Knowledgebase.tenant_id==tenant_id):
        for kb in Knowledgebase.select(*[Knowledgebase.id, Knowledgebase.name]).where(Knowledgebase.tenant_id == tenant_id):
            kb_folder = cls.new_a_file_from_kb(tenant_id, kb.name, folder["id"])
            for doc in DocumentService.query(kb_id=kb.id):
                FileService.add_file_from_kb(doc.to_dict(), kb_folder["id"], tenant_id)
@@ -222,6 +279,11 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_parent_folder(cls, file_id):
        # Get parent folder of a file
        # Args:
        #     file_id: File ID
        # Returns:
        #     Parent folder object
        file = cls.model.select().where(cls.model.id == file_id)
        if file.count():
            e, file = cls.get_by_id(file[0].parent_id)
@@ -234,6 +296,11 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_all_parent_folders(cls, start_id):
        # Get all parent folders in path
        # Args:
        #     start_id: Starting file ID
        # Returns:
        #     List of parent folder objects
        parent_folders = []
        current_id = start_id
        while current_id:
@@ -249,6 +316,11 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def insert(cls, file):
        # Insert a new file record
        # Args:
        #     file: File data dictionary
        # Returns:
        #     Created file object
        if not cls.save(**file):
            raise RuntimeError("Database error (File)!")
        return File(**file)
@@ -256,6 +328,7 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def delete(cls, file):
        #
        return cls.delete_by_id(file.id)

    @classmethod
@@ -267,12 +340,10 @@ class FileService(CommonService):
    @DB.connection_context()
    def delete_folder_by_pf_id(cls, user_id, folder_id):
        try:
            files = cls.model.select().where((cls.model.tenant_id == user_id)
                                             & (cls.model.parent_id == folder_id))
            files = cls.model.select().where((cls.model.tenant_id == user_id) & (cls.model.parent_id == folder_id))
            for file in files:
                cls.delete_folder_by_pf_id(user_id, file.id)
            return cls.model.delete().where((cls.model.tenant_id == user_id)
                                            & (cls.model.id == folder_id)).execute(),
            return (cls.model.delete().where((cls.model.tenant_id == user_id) & (cls.model.id == folder_id)).execute(),)
        except Exception:
            logging.exception("delete_folder_by_pf_id")
            raise RuntimeError("Database error (File retrieval)!")
@@ -290,8 +361,7 @@ class FileService(CommonService):

        def dfs(parent_id):
            nonlocal size
            for f in cls.model.select(*[cls.model.id, cls.model.size, cls.model.type]).where(
                    cls.model.parent_id == parent_id, cls.model.id != parent_id):
            for f in cls.model.select(*[cls.model.id, cls.model.size, cls.model.type]).where(cls.model.parent_id == parent_id, cls.model.id != parent_id):
                size += f.size
                if f.type == FileType.FOLDER.value:
                    dfs(f.id)
@@ -313,7 +383,7 @@ class FileService(CommonService):
            "type": doc["type"],
            "size": doc["size"],
            "location": doc["location"],
            "source_type": FileSource.KNOWLEDGEBASE
            "source_type": FileSource.KNOWLEDGEBASE,
        }
        cls.save(**file)
        File2DocumentService.save(**{"id": get_uuid(), "file_id": file["id"], "document_id": doc["id"]})
@@ -322,7 +392,7 @@ class FileService(CommonService):
    @DB.connection_context()
    def move_file(cls, file_ids, folder_id):
        try:
            cls.filter_update((cls.model.id << file_ids, ), { 'parent_id': folder_id })
            cls.filter_update((cls.model.id << file_ids,), {"parent_id": folder_id})
        except Exception:
            logging.exception("move_file")
            raise RuntimeError("Database error (File move)!")
@@ -339,16 +409,13 @@ class FileService(CommonService):
        err, files = [], []
        for file in file_objs:
            try:
                MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
                MAX_FILE_NUM_PER_USER = int(os.environ.get("MAX_FILE_NUM_PER_USER", 0))
                if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
                    raise RuntimeError("Exceed the maximum file number of a free user!")
                if len(file.filename) >= 128:
                if len(file.filename.encode("utf-8")) >= 128:
                    raise RuntimeError("Exceed the maximum length of file name!")

                filename = duplicate_name(
                    DocumentService.query,
                    name=file.filename,
                    kb_id=kb.id)
                filename = duplicate_name(DocumentService.query, name=file.filename, kb_id=kb.id)
                filetype = filename_type(filename)
                if filetype == FileType.OTHER.value:
                    raise RuntimeError("This type of file has not been supported yet!")
@@ -356,15 +423,18 @@ class FileService(CommonService):
                location = filename
                while STORAGE_IMPL.obj_exist(kb.id, location):
                    location += "_"

                blob = file.read()
                if filetype == FileType.PDF.value:
                    blob = read_potential_broken_pdf(blob)
                STORAGE_IMPL.put(kb.id, location, blob)

                doc_id = get_uuid()

                img = thumbnail_img(filename, blob)
                thumbnail_location = ''
                thumbnail_location = ""
                if img is not None:
                    thumbnail_location = f'thumbnail_{doc_id}.png'
                    thumbnail_location = f"thumbnail_{doc_id}.png"
                    STORAGE_IMPL.put(kb.id, thumbnail_location, img)

                doc = {
@@ -377,7 +447,7 @@ class FileService(CommonService):
                    "name": filename,
                    "location": location,
                    "size": len(blob),
                    "thumbnail": thumbnail_location
                    "thumbnail": thumbnail_location,
                }
                DocumentService.insert(doc)

@@ -390,29 +460,17 @@ class FileService(CommonService):

    @staticmethod
    def parse_docs(file_objs, user_id):
        from rag.app import presentation, picture, naive, audio, email
        from rag.app import audio, email, naive, picture, presentation

        def dummy(prog=None, msg=""):
            pass

        FACTORY = {
            ParserType.PRESENTATION.value: presentation,
            ParserType.PICTURE.value: picture,
            ParserType.AUDIO.value: audio,
            ParserType.EMAIL.value: email
        }
        FACTORY = {ParserType.PRESENTATION.value: presentation, ParserType.PICTURE.value: picture, ParserType.AUDIO.value: audio, ParserType.EMAIL.value: email}
        parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
        exe = ThreadPoolExecutor(max_workers=12)
        threads = []
        for file in file_objs:
            kwargs = {
                "lang": "English",
                "callback": dummy,
                "parser_config": parser_config,
                "from_page": 0,
                "to_page": 100000,
                "tenant_id": user_id
            }
            kwargs = {"lang": "English", "callback": dummy, "parser_config": parser_config, "from_page": 0, "to_page": 100000, "tenant_id": user_id}
            filetype = filename_type(file.filename)
            blob = file.read()
            threads.append(exe.submit(FACTORY.get(FileService.get_parser(filetype, file.filename, ""), naive).chunk, file.filename, blob, **kwargs))
@@ -434,3 +492,4 @@ class FileService(CommonService):
        if re.search(r"\.(eml)$", filename):
            return ParserType.EMAIL.value
        return default

@@ -13,22 +13,115 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.db import StatusEnum, TenantPermission
from api.db.db_models import Knowledgebase, DB, Tenant, User, UserTenant, Document
from api.db.services.common_service import CommonService
from datetime import datetime

from peewee import fn

from api.db import StatusEnum, TenantPermission
from api.db.db_models import DB, Document, Knowledgebase, Tenant, User, UserTenant
from api.db.services.common_service import CommonService
from api.utils import current_timestamp, datetime_format


class KnowledgebaseService(CommonService):
    """Service class for managing knowledge base operations.

    This class extends CommonService to provide specialized functionality for knowledge base
    management, including document parsing status tracking, access control, and configuration
    management. It handles operations such as listing, creating, updating, and deleting
    knowledge bases, as well as managing their associated documents and permissions.

    The class implements a comprehensive set of methods for:
    - Document parsing status verification
    - Knowledge base access control
    - Parser configuration management
    - Tenant-based knowledge base organization

    Attributes:
        model: The Knowledgebase model class for database operations.
    """
    model = Knowledgebase

    @classmethod
    @DB.connection_context()
    def list_documents_by_ids(cls,kb_ids):
        doc_ids=cls.model.select(Document.id.alias("document_id")).join(Document,on=(cls.model.id == Document.kb_id)).where(
    def accessible4deletion(cls, kb_id, user_id):
        """Check if a knowledge base can be deleted by a specific user.

        This method verifies whether a user has permission to delete a knowledge base
        by checking if they are the creator of that knowledge base.

        Args:
            kb_id (str): The unique identifier of the knowledge base to check.
            user_id (str): The unique identifier of the user attempting the deletion.

        Returns:
            bool: True if the user has permission to delete the knowledge base,
                False if the user doesn't have permission or the knowledge base doesn't exist.

        Example:
            >>> KnowledgebaseService.accessible4deletion("kb123", "user456")
            True

        Note:
            - This method only checks creator permissions
            - A return value of False can mean either:
                1. The knowledge base doesn't exist
                2. The user is not the creator of the knowledge base
        """
        # Check if a knowledge base can be deleted by a user
        docs = cls.model.select(
            cls.model.id).where(cls.model.id == kb_id, cls.model.created_by == user_id).paginate(0, 1)
        docs = docs.dicts()
        if not docs:
            return False
        return True

    @classmethod
    @DB.connection_context()
    def is_parsed_done(cls, kb_id):
        # Check if all documents in the knowledge base have completed parsing
        #
        # Args:
        #     kb_id: Knowledge base ID
        #
        # Returns:
        #     If all documents are parsed successfully, returns (True, None)
        #     If any document is not fully parsed, returns (False, error_message)
        from api.db import TaskStatus
        from api.db.services.document_service import DocumentService

        # Get knowledge base information
        kbs = cls.query(id=kb_id)
        if not kbs:
            return False, "Knowledge base not found"
        kb = kbs[0]

        # Get all documents in the knowledge base
        docs, _ = DocumentService.get_by_kb_id(kb_id, 1, 1000, "create_time", True, "", [], [])

        # Check parsing status of each document
        for doc in docs:
            # If document is being parsed, don't allow chat creation
            if doc['run'] == TaskStatus.RUNNING.value or doc['run'] == TaskStatus.CANCEL.value or doc['run'] == TaskStatus.FAIL.value:
                return False, f"Document '{doc['name']}' in dataset '{kb.name}' is still being parsed. Please wait until all documents are parsed before starting a chat."
            # If document is not yet parsed and has no chunks, don't allow chat creation
            if doc['run'] == TaskStatus.UNSTART.value and doc['chunk_num'] == 0:
                return False, f"Document '{doc['name']}' in dataset '{kb.name}' has not been parsed yet. Please parse all documents before starting a chat."

        return True, None

    @classmethod
    @DB.connection_context()
    def list_documents_by_ids(cls, kb_ids):
        # Get document IDs associated with given knowledge base IDs
        # Args:
        #     kb_ids: List of knowledge base IDs
        # Returns:
        #     List of document IDs
        doc_ids = cls.model.select(Document.id.alias("document_id")).join(Document, on=(cls.model.id == Document.kb_id)).where(
            cls.model.id.in_(kb_ids)
        )
        doc_ids =list(doc_ids.dicts())
        doc_ids = list(doc_ids.dicts())
        doc_ids = [doc["document_id"] for doc in doc_ids]
        return doc_ids

@@ -39,12 +132,25 @@ class KnowledgebaseService(CommonService):
                          orderby, desc, keywords,
                          parser_id=None
                          ):
        # Get knowledge bases by tenant IDs with pagination and filtering
        # Args:
        #     joined_tenant_ids: List of tenant IDs
        #     user_id: Current user ID
        #     page_number: Page number for pagination
        #     items_per_page: Number of items per page
        #     orderby: Field to order by
        #     desc: Boolean indicating descending order
        #     keywords: Search keywords
        #     parser_id: Optional parser ID filter
        # Returns:
        #     Tuple of (knowledge_base_list, total_count)
        fields = [
            cls.model.id,
            cls.model.avatar,
            cls.model.name,
            cls.model.language,
            cls.model.description,
            cls.model.tenant_id,
            cls.model.permission,
            cls.model.doc_num,
            cls.model.token_num,
@@ -79,6 +185,7 @@ class KnowledgebaseService(CommonService):

        count = kbs.count()

        if page_number and items_per_page:
            kbs = kbs.paginate(page_number, items_per_page)

        return list(kbs.dicts()), count
@@ -86,6 +193,11 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_kb_ids(cls, tenant_id):
        # Get all knowledge base IDs for a tenant
        # Args:
        #     tenant_id: Tenant ID
        # Returns:
        #     List of knowledge base IDs
        fields = [
            cls.model.id,
        ]
@@ -96,9 +208,13 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_detail(cls, kb_id):
        # Get detailed information about a knowledge base
        # Args:
        #     kb_id: Knowledge base ID
        # Returns:
        #     Dictionary containing knowledge base details
        fields = [
            cls.model.id,
            # Tenant.embd_id,
            cls.model.embd_id,
            cls.model.avatar,
            cls.model.name,
@@ -110,7 +226,10 @@ class KnowledgebaseService(CommonService):
            cls.model.chunk_num,
            cls.model.parser_id,
            cls.model.parser_config,
            cls.model.pagerank]
            cls.model.pagerank,
            cls.model.create_time,
            cls.model.update_time
        ]
        kbs = cls.model.select(*fields).join(Tenant, on=(
            (Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
            (cls.model.id == kb_id),
@@ -119,17 +238,21 @@ class KnowledgebaseService(CommonService):
        if not kbs:
            return
        d = kbs[0].to_dict()
        # d["embd_id"] = kbs[0].tenant.embd_id
        return d

    @classmethod
    @DB.connection_context()
    def update_parser_config(cls, id, config):
        # Update parser configuration for a knowledge base
        # Args:
        #     id: Knowledge base ID
        #     config: New parser configuration
        e, m = cls.get_by_id(id)
        if not e:
            raise LookupError(f"knowledgebase({id}) not found.")

        def dfs_update(old, new):
            # Deep update of nested configuration
            for k, v in new.items():
                if k not in old:
                    old[k] = v
@@ -146,9 +269,24 @@ class KnowledgebaseService(CommonService):
        dfs_update(m.parser_config, config)
        cls.update_by_id(id, {"parser_config": m.parser_config})

    @classmethod
    @DB.connection_context()
    def delete_field_map(cls, id):
        e, m = cls.get_by_id(id)
        if not e:
            raise LookupError(f"knowledgebase({id}) not found.")

        m.parser_config.pop("field_map", None)
        cls.update_by_id(id, {"parser_config": m.parser_config})

    @classmethod
    @DB.connection_context()
    def get_field_map(cls, ids):
        # Get field mappings for knowledge bases
        # Args:
        #     ids: List of knowledge base IDs
        # Returns:
        #     Dictionary of field mappings
        conf = {}
        for k in cls.get_by_ids(ids):
            if k.parser_config and "field_map" in k.parser_config:
@@ -158,6 +296,12 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_by_name(cls, kb_name, tenant_id):
        # Get knowledge base by name and tenant ID
        # Args:
        #     kb_name: Knowledge base name
        #     tenant_id: Tenant ID
        # Returns:
        #     Tuple of (exists, knowledge_base)
        kb = cls.model.select().where(
            (cls.model.name == kb_name)
            & (cls.model.tenant_id == tenant_id)
@@ -170,12 +314,27 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_all_ids(cls):
        # Get all knowledge base IDs
        # Returns:
        #     List of all knowledge base IDs
        return [m["id"] for m in cls.model.select(cls.model.id).dicts()]

    @classmethod
    @DB.connection_context()
    def get_list(cls, joined_tenant_ids, user_id,
                 page_number, items_per_page, orderby, desc, id, name):
        # Get list of knowledge bases with filtering and pagination
        # Args:
        #     joined_tenant_ids: List of tenant IDs
        #     user_id: Current user ID
        #     page_number: Page number for pagination
        #     items_per_page: Number of items per page
        #     orderby: Field to order by
        #     desc: Boolean indicating descending order
        #     id: Optional ID filter
        #     name: Optional name filter
        # Returns:
        #     List of knowledge bases
        kbs = cls.model.select()
        if id:
            kbs = kbs.where(cls.model.id == id)
@@ -199,6 +358,12 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def accessible(cls, kb_id, user_id):
        # Check if a knowledge base is accessible by a user
        # Args:
        #     kb_id: Knowledge base ID
        #     user_id: User ID
        # Returns:
        #     Boolean indicating accessibility
        docs = cls.model.select(
            cls.model.id).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
        ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
@@ -210,6 +375,12 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_kb_by_id(cls, kb_id, user_id):
        # Get knowledge base by ID and user ID
        # Args:
        #     kb_id: Knowledge base ID
        #     user_id: User ID
        # Returns:
        #     List containing knowledge base information
        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
        ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
        kbs = kbs.dicts()
@@ -218,6 +389,12 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_kb_by_name(cls, kb_name, user_id):
        # Get knowledge base by name and user ID
        # Args:
        #     kb_name: Knowledge base name
        #     user_id: User ID
        # Returns:
        #     List containing knowledge base information
        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
        ).where(cls.model.name == kb_name, UserTenant.user_id == user_id).paginate(0, 1)
        kbs = kbs.dicts()

@@ -225,11 +402,37 @@ class KnowledgebaseService(CommonService):

    @classmethod
    @DB.connection_context()
    def accessible4deletion(cls, kb_id, user_id):
        docs = cls.model.select(
            cls.model.id).where(cls.model.id == kb_id, cls.model.created_by == user_id).paginate(0, 1)
        docs = docs.dicts()
        if not docs:
            return False
        return True
    def atomic_increase_doc_num_by_id(cls, kb_id):
        data = {}
        data["update_time"] = current_timestamp()
        data["update_date"] = datetime_format(datetime.now())
        data["doc_num"] = cls.model.doc_num + 1
        num = cls.model.update(data).where(cls.model.id == kb_id).execute()
        return num

    @classmethod
    @DB.connection_context()
    def update_document_number_in_init(cls, kb_id, doc_num):
        """
        Only use this function when initializing the system.
        """
        ok, kb = cls.get_by_id(kb_id)
        if not ok:
            return
        kb.doc_num = doc_num

        dirty_fields = kb.dirty_fields
        if cls.model._meta.combined.get("update_time") in dirty_fields:
            dirty_fields.remove(cls.model._meta.combined["update_time"])

        if cls.model._meta.combined.get("update_date") in dirty_fields:
            dirty_fields.remove(cls.model._meta.combined["update_date"])

        try:
            kb.save(only=dirty_fields)
        except ValueError as e:
            if str(e) == "no data to save!":
                pass  # that's OK
            else:
                raise e

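The point of atomic_increase_doc_num_by_id is to push the increment into a single UPDATE instead of the read-modify-write that insert() used before; a minimal peewee sketch of the difference (the kb_id is a placeholder):

# Racy (old): two concurrent inserts can both read doc_num=N and both write N+1.
#   e, kb = KnowledgebaseService.get_by_id(kb_id)
#   KnowledgebaseService.update_by_id(kb.id, {"doc_num": kb.doc_num + 1})
# Atomic (new): the database performs doc_num = doc_num + 1 itself.
query = Knowledgebase.update({Knowledgebase.doc_num: Knowledgebase.doc_num + 1}).where(Knowledgebase.id == "kb-id-placeholder")
# query.execute() issues one UPDATE ... SET doc_num = doc_num + 1 WHERE id = %s.
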
api/db/services/langfuse_service.py (new file, 71 lines)
@@ -0,0 +1,71 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from datetime import datetime

import peewee

from api.db.db_models import DB, TenantLangfuse
from api.db.services.common_service import CommonService
from api.utils import current_timestamp, datetime_format


class TenantLangfuseService(CommonService):
    """
    All methods that modify the status should be enclosed within a DB.atomic() context to ensure atomicity
    and maintain data integrity in case of errors during execution.
    """

    model = TenantLangfuse

    @classmethod
    @DB.connection_context()
    def filter_by_tenant(cls, tenant_id):
        fields = [cls.model.tenant_id, cls.model.host, cls.model.secret_key, cls.model.public_key]
        try:
            keys = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id).first()
            return keys
        except peewee.DoesNotExist:
            return None

    @classmethod
    @DB.connection_context()
    def filter_by_tenant_with_info(cls, tenant_id):
        fields = [cls.model.tenant_id, cls.model.host, cls.model.secret_key, cls.model.public_key]
        try:
            keys = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id).dicts().first()
            return keys
        except peewee.DoesNotExist:
            return None

    @classmethod
    def update_by_tenant(cls, tenant_id, langfuse_keys):
        langfuse_keys["update_time"] = current_timestamp()
        langfuse_keys["update_date"] = datetime_format(datetime.now())
        return cls.model.update(**langfuse_keys).where(cls.model.tenant_id == tenant_id).execute()

    @classmethod
    def save(cls, **kwargs):
        kwargs["create_time"] = current_timestamp()
        kwargs["create_date"] = datetime_format(datetime.now())
        kwargs["update_time"] = current_timestamp()
        kwargs["update_date"] = datetime_format(datetime.now())
        obj = cls.model.create(**kwargs)
        return obj

    @classmethod
    def delete_model(cls, langfuse_model):
        langfuse_model.delete_instance()

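A hedged usage sketch of the new service: upsert a tenant's Langfuse keys using only the methods defined above (the tenant ID and keys are placeholders):

keys = {"host": "https://cloud.langfuse.com", "secret_key": "sk-placeholder", "public_key": "pk-placeholder"}
if TenantLangfuseService.filter_by_tenant("tenant-id-placeholder"):
    TenantLangfuseService.update_by_tenant("tenant-id-placeholder", dict(keys))
else:
    TenantLangfuseService.save(tenant_id="tenant-id-placeholder", **keys)
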
@@ -13,17 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os

from api.db.services.user_service import TenantService
from api.utils.file_utils import get_project_base_directory
from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel, Seq2txtModel, TTSModel
from langfuse import Langfuse

from api import settings
from api.db import LLMType
from api.db.db_models import DB
from api.db.db_models import LLMFactories, LLM, TenantLLM
from api.db.db_models import DB, LLM, LLMFactories, TenantLLM
from api.db.services.common_service import CommonService
from api.db.services.langfuse_service import TenantLangfuseService
from api.db.services.user_service import TenantService
from rag.llm import ChatModel, CvModel, EmbeddingModel, RerankModel, Seq2txtModel, TTSModel


class LLMFactoriesService(CommonService):
@@ -52,16 +52,8 @@ class TenantLLMService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_my_llms(cls, tenant_id):
        fields = [
            cls.model.llm_factory,
            LLMFactories.logo,
            LLMFactories.tags,
            cls.model.model_type,
            cls.model.llm_name,
            cls.model.used_tokens
        ]
        objs = cls.model.select(*fields).join(LLMFactories, on=(cls.model.llm_factory == LLMFactories.name)).where(
            cls.model.tenant_id == tenant_id, ~cls.model.api_key.is_null()).dicts()
        fields = [cls.model.llm_factory, LLMFactories.logo, LLMFactories.tags, cls.model.model_type, cls.model.llm_name, cls.model.used_tokens]
        objs = cls.model.select(*fields).join(LLMFactories, on=(cls.model.llm_factory == LLMFactories.name)).where(cls.model.tenant_id == tenant_id, ~cls.model.api_key.is_null()).dicts()

        return list(objs)

@@ -75,7 +67,7 @@ class TenantLLMService(CommonService):

        # model name must be xxx@yyy
        try:
            model_factories = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
            model_factories = settings.FACTORY_LLM_INFOS
            model_providers = set([f["name"] for f in model_factories])
            if arr[-1] not in model_providers:
                return model_name, None
@ -86,8 +78,7 @@ class TenantLLMService(CommonService):
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def model_instance(cls, tenant_id, llm_type,
|
||||
llm_name=None, lang="Chinese"):
|
||||
def get_model_config(cls, tenant_id, llm_type, llm_name=None):
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
raise LookupError("Tenant not found")
|
||||
@ -109,8 +100,15 @@ class TenantLLMService(CommonService):
|
||||
|
||||
model_config = cls.get_api_key(tenant_id, mdlnm)
|
||||
mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
|
||||
if not model_config: # for some cases seems fid mismatch
|
||||
model_config = cls.get_api_key(tenant_id, mdlnm)
|
||||
if model_config:
|
||||
model_config = model_config.to_dict()
|
||||
llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
|
||||
if not llm and fid: # for some cases seems fid mismatch
|
||||
llm = LLMService.query(llm_name=mdlnm)
|
||||
if llm:
|
||||
model_config["is_tools"] = llm[0].is_tools
|
||||
if not model_config:
|
||||
if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
|
||||
llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
|
||||
@ -118,47 +116,41 @@ class TenantLLMService(CommonService):
|
||||
model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": mdlnm, "api_base": ""}
|
||||
if not model_config:
|
||||
if mdlnm == "flag-embedding":
|
||||
model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "",
|
||||
"llm_name": llm_name, "api_base": ""}
|
||||
model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "", "llm_name": llm_name, "api_base": ""}
|
||||
else:
|
||||
if not mdlnm:
|
||||
raise LookupError(f"Type of {llm_type} model is not set.")
|
||||
raise LookupError("Model({}) not authorized".format(mdlnm))
|
||||
return model_config
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def model_instance(cls, tenant_id, llm_type, llm_name=None, lang="Chinese"):
|
||||
model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
|
||||
if llm_type == LLMType.EMBEDDING.value:
|
||||
if model_config["llm_factory"] not in EmbeddingModel:
|
||||
return
|
||||
return EmbeddingModel[model_config["llm_factory"]](
|
||||
model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
return EmbeddingModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
|
||||
if llm_type == LLMType.RERANK:
|
||||
if model_config["llm_factory"] not in RerankModel:
|
||||
return
|
||||
return RerankModel[model_config["llm_factory"]](
|
||||
model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
return RerankModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
|
||||
if llm_type == LLMType.IMAGE2TEXT.value:
|
||||
if model_config["llm_factory"] not in CvModel:
|
||||
return
|
||||
return CvModel[model_config["llm_factory"]](
|
||||
model_config["api_key"], model_config["llm_name"], lang,
|
||||
base_url=model_config["api_base"]
|
||||
)
|
||||
return CvModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], lang, base_url=model_config["api_base"])
|
||||
|
||||
if llm_type == LLMType.CHAT.value:
|
||||
if model_config["llm_factory"] not in ChatModel:
|
||||
return
|
||||
return ChatModel[model_config["llm_factory"]](
|
||||
model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
return ChatModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
|
||||
if llm_type == LLMType.SPEECH2TEXT:
|
||||
if model_config["llm_factory"] not in Seq2txtModel:
|
||||
return
|
||||
return Seq2txtModel[model_config["llm_factory"]](
|
||||
key=model_config["api_key"], model_name=model_config["llm_name"],
|
||||
lang=lang,
|
||||
base_url=model_config["api_base"]
|
||||
)
|
||||
return Seq2txtModel[model_config["llm_factory"]](key=model_config["api_key"], model_name=model_config["llm_name"], lang=lang, base_url=model_config["api_base"])
|
||||
if llm_type == LLMType.TTS:
|
||||
if model_config["llm_factory"] not in TTSModel:
|
||||
return
|
||||
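The refactor above splits configuration resolution (get_model_config) from instantiation (model_instance). A minimal sketch of how a caller might use the pair (the tenant id is a hypothetical placeholder):

# Hypothetical example: resolve a tenant's chat model and instantiate it.
from api.db import LLMType
from api.db.services.llm_service import TenantLLMService

tenant_id = "tenant-123"  # placeholder, not from the diff

# get_model_config() resolves llm_factory, api_key, api_base and is_tools for the tenant...
config = TenantLLMService.get_model_config(tenant_id, LLMType.CHAT)

# ...while model_instance() wraps the same lookup and returns a ready chat client.
chat_mdl = TenantLLMService.model_instance(tenant_id, LLMType.CHAT)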
@@ -173,133 +165,218 @@ class TenantLLMService(CommonService):
    def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")
            logging.error(f"Tenant not found: {tenant_id}")
            return 0

        if llm_type == LLMType.EMBEDDING.value:
            mdlnm = tenant.embd_id
        elif llm_type == LLMType.SPEECH2TEXT.value:
            mdlnm = tenant.asr_id
        elif llm_type == LLMType.IMAGE2TEXT.value:
            mdlnm = tenant.img2txt_id
        elif llm_type == LLMType.CHAT.value:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        elif llm_type == LLMType.RERANK:
            mdlnm = tenant.rerank_id if not llm_name else llm_name
        elif llm_type == LLMType.TTS:
            mdlnm = tenant.tts_id if not llm_name else llm_name
        else:
            assert False, "LLM type error"
        llm_map = {
            LLMType.EMBEDDING.value: tenant.embd_id,
            LLMType.SPEECH2TEXT.value: tenant.asr_id,
            LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
            LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
            LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
            LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name,
        }

        mdlnm = llm_map.get(llm_type)
        if mdlnm is None:
            logging.error(f"LLM type error: {llm_type}")
            return 0

        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)

        num = 0
        try:
            if llm_factory:
                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory)
            else:
                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name)
            if not tenant_llms:
                return num
            else:
                tenant_llm = tenant_llms[0]
                num = cls.model.update(used_tokens=tenant_llm.used_tokens + used_tokens) \
                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_factory == tenant_llm.llm_factory, cls.model.llm_name == llm_name) \
            num = (
                cls.model.update(used_tokens=cls.model.used_tokens + used_tokens)
                .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == llm_name, cls.model.llm_factory == llm_factory if llm_factory else True)
                .execute()
            )
        except Exception:
            logging.exception("TenantLLMService.increase_usage got exception")
            logging.exception("TenantLLMService.increase_usage got exception, failed to update used_tokens for tenant_id=%s, llm_name=%s", tenant_id, llm_name)
            return 0

        return num
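The rewritten update is worth a note: used_tokens=cls.model.used_tokens + used_tokens pushes the increment into SQL (an atomic UPDATE ... SET used_tokens = used_tokens + n) instead of a read-modify-write in Python, and the trailing "... if llm_factory else True" degrades the factory filter to a no-op when no factory is known. A standalone sketch of the same pattern (table, database, and values are hypothetical):

# Minimal peewee sketch of an atomic counter update with an optional filter.
from peewee import SqliteDatabase, Model, CharField, IntegerField

db = SqliteDatabase(":memory:")  # hypothetical in-memory DB for illustration

class Usage(Model):
    name = CharField()
    factory = CharField(null=True)
    used_tokens = IntegerField(default=0)

    class Meta:
        database = db

db.create_tables([Usage])
Usage.create(name="gpt-4o", factory="OpenAI", used_tokens=10)

factory = None  # pretend the factory could not be resolved
n = (
    Usage.update(used_tokens=Usage.used_tokens + 5)  # increment happens inside SQL
    .where(Usage.name == "gpt-4o", Usage.factory == factory if factory else True)
    .execute()
)
assert n == 1 and Usage.get(Usage.name == "gpt-4o").used_tokens == 15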
    @classmethod
    @DB.connection_context()
    def get_openai_models(cls):
        objs = cls.model.select().where(
            (cls.model.llm_factory == "OpenAI"),
            ~(cls.model.llm_name == "text-embedding-3-small"),
            ~(cls.model.llm_name == "text-embedding-3-large")
        ).dicts()
        objs = cls.model.select().where((cls.model.llm_factory == "OpenAI"), ~(cls.model.llm_name == "text-embedding-3-small"), ~(cls.model.llm_name == "text-embedding-3-large")).dicts()
        return list(objs)

class LLMBundle(object):
class LLMBundle:
    def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese"):
        self.tenant_id = tenant_id
        self.llm_type = llm_type
        self.llm_name = llm_name
        self.mdl = TenantLLMService.model_instance(
            tenant_id, llm_type, llm_name, lang=lang)
        assert self.mdl, "Can't find model for {}/{}/{}".format(
            tenant_id, llm_type, llm_name)
        self.max_length = 8192
        for lm in LLMService.query(llm_name=llm_name):
            self.max_length = lm.max_tokens
            break
        self.mdl = TenantLLMService.model_instance(tenant_id, llm_type, llm_name, lang=lang)
        assert self.mdl, "Can't find model for {}/{}/{}".format(tenant_id, llm_type, llm_name)
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        self.max_length = model_config.get("max_tokens", 8192)

        self.is_tools = model_config.get("is_tools", False)

        langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=tenant_id)
        if langfuse_keys:
            langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
            if langfuse.auth_check():
                self.langfuse = langfuse
                self.trace = self.langfuse.trace(name=f"{self.llm_type}-{self.llm_name}")
        else:
            self.langfuse = None

    def bind_tools(self, toolcall_session, tools):
        if not self.is_tools:
            logging.warning(f"Model {self.llm_name} does not support tool call, but you have assigned one or more tools to it!")
            return
        self.mdl.bind_tools(toolcall_session, tools)

    def encode(self, texts: list):
        if self.langfuse:
            generation = self.trace.generation(name="encode", model=self.llm_name, input={"texts": texts})

        embeddings, used_tokens = self.mdl.encode(texts)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            logging.error(
                "LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
            logging.error("LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))

        if self.langfuse:
            generation.end(usage_details={"total_tokens": used_tokens})

        return embeddings, used_tokens

    def encode_queries(self, query: str):
        if self.langfuse:
            generation = self.trace.generation(name="encode_queries", model=self.llm_name, input={"query": query})

        emd, used_tokens = self.mdl.encode_queries(query)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            logging.error(
                "LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
            logging.error("LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))

        if self.langfuse:
            generation.end(usage_details={"total_tokens": used_tokens})

        return emd, used_tokens

    def similarity(self, query: str, texts: list):
        if self.langfuse:
            generation = self.trace.generation(name="similarity", model=self.llm_name, input={"query": query, "texts": texts})

        sim, used_tokens = self.mdl.similarity(query, texts)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            logging.error(
                "LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
            logging.error("LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))

        if self.langfuse:
            generation.end(usage_details={"total_tokens": used_tokens})

        return sim, used_tokens

    def describe(self, image, max_tokens=300):
        txt, used_tokens = self.mdl.describe(image, max_tokens)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            logging.error(
                "LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
        if self.langfuse:
            generation = self.trace.generation(name="describe", metadata={"model": self.llm_name})

        txt, used_tokens = self.mdl.describe(image)
        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
            logging.error("LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))

        if self.langfuse:
            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def describe_with_prompt(self, image, prompt):
        if self.langfuse:
            generation = self.trace.generation(name="describe_with_prompt", metadata={"model": self.llm_name, "prompt": prompt})

        txt, used_tokens = self.mdl.describe_with_prompt(image, prompt)
        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
            logging.error("LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))

        if self.langfuse:
            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def transcription(self, audio):
        if self.langfuse:
            generation = self.trace.generation(name="transcription", metadata={"model": self.llm_name})

        txt, used_tokens = self.mdl.transcription(audio)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            logging.error(
                "LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
            logging.error("LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))

        if self.langfuse:
            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def tts(self, text):
        if self.langfuse:
            span = self.trace.span(name="tts", input={"text": text})

        for chunk in self.mdl.tts(text):
            if isinstance(chunk, int):
                if not TenantLLMService.increase_usage(
                        self.tenant_id, self.llm_type, chunk, self.llm_name):
                    logging.error(
                        "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, chunk, self.llm_name):
                    logging.error("LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                return
            yield chunk

        if self.langfuse:
            span.end()

    def _remove_reasoning_content(self, txt: str) -> str:
        first_think_start = txt.find("<think>")
        if first_think_start == -1:
            return txt

        last_think_end = txt.rfind("</think>")
        if last_think_end == -1:
            return txt

        if last_think_end < first_think_start:
            return txt

        return txt[last_think_end + len("</think>"):]
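A quick illustration of what _remove_reasoning_content does (a sketch; the sample string is made up):

# Everything up to and including the last </think> tag is discarded.
txt = "<think>chain of thought...</think>The capital of France is Paris."
# _remove_reasoning_content(txt)  ->  "The capital of France is Paris."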
    def chat(self, system, history, gen_conf):
        txt, used_tokens = self.mdl.chat(system, history, gen_conf)
        if isinstance(txt, int) and not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens, self.llm_name):
            logging.error(
                "LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name,
                                                                                                           used_tokens))
        if self.langfuse:
            generation = self.trace.generation(name="chat", model=self.llm_name, input={"system": system, "history": history})

        chat = self.mdl.chat
        if self.is_tools and self.mdl.is_tools:
            chat = self.mdl.chat_with_tools

        txt, used_tokens = chat(system, history, gen_conf)
        txt = self._remove_reasoning_content(txt)

        if isinstance(txt, int) and not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, self.llm_name):
            logging.error("LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))

        if self.langfuse:
            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def chat_streamly(self, system, history, gen_conf):
        for txt in self.mdl.chat_streamly(system, history, gen_conf):
        if self.langfuse:
            generation = self.trace.generation(name="chat_streamly", model=self.llm_name, input={"system": system, "history": history})

        ans = ""
        chat_streamly = self.mdl.chat_streamly
        total_tokens = 0
        if self.is_tools and self.mdl.is_tools:
            chat_streamly = self.mdl.chat_streamly_with_tools

        for txt in chat_streamly(system, history, gen_conf):
            if isinstance(txt, int):
                if not TenantLLMService.increase_usage(
                        self.tenant_id, self.llm_type, txt, self.llm_name):
                    logging.error(
                        "LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name,
                                                                                                                        txt))
                return
                yield txt
                total_tokens = txt
                if self.langfuse:
                    generation.end(output={"output": ans})
                break

            if txt.endswith("</think>"):
                ans = ans.rstrip("</think>")

            ans += txt
            yield ans
        if total_tokens > 0:
            if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, txt, self.llm_name):
                logging.error("LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))

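Taken together, LLMBundle is the facade the rest of RAGFlow uses for model calls; it hides config lookup, token accounting, and the optional Langfuse tracing added by this change. A minimal usage sketch (ids and prompts are placeholders):

# Hypothetical caller: stream a chat completion through LLMBundle.
from api.db import LLMType
from api.db.services.llm_service import LLMBundle

bundle = LLMBundle("tenant-123", LLMType.CHAT)  # placeholder tenant id
history = [{"role": "user", "content": "Summarize RAGFlow in one sentence."}]

# chat_streamly() yields the accumulated answer so far; the final token count
# is booked through TenantLLMService.increase_usage().
for ans in bundle.chat_streamly("You are a helpful assistant.", history, {"temperature": 0.1}):
    print(ans)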
@@ -28,7 +28,7 @@ from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.utils import current_timestamp, get_uuid
from deepdoc.parser.excel_parser import RAGFlowExcelParser
from rag.settings import SVR_QUEUE_NAME
from rag.settings import get_svr_queue_name
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.redis_conn import REDIS_CONN
from api import settings
@@ -36,6 +36,12 @@ from rag.nlp import search


def trim_header_by_lines(text: str, max_length) -> str:
    # Trim header text to maximum length while preserving line breaks
    # Args:
    #     text: Input text to trim
    #     max_length: Maximum allowed length
    # Returns:
    #     Trimmed text
    len_text = len(text)
    if len_text <= max_length:
        return text
@@ -46,11 +52,37 @@ def trim_header_by_lines(text: str, max_length) -> str:


class TaskService(CommonService):
    """Service class for managing document processing tasks.

    This class extends CommonService to provide specialized functionality for document
    processing task management, including task creation, progress tracking, and chunk
    management. It handles various document types (PDF, Excel, etc.) and manages their
    processing lifecycle.

    The class implements a robust task queue system with retry mechanisms and progress
    tracking, supporting both synchronous and asynchronous task execution.

    Attributes:
        model: The Task model class for database operations.
    """
    model = Task

    @classmethod
    @DB.connection_context()
    def get_task(cls, task_id):
        """Retrieve detailed task information by task ID.

        This method fetches comprehensive task details including associated document,
        knowledge base, and tenant information. It also handles task retry logic and
        progress updates.

        Args:
            task_id (str): The unique identifier of the task to retrieve.

        Returns:
            dict: Task details dictionary containing all task information and related metadata.
                Returns None if task is not found or has exceeded retry limit.
        """
        fields = [
            cls.model.id,
            cls.model.doc_id,
@@ -105,6 +137,18 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_tasks(cls, doc_id: str):
        """Retrieve all tasks associated with a document.

        This method fetches all processing tasks for a given document, ordered by page
        number and creation time. It includes task progress and chunk information.

        Args:
            doc_id (str): The unique identifier of the document.

        Returns:
            list[dict]: List of task dictionaries containing task details.
                Returns None if no tasks are found.
        """
        fields = [
            cls.model.id,
            cls.model.from_page,
@@ -124,11 +168,31 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def update_chunk_ids(cls, id: str, chunk_ids: str):
        """Update the chunk IDs associated with a task.

        This method updates the chunk_ids field of a task, which stores the IDs of
        processed document chunks in a space-separated string format.

        Args:
            id (str): The unique identifier of the task.
            chunk_ids (str): Space-separated string of chunk identifiers.
        """
        cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()

    @classmethod
    @DB.connection_context()
    def get_ongoing_doc_name(cls):
        """Get names of documents that are currently being processed.

        This method retrieves information about documents that are in the processing state,
        including their locations and associated IDs. It uses database locking to ensure
        thread safety when accessing the task information.

        Returns:
            list[tuple]: A list of tuples, each containing (parent_id/kb_id, location)
                for documents currently being processed. Returns empty list if
                no documents are being processed.
        """
        with DB.lock("get_task", -1):
            docs = (
                cls.model.select(
@@ -172,6 +236,18 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def do_cancel(cls, id):
        """Check if a task should be cancelled based on its document status.

        This method determines whether a task should be cancelled by checking the
        associated document's run status and progress. A task should be cancelled
        if its document is marked for cancellation or has negative progress.

        Args:
            id (str): The unique identifier of the task to check.

        Returns:
            bool: True if the task should be cancelled, False otherwise.
        """
        task = cls.model.get_by_id(id)
        _, doc = DocumentService.get_by_id(task.doc_id)
        return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
@@ -179,6 +255,18 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def update_progress(cls, id, info):
        """Update the progress information for a task.

        This method updates both the progress message and completion percentage of a task.
        It handles platform-specific behavior (macOS vs others) and uses database locking
        when necessary to ensure thread safety.

        Args:
            id (str): The unique identifier of the task to update.
            info (dict): Dictionary containing progress information with keys:
                - progress_msg (str, optional): Progress message to append
                - progress (float, optional): Progress percentage (0.0 to 1.0)
        """
        if os.environ.get("MACOS"):
            if info["progress_msg"]:
                task = cls.model.get_by_id(id)
@@ -201,7 +289,26 @@ class TaskService(CommonService):
        ).execute()


def queue_tasks(doc: dict, bucket: str, name: str):
def queue_tasks(doc: dict, bucket: str, name: str, priority: int):
    """Create and queue document processing tasks.

    This function creates processing tasks for a document based on its type and configuration.
    It handles different document types (PDF, Excel, etc.) differently and manages task
    chunking and configuration. It also implements task reuse optimization by checking
    for previously completed tasks.

    Args:
        doc (dict): Document dictionary containing metadata and configuration.
        bucket (str): Storage bucket name where the document is stored.
        name (str): File name of the document.
        priority (int, optional): Priority level for task queueing (default is 0).

    Note:
        - For PDF documents, tasks are created per page range based on configuration
        - For Excel documents, tasks are created per row range
        - Task digests are calculated for optimization and reuse
        - Previous task chunks may be reused if available
    """
    def new_task():
        return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}

@@ -252,6 +359,7 @@ def queue_tasks(doc: dict, bucket: str, name: str):
        task_digest = hasher.hexdigest()
        task["digest"] = task_digest
        task["progress"] = 0.0
        task["priority"] = priority

    prev_tasks = TaskService.get_tasks(doc["id"])
    ck_num = 0
@@ -274,11 +382,31 @@ def queue_tasks(doc: dict, bucket: str, name: str):
    unfinished_task_array = [task for task in parse_task_array if task["progress"] < 1.0]
    for unfinished_task in unfinished_task_array:
        assert REDIS_CONN.queue_product(
            SVR_QUEUE_NAME, message=unfinished_task
            get_svr_queue_name(priority), message=unfinished_task
        ), "Can't access Redis. Please check the Redis' status."


def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
    """Attempt to reuse chunks from previous tasks for optimization.

    This function checks if chunks from previously completed tasks can be reused for
    the current task, which can significantly improve processing efficiency. It matches
    tasks based on page ranges and configuration digests.

    Args:
        task (dict): Current task dictionary to potentially reuse chunks for.
        prev_tasks (list[dict]): List of previous task dictionaries to check for reuse.
        chunking_config (dict): Configuration dictionary for chunk processing.

    Returns:
        int: Number of chunks successfully reused. Returns 0 if no chunks could be reused.

    Note:
        Chunks can only be reused if:
        - A previous task exists with matching page range and configuration digest
        - The previous task was completed successfully (progress = 1.0)
        - The previous task has valid chunk IDs
    """
    idx = 0
    while idx < len(prev_tasks):
        prev_task = prev_tasks[idx]
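The new priority parameter threads through to get_svr_queue_name(priority), so higher-priority documents can land on a separate Redis queue. A hypothetical call site (the doc dict is abridged to placeholders):

# Hypothetical: queue parsing tasks for an uploaded document at elevated priority.
from api.db.services.task_service import queue_tasks

doc = {"id": "doc-42", "parser_id": "naive", "parser_config": {}}  # abridged placeholder
queue_tasks(doc, bucket="kb-bucket", name="report.pdf", priority=1)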
46  api/db/services/user_canvas_version.py  Normal file
@@ -0,0 +1,46 @@
from api.db.db_models import UserCanvasVersion, DB
from api.db.services.common_service import CommonService
from peewee import DoesNotExist


class UserCanvasVersionService(CommonService):
    model = UserCanvasVersion

    @classmethod
    @DB.connection_context()
    def list_by_canvas_id(cls, user_canvas_id):
        try:
            user_canvas_version = cls.model.select(
                *[cls.model.id,
                  cls.model.create_time,
                  cls.model.title,
                  cls.model.create_date,
                  cls.model.update_date,
                  cls.model.user_canvas_id,
                  cls.model.update_time]
            ).where(cls.model.user_canvas_id == user_canvas_id)
            return user_canvas_version
        except DoesNotExist:
            return None
        except Exception:
            return None

    @classmethod
    @DB.connection_context()
    def delete_all_versions(cls, user_canvas_id):
        try:
            user_canvas_version = cls.model.select().where(cls.model.user_canvas_id == user_canvas_id).order_by(cls.model.create_time.desc())
            if user_canvas_version.count() > 20:
                delete_ids = []
                for i in range(20, user_canvas_version.count()):
                    delete_ids.append(user_canvas_version[i].id)

                cls.delete_by_ids(delete_ids)
            return True
        except DoesNotExist:
            return None
        except Exception:
            return None
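Despite its name, delete_all_versions implements a retention sweep: versions are ordered newest-first and everything beyond the 20 most recent is deleted. A tiny sketch of the behaviour (the counts are illustrative):

# Illustrative: with 25 saved versions (newest first), positions 20..24 are deleted.
versions = [f"v{i}" for i in range(25)]      # stand-in for the ordered query result
keep, drop = versions[:20], versions[20:]
assert len(keep) == 20 and len(drop) == 5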
@@ -29,11 +29,27 @@ from rag.settings import MINIO


class UserService(CommonService):
    """Service class for managing user-related database operations.

    This class extends CommonService to provide specialized functionality for user management,
    including authentication, user creation, updates, and deletions.

    Attributes:
        model: The User model class for database operations.
    """
    model = User

    @classmethod
    @DB.connection_context()
    def filter_by_id(cls, user_id):
        """Retrieve a user by their ID.

        Args:
            user_id: The unique identifier of the user.

        Returns:
            User object if found, None otherwise.
        """
        try:
            user = cls.model.select().where(cls.model.id == user_id).get()
            return user
@@ -43,6 +59,15 @@ class UserService(CommonService):
    @classmethod
    @DB.connection_context()
    def query_user(cls, email, password):
        """Authenticate a user with email and password.

        Args:
            email: User's email address.
            password: User's password in plain text.

        Returns:
            User object if authentication successful, None otherwise.
        """
        user = cls.model.select().where((cls.model.email == email),
                                        (cls.model.status == StatusEnum.VALID.value)).first()
        if user and check_password_hash(str(user.password), password):
@@ -85,6 +110,14 @@ class UserService(CommonService):


class TenantService(CommonService):
    """Service class for managing tenant-related database operations.

    This class extends CommonService to provide functionality for tenant management,
    including tenant information retrieval and credit management.

    Attributes:
        model: The Tenant model class for database operations.
    """
    model = Tenant

    @classmethod
@@ -136,8 +169,25 @@ class TenantService(CommonService):


class UserTenantService(CommonService):
    """Service class for managing user-tenant relationship operations.

    This class extends CommonService to handle the many-to-many relationship
    between users and tenants, managing user roles and tenant memberships.

    Attributes:
        model: The UserTenant model class for database operations.
    """
    model = UserTenant

    @classmethod
    @DB.connection_context()
    def filter_by_id(cls, user_tenant_id):
        try:
            user_tenant = cls.model.select().where((cls.model.id == user_tenant_id) & (cls.model.status == StatusEnum.VALID.value)).get()
            return user_tenant
        except peewee.DoesNotExist:
            return None

    @classmethod
    @DB.connection_context()
    def save(cls, **kwargs):
@@ -150,6 +200,7 @@ class UserTenantService(CommonService):
    @DB.connection_context()
    def get_by_tenant_id(cls, tenant_id):
        fields = [
            cls.model.id,
            cls.model.user_id,
            cls.model.status,
            cls.model.role,
@@ -181,3 +232,21 @@ class UserTenantService(CommonService):
        return list(cls.model.select(*fields)
                    .join(User, on=((cls.model.tenant_id == User.id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value)))
                    .where(cls.model.status == StatusEnum.VALID.value).dicts())

    @classmethod
    @DB.connection_context()
    def get_num_members(cls, user_id: str):
        cnt_members = cls.model.select(peewee.fn.COUNT(cls.model.id)).where(cls.model.tenant_id == user_id).scalar()
        return cnt_members

    @classmethod
    @DB.connection_context()
    def filter_by_tenant_and_user_id(cls, tenant_id, user_id):
        try:
            user_tenant = cls.model.select().where(
                (cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value) &
                (cls.model.user_id == user_id)
            ).first()
            return user_tenant
        except peewee.DoesNotExist:
            return None
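The two new helpers support team management: get_num_members counts a tenant's memberships and filter_by_tenant_and_user_id resolves a single membership row. A hypothetical caller (the seat limit and ids are invented for illustration):

# Hypothetical: enforce a seat limit before inviting a user to a team.
from api.db.services.user_service import UserTenantService

MAX_SEATS = 10  # made-up limit for illustration
if UserTenantService.get_num_members("tenant-123") >= MAX_SEATS:
    raise RuntimeError("Team is full")
if UserTenantService.filter_by_tenant_and_user_id("tenant-123", "user-7"):
    raise RuntimeError("Already a member")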
@@ -19,6 +19,7 @@
# beartype_all(conf=BeartypeConf(violation_type=UserWarning))      # <-- emit warnings from all code

from api.utils.log_utils import initRootLogger
from plugin import GlobalPluginManager
initRootLogger("ragflow_server")

import logging
@@ -28,6 +29,8 @@ import sys
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
import threading
import uuid

from werkzeug.serving import run_simple
from api import settings
@@ -41,16 +44,32 @@ from api.db.init_data import init_web_data
from api.versions import get_ragflow_version
from api.utils import show_configs
from rag.settings import print_rag_settings
from rag.utils.redis_conn import RedisDistributedLock

stop_event = threading.Event()

RAGFLOW_DEBUGPY_LISTEN = int(os.environ.get('RAGFLOW_DEBUGPY_LISTEN', "0"))

def update_progress():
    while True:
        time.sleep(6)
    lock_value = str(uuid.uuid4())
    redis_lock = RedisDistributedLock("update_progress", lock_value=lock_value, timeout=60)
    logging.info(f"update_progress lock_value: {lock_value}")
    while not stop_event.is_set():
        try:
            if redis_lock.acquire():
                DocumentService.update_progress()
                redis_lock.release()
            stop_event.wait(6)
        except Exception:
            logging.exception("update_progress exception")
        finally:
            redis_lock.release()

def signal_handler(sig, frame):
    logging.info("Received interrupt signal, shutting down...")
    stop_event.set()
    time.sleep(1)
    sys.exit(0)
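The progress updater is now guarded by a Redis distributed lock, so only one server instance at a time runs DocumentService.update_progress(), and it honours stop_event for clean shutdown. A generic sketch of the same leader-election loop (the lock API is illustrative, modelled on the RedisDistributedLock used above):

import threading, uuid, logging

stop = threading.Event()

def periodic_worker(lock_factory, do_work, interval=6):
    # One worker across all processes: whoever acquires the lock does the work.
    lock = lock_factory(name="update_progress", lock_value=str(uuid.uuid4()), timeout=60)
    while not stop.is_set():
        try:
            if lock.acquire():
                do_work()
        except Exception:
            logging.exception("periodic worker failed")
        finally:
            lock.release()  # releasing an unheld lock is assumed to be a no-op
        stop.wait(interval)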
if __name__ == '__main__':
    logging.info(r"""
@@ -71,6 +90,11 @@ if __name__ == '__main__':
    settings.init_settings()
    print_rag_settings()

    if RAGFLOW_DEBUGPY_LISTEN > 0:
        logging.info(f"debugpy listen on {RAGFLOW_DEBUGPY_LISTEN}")
        import debugpy
        debugpy.listen(("0.0.0.0", RAGFLOW_DEBUGPY_LISTEN))

    # init db
    init_web_db()
    init_web_data()
@@ -96,6 +120,11 @@ if __name__ == '__main__':
    RuntimeConfig.init_env()
    RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)

    GlobalPluginManager.load_plugins()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    thread = ThreadPoolExecutor(max_workers=1)
    thread.submit(update_progress)

@@ -112,4 +141,6 @@ if __name__ == '__main__':
        )
    except Exception:
        traceback.print_exc()
        stop_event.set()
        time.sleep(1)
        os.kill(os.getpid(), signal.SIGKILL)
155  api/settings.py
@@ -13,19 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
from datetime import date
from enum import IntEnum, Enum
import rag.utils.es_conn
import rag.utils.infinity_conn
from enum import Enum, IntEnum

import rag.utils
from rag.nlp import search
from graphrag import search as kg_search
from api.utils import get_base_config, decrypt_database_config
import rag.utils.es_conn
import rag.utils.infinity_conn
import rag.utils.opensearch_coon
from api.constants import RAG_FLOW_SERVICE_NAME
from api.utils import decrypt_database_config, get_base_config
from api.utils.file_utils import get_project_base_directory
from graphrag import search as kg_search
from rag.nlp import search

LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
LIGHTEN = int(os.environ.get("LIGHTEN", "0"))

LLM = None
LLM_FACTORY = None
@@ -40,8 +43,9 @@ PARSERS = None
HOST_IP = None
HOST_PORT = None
SECRET_KEY = None
FACTORY_LLM_INFOS = None

DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
DATABASE = decrypt_database_config(name=DATABASE_TYPE)

# authentication
@@ -52,128 +56,105 @@ CLIENT_AUTHENTICATION = None
HTTP_APP_KEY = None
GITHUB_OAUTH = None
FEISHU_OAUTH = None

OAUTH_CONFIG = None
DOC_ENGINE = None
docStoreConn = None

retrievaler = None
kg_retrievaler = None

# user registration switch
REGISTER_ENABLED = 1


# sandbox-executor-manager
SANDBOX_ENABLED = 0
SANDBOX_HOST = None

BUILTIN_EMBEDDING_MODELS = ["BAAI/bge-large-zh-v1.5@BAAI", "maidalun1020/bce-embedding-base_v1@Youdao"]


def init_settings():
    global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE
    LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
    DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
    global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE, FACTORY_LLM_INFOS, REGISTER_ENABLED
    LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
    DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
    DATABASE = decrypt_database_config(name=DATABASE_TYPE)
    LLM = get_base_config("user_default_llm", {})
    LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
    LLM_DEFAULT_MODELS = LLM.get("default_models", {})
    LLM_FACTORY = LLM.get("factory")
    LLM_BASE_URL = LLM.get("base_url")
    try:
        REGISTER_ENABLED = int(os.environ.get("REGISTER_ENABLED", "1"))
    except Exception:
        pass

    try:
        with open(os.path.join(get_project_base_directory(), "conf", "llm_factories.json"), "r") as f:
            FACTORY_LLM_INFOS = json.load(f)["factory_llm_infos"]
    except Exception:
        FACTORY_LLM_INFOS = []

    global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
    if not LIGHTEN:
        default_llm = {
            "Tongyi-Qianwen": {
                "chat_model": "qwen-plus",
                "embedding_model": "text-embedding-v2",
                "image2text_model": "qwen-vl-max",
                "asr_model": "paraformer-realtime-8k-v1",
            },
            "OpenAI": {
                "chat_model": "gpt-3.5-turbo",
                "embedding_model": "text-embedding-ada-002",
                "image2text_model": "gpt-4-vision-preview",
                "asr_model": "whisper-1",
            },
            "Azure-OpenAI": {
                "chat_model": "gpt-35-turbo",
                "embedding_model": "text-embedding-ada-002",
                "image2text_model": "gpt-4-vision-preview",
                "asr_model": "whisper-1",
            },
            "ZHIPU-AI": {
                "chat_model": "glm-3-turbo",
                "embedding_model": "embedding-2",
                "image2text_model": "glm-4v",
                "asr_model": "",
            },
            "Ollama": {
                "chat_model": "qwen-14B-chat",
                "embedding_model": "flag-embedding",
                "image2text_model": "",
                "asr_model": "",
            },
            "Moonshot": {
                "chat_model": "moonshot-v1-8k",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "DeepSeek": {
                "chat_model": "deepseek-chat",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "VolcEngine": {
                "chat_model": "",
                "embedding_model": "",
                "image2text_model": "",
                "asr_model": "",
            },
            "BAAI": {
                "chat_model": "",
                "embedding_model": "BAAI/bge-large-zh-v1.5",
                "image2text_model": "",
                "asr_model": "",
                "rerank_model": "BAAI/bge-reranker-v2-m3",
            }
        }
        EMBEDDING_MDL = BUILTIN_EMBEDDING_MODELS[0]

        if LLM_FACTORY:
            CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"] + f"@{LLM_FACTORY}"
            ASR_MDL = default_llm[LLM_FACTORY]["asr_model"] + f"@{LLM_FACTORY}"
            IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"] + f"@{LLM_FACTORY}"
        EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"] + "@BAAI"
        RERANK_MDL = default_llm["BAAI"]["rerank_model"] + "@BAAI"
        if LLM_DEFAULT_MODELS:
            CHAT_MDL = LLM_DEFAULT_MODELS.get("chat_model", CHAT_MDL)
            EMBEDDING_MDL = LLM_DEFAULT_MODELS.get("embedding_model", EMBEDDING_MDL)
            RERANK_MDL = LLM_DEFAULT_MODELS.get("rerank_model", RERANK_MDL)
            ASR_MDL = LLM_DEFAULT_MODELS.get("asr_model", ASR_MDL)
            IMAGE2TEXT_MDL = LLM_DEFAULT_MODELS.get("image2text_model", IMAGE2TEXT_MDL)

            # factory can be specified in the config name with "@". LLM_FACTORY will be used if not specified
            CHAT_MDL = CHAT_MDL + (f"@{LLM_FACTORY}" if "@" not in CHAT_MDL and CHAT_MDL != "" else "")
            EMBEDDING_MDL = EMBEDDING_MDL + (f"@{LLM_FACTORY}" if "@" not in EMBEDDING_MDL and EMBEDDING_MDL != "" else "")
            RERANK_MDL = RERANK_MDL + (f"@{LLM_FACTORY}" if "@" not in RERANK_MDL and RERANK_MDL != "" else "")
            ASR_MDL = ASR_MDL + (f"@{LLM_FACTORY}" if "@" not in ASR_MDL and ASR_MDL != "" else "")
            IMAGE2TEXT_MDL = IMAGE2TEXT_MDL + (f"@{LLM_FACTORY}" if "@" not in IMAGE2TEXT_MDL and IMAGE2TEXT_MDL != "" else "")
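Model identifiers here follow a name@factory convention; the block above appends @{LLM_FACTORY} only when a model name does not already carry a factory suffix and is non-empty. A minimal sketch of the rule (the helper name is invented for illustration):

def with_factory(model: str, factory: str) -> str:
    # Append "@factory" unless the name already names a factory or is empty.
    return model + (f"@{factory}" if "@" not in model and model != "" else "")

assert with_factory("qwen-plus", "Tongyi-Qianwen") == "qwen-plus@Tongyi-Qianwen"
assert with_factory("gpt-4o@OpenAI", "Tongyi-Qianwen") == "gpt-4o@OpenAI"
assert with_factory("", "OpenAI") == ""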
    global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
    API_KEY = LLM.get("api_key", "")
    API_KEY = LLM.get("api_key")
    PARSERS = LLM.get(
        "parsers",
        "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email,tag:Tag")
        "parsers", "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"
    )

    HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
    HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")

    SECRET_KEY = get_base_config(
        RAG_FLOW_SERVICE_NAME,
        {}).get("secret_key", str(date.today()))
    SECRET_KEY = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("secret_key", str(date.today()))

    global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH
    global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH, OAUTH_CONFIG
    # authentication
    AUTHENTICATION_CONF = get_base_config("authentication", {})

    # client
    CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get(
        "client", {}).get(
        "switch", False)
    CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get("client", {}).get("switch", False)
    HTTP_APP_KEY = AUTHENTICATION_CONF.get("client", {}).get("http_app_key")
    GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
    FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")

    OAUTH_CONFIG = get_base_config("oauth", {})

    global DOC_ENGINE, docStoreConn, retrievaler, kg_retrievaler
    DOC_ENGINE = os.environ.get('DOC_ENGINE', "elasticsearch")
    DOC_ENGINE = os.environ.get("DOC_ENGINE", "elasticsearch")
    # DOC_ENGINE = os.environ.get('DOC_ENGINE', "opensearch")
    lower_case_doc_engine = DOC_ENGINE.lower()
    if lower_case_doc_engine == "elasticsearch":
        docStoreConn = rag.utils.es_conn.ESConnection()
    elif lower_case_doc_engine == "infinity":
        docStoreConn = rag.utils.infinity_conn.InfinityConnection()
    elif lower_case_doc_engine == "opensearch":
        docStoreConn = rag.utils.opensearch_coon.OSConnection()
    else:
        raise Exception(f"Not supported doc engine: {DOC_ENGINE}")

    retrievaler = search.Dealer(docStoreConn)
    kg_retrievaler = kg_search.KGSearch(docStoreConn)

    if int(os.environ.get("SANDBOX_ENABLED", "0")):
        global SANDBOX_HOST
        SANDBOX_HOST = os.environ.get("SANDBOX_HOST", "sandbox-executor-manager")


class CustomEnum(Enum):
    @classmethod
@@ -70,6 +70,12 @@ def show_configs():
        if "password" in v:
            v = copy.deepcopy(v)
            v["password"] = "*" * 8
        if "access_key" in v:
            v = copy.deepcopy(v)
            v["access_key"] = "*" * 8
        if "secret_key" in v:
            v = copy.deepcopy(v)
            v["secret_key"] = "*" * 8
        msg += f"\n\t{k}: {v}"
    logging.info(msg)

@@ -351,6 +357,26 @@ def decrypt(line):
        line), "Fail to decrypt password!").decode('utf-8')


def decrypt2(crypt_text):
    from base64 import b64decode, b16decode
    from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
    from Crypto.PublicKey import RSA

    decode_data = b64decode(crypt_text)
    if len(decode_data) == 127:
        hex_fixed = '00' + decode_data.hex()
        decode_data = b16decode(hex_fixed.upper())

    file_path = os.path.join(
        file_utils.get_project_base_directory(),
        "conf",
        "private.pem")
    pem = open(file_path).read()
    rsa_key = RSA.importKey(pem, "Welcome")
    cipher = Cipher_PKCS1_v1_5.new(rsa_key)
    decrypt_text = cipher.decrypt(decode_data, None)
    return (b64decode(decrypt_text)).decode()


def download_img(url):
    if not url:
        return ""
@@ -13,12 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import functools
import json
import logging
import random
import time
from base64 import b64encode
from copy import deepcopy
from functools import wraps
from hmac import HMAC
from io import BytesIO
@@ -27,59 +28,62 @@ from uuid import uuid1

import requests
from flask import (
    Response, jsonify, send_file, make_response,
    Response,
    jsonify,
    make_response,
    send_file,
)
from flask import (
    request as flask_request,
)
from itsdangerous import URLSafeTimedSerializer
from peewee import OperationalError
from werkzeug.http import HTTP_STATUS_CODES

from api.db.db_models import APIToken
from api import settings
from api.constants import REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC
from api.db.db_models import APIToken
from api.db.services.llm_service import LLMService, TenantLLMService
from api.utils import CustomJSONEncoder, get_uuid, json_dumps

from api.utils import CustomJSONEncoder, get_uuid
from api.utils import json_dumps
from api.constants import REQUEST_WAIT_SEC, REQUEST_MAX_WAIT_SEC

requests.models.complexjson.dumps = functools.partial(
    json.dumps, cls=CustomJSONEncoder)
requests.models.complexjson.dumps = functools.partial(json.dumps, cls=CustomJSONEncoder)


def request(**kwargs):
    sess = requests.Session()
    stream = kwargs.pop('stream', sess.stream)
    timeout = kwargs.pop('timeout', None)
    kwargs['headers'] = {
        k.replace(
            '_',
            '-').upper(): v for k,
        v in kwargs.get(
            'headers',
            {}).items()}
    stream = kwargs.pop("stream", sess.stream)
    timeout = kwargs.pop("timeout", None)
    kwargs["headers"] = {k.replace("_", "-").upper(): v for k, v in kwargs.get("headers", {}).items()}
    prepped = requests.Request(**kwargs).prepare()

    if settings.CLIENT_AUTHENTICATION and settings.HTTP_APP_KEY and settings.SECRET_KEY:
        timestamp = str(round(time() * 1000))
        nonce = str(uuid1())
        signature = b64encode(HMAC(settings.SECRET_KEY.encode('ascii'), b'\n'.join([
            timestamp.encode('ascii'),
            nonce.encode('ascii'),
            settings.HTTP_APP_KEY.encode('ascii'),
            prepped.path_url.encode('ascii'),
            prepped.body if kwargs.get('json') else b'',
            urlencode(
                sorted(
                    kwargs['data'].items()),
                quote_via=quote,
                safe='-._~').encode('ascii')
            if kwargs.get('data') and isinstance(kwargs['data'], dict) else b'',
        ]), 'sha1').digest()).decode('ascii')
        signature = b64encode(
            HMAC(
                settings.SECRET_KEY.encode("ascii"),
                b"\n".join(
                    [
                        timestamp.encode("ascii"),
                        nonce.encode("ascii"),
                        settings.HTTP_APP_KEY.encode("ascii"),
                        prepped.path_url.encode("ascii"),
                        prepped.body if kwargs.get("json") else b"",
                        urlencode(sorted(kwargs["data"].items()), quote_via=quote, safe="-._~").encode("ascii") if kwargs.get("data") and isinstance(kwargs["data"], dict) else b"",
                    ]
                ),
                "sha1",
            ).digest()
        ).decode("ascii")

        prepped.headers.update({
            'TIMESTAMP': timestamp,
            'NONCE': nonce,
            'APP-KEY': settings.HTTP_APP_KEY,
            'SIGNATURE': signature,
        })
        prepped.headers.update(
            {
                "TIMESTAMP": timestamp,
                "NONCE": nonce,
                "APP-KEY": settings.HTTP_APP_KEY,
                "SIGNATURE": signature,
            }
        )

    return sess.send(prepped, stream=stream, timeout=timeout)

@@ -87,7 +91,7 @@ def request(**kwargs):
def get_exponential_backoff_interval(retries, full_jitter=False):
    """Calculate the exponential backoff wait time."""
    # Will be zero if factor equals 0
    countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2 ** retries))
    countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2**retries))
    # Full jitter according to
    # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
    if full_jitter:
@@ -96,12 +100,9 @@ def get_exponential_backoff_interval(retries, full_jitter=False):
    return max(0, countdown)

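As a concrete check of the backoff formula: with REQUEST_WAIT_SEC = 2 and REQUEST_MAX_WAIT_SEC = 60 (illustrative values; the real constants live in api.constants), the capped exponential schedule works out as below, and full jitter then draws uniformly from [0, countdown]:

import random

REQUEST_WAIT_SEC, REQUEST_MAX_WAIT_SEC = 2, 60  # illustrative values

for retries in range(6):
    countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2**retries))
    print(retries, countdown)   # 0->2, 1->4, 2->8, 3->16, 4->32, 5->60 (capped)

# With full_jitter=True the wait becomes random.uniform(0, countdown),
# which de-synchronizes many clients retrying at the same moment.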
def get_data_error_result(code=settings.RetCode.DATA_ERROR,
                          message='Sorry! Data missing!'):
def get_data_error_result(code=settings.RetCode.DATA_ERROR, message="Sorry! Data missing!"):
    logging.exception(Exception(message))
    result_dict = {
        "code": code,
        "message": message}
    result_dict = {"code": code, "message": message}
    response = {}
    for key, value in result_dict.items():
        if value is None and key != "code":
@@ -119,23 +120,27 @@ def server_error_response(e):
    except BaseException:
        pass
    if len(e.args) > 1:
        return get_json_result(
            code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
        return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
    if repr(e).find("index_not_found_exception") >= 0:
        return get_json_result(code=settings.RetCode.EXCEPTION_ERROR,
                               message="No chunk found, please upload file and parse it.")
        return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message="No chunk found, please upload file and parse it.")

    return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))


def error_response(response_code, message=None):
    if message is None:
        message = HTTP_STATUS_CODES.get(response_code, 'Unknown Error')
        message = HTTP_STATUS_CODES.get(response_code, "Unknown Error")

    return Response(json.dumps({
        'message': message,
        'code': response_code,
    }), status=response_code, mimetype='application/json')
    return Response(
        json.dumps(
            {
                "message": message,
                "code": response_code,
            }
        ),
        status=response_code,
        mimetype="application/json",
    )


def validate_request(*args, **kwargs):
@@ -160,13 +165,10 @@ def validate_request(*args, **kwargs):
        if no_arguments or error_arguments:
            error_string = ""
            if no_arguments:
                error_string += "required argument are missing: {}; ".format(
                    ",".join(no_arguments))
                error_string += "required argument are missing: {}; ".format(",".join(no_arguments))
            if error_arguments:
                error_string += "required argument values: {}".format(
                    ",".join(["{}={}".format(a[0], a[1]) for a in error_arguments]))
            return get_json_result(
                code=settings.RetCode.ARGUMENT_ERROR, message=error_string)
                error_string += "required argument values: {}".format(",".join(["{}={}".format(a[0], a[1]) for a in error_arguments]))
            return get_json_result(code=settings.RetCode.ARGUMENT_ERROR, message=error_string)
        return func(*_args, **_kwargs)

    return decorated_function
@@ -180,8 +182,7 @@ def not_allowed_parameters(*params):
        input_arguments = flask_request.json or flask_request.form.to_dict()
        for param in params:
            if param in input_arguments:
                return get_json_result(
                    code=settings.RetCode.ARGUMENT_ERROR, message=f"Parameter {param} isn't allowed")
                return get_json_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"Parameter {param} isn't allowed")
        return f(*args, **kwargs)

    return wrapper
@@ -190,14 +191,14 @@ def not_allowed_parameters(*params):


def is_localhost(ip):
    return ip in {'127.0.0.1', '::1', '[::1]', 'localhost'}
    return ip in {"127.0.0.1", "::1", "[::1]", "localhost"}


def send_file_in_mem(data, filename):
    if not isinstance(data, (str, bytes)):
        data = json_dumps(data)
    if isinstance(data, str):
        data = data.encode('utf-8')
        data = data.encode("utf-8")

    f = BytesIO()
    f.write(data)
@@ -206,7 +207,7 @@ def send_file_in_mem(data, filename):
    return send_file(f, as_attachment=True, attachment_filename=filename)


def get_json_result(code=settings.RetCode.SUCCESS, message='success', data=None):
def get_json_result(code=settings.RetCode.SUCCESS, message="success", data=None):
    response = {"code": code, "message": message, "data": data}
    return jsonify(response)

@@ -214,27 +215,24 @@ def get_json_result(code=settings.RetCode.SUCCESS, message='success', data=None)
def apikey_required(func):
    @wraps(func)
    def decorated_function(*args, **kwargs):
        token = flask_request.headers.get('Authorization').split()[1]
        token = flask_request.headers.get("Authorization").split()[1]
        objs = APIToken.query(token=token)
        if not objs:
            return build_error_result(
                message='API-KEY is invalid!', code=settings.RetCode.FORBIDDEN
            )
        kwargs['tenant_id'] = objs[0].tenant_id
            return build_error_result(message="API-KEY is invalid!", code=settings.RetCode.FORBIDDEN)
        kwargs["tenant_id"] = objs[0].tenant_id
        return func(*args, **kwargs)

    return decorated_function


def build_error_result(code=settings.RetCode.FORBIDDEN, message='success'):
def build_error_result(code=settings.RetCode.FORBIDDEN, message="success"):
    response = {"code": code, "message": message}
    response = jsonify(response)
    response.status_code = code
    return response


def construct_response(code=settings.RetCode.SUCCESS,
                       message='success', data=None, auth=None):
def construct_response(code=settings.RetCode.SUCCESS, message="success", data=None, auth=None):
    result_dict = {"code": code, "message": message, "data": data}
    response_dict = {}
    for key, value in result_dict.items():
@@ -253,7 +251,7 @@ def construct_response(code=settings.RetCode.SUCCESS,
    return response


def construct_result(code=settings.RetCode.DATA_ERROR, message='data is missing'):
def construct_result(code=settings.RetCode.DATA_ERROR, message="data is missing"):
    result_dict = {"code": code, "message": message}
    response = {}
    for key, value in result_dict.items():
@@ -264,7 +262,7 @@ def construct_result(code=settings.RetCode.DATA_ERROR, message='data is missing'
    return jsonify(response)


def construct_json_result(code=settings.RetCode.SUCCESS, message='success', data=None):
def construct_json_result(code=settings.RetCode.SUCCESS, message="success", data=None):
    if data is None:
        return jsonify({"code": code, "message": message})
    else:
@@ -286,7 +284,7 @@ def construct_error_response(e):
def token_required(func):
    @wraps(func)
    def decorated_function(*args, **kwargs):
        authorization_str = flask_request.headers.get('Authorization')
        authorization_str = flask_request.headers.get("Authorization")
        if not authorization_str:
            return get_json_result(data=False, message="`Authorization` can't be empty")
|
||||
authorization_list = authorization_str.split()
|
||||
@ -295,11 +293,8 @@ def token_required(func):
|
||||
token = authorization_list[1]
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
return get_json_result(
|
||||
data=False, message='Authentication error: API key is invalid!',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
kwargs['tenant_id'] = objs[0].tenant_id
|
||||
return get_json_result(data=False, message="Authentication error: API key is invalid!", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
kwargs["tenant_id"] = objs[0].tenant_id
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return decorated_function
|
||||
@ -316,11 +311,11 @@ def get_result(code=settings.RetCode.SUCCESS, message="", data=None):
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
def get_error_data_result(message='Sorry! Data missing!', code=settings.RetCode.DATA_ERROR,
|
||||
):
|
||||
result_dict = {
|
||||
"code": code,
|
||||
"message": message}
|
||||
def get_error_data_result(
|
||||
message="Sorry! Data missing!",
|
||||
code=settings.RetCode.DATA_ERROR,
|
||||
):
|
||||
result_dict = {"code": code, "message": message}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
@ -330,23 +325,21 @@ def get_error_data_result(message='Sorry! Data missing!', code=settings.RetCode.
|
||||
return jsonify(response)
|
||||
|
||||
|
||||
def generate_confirmation_token(tenent_id):
|
||||
serializer = URLSafeTimedSerializer(tenent_id)
|
||||
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenent_id)[2:34]
|
||||
def get_error_argument_result(message="Invalid arguments"):
|
||||
return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=message)
|
||||
|
||||
|
||||
def valid(permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method):
|
||||
if valid_parameter(permission, valid_permission):
|
||||
return valid_parameter(permission, valid_permission)
|
||||
if valid_parameter(language, valid_language):
|
||||
return valid_parameter(language, valid_language)
|
||||
if valid_parameter(chunk_method, valid_chunk_method):
|
||||
return valid_parameter(chunk_method, valid_chunk_method)
|
||||
def get_error_permission_result(message="Permission error"):
|
||||
return get_result(code=settings.RetCode.PERMISSION_ERROR, message=message)
|
||||
|
||||
|
||||
def valid_parameter(parameter, valid_values):
|
||||
if parameter and parameter not in valid_values:
|
||||
return get_error_data_result(f"'{parameter}' is not in {valid_values}")
|
||||
def get_error_operating_result(message="Operating error"):
|
||||
return get_result(code=settings.RetCode.OPERATING_ERROR, message=message)
|
||||
|
||||
|
||||
def generate_confirmation_token(tenant_id):
|
||||
serializer = URLSafeTimedSerializer(tenant_id)
|
||||
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenant_id)[2:34]
|
||||
|
||||
|
||||
def get_parser_config(chunk_method, parser_config):
|
||||
@ -355,8 +348,7 @@ def get_parser_config(chunk_method, parser_config):
|
||||
if not chunk_method:
|
||||
chunk_method = "naive"
|
||||
key_mapping = {
|
||||
"naive": {"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False, "layout_recognize": "DeepDOC",
|
||||
"raptor": {"use_raptor": False}},
|
||||
"naive": {"chunk_token_num": 128, "delimiter": r"\n", "html4excel": False, "layout_recognize": "DeepDOC", "raptor": {"use_raptor": False}},
|
||||
"qa": {"raptor": {"use_raptor": False}},
|
||||
"tag": None,
|
||||
"resume": None,
|
||||
@ -367,9 +359,201 @@ def get_parser_config(chunk_method, parser_config):
|
||||
"laws": {"raptor": {"use_raptor": False}},
|
||||
"presentation": {"raptor": {"use_raptor": False}},
|
||||
"one": None,
|
||||
"knowledge_graph": {"chunk_token_num": 8192, "delimiter": "\\n!?;。;!?",
|
||||
"entity_types": ["organization", "person", "location", "event", "time"]},
|
||||
"knowledge_graph": {"chunk_token_num": 8192, "delimiter": r"\n", "entity_types": ["organization", "person", "location", "event", "time"]},
|
||||
"email": None,
|
||||
"picture": None}
|
||||
"picture": None,
|
||||
}
|
||||
parser_config = key_mapping[chunk_method]
|
||||
return parser_config
|
||||
|
||||
|
||||
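For orientation, the table above only kicks in as a fallback: assuming the unshown lines at the top of `get_parser_config` return a caller-supplied `parser_config` unchanged, passing no config selects the per-method defaults. A minimal sketch:

```python
# Illustrative; assumes the caller passed no explicit parser_config.
get_parser_config("qa", None)       # -> {"raptor": {"use_raptor": False}}
get_parser_config("picture", None)  # -> None (no default config for pictures)
get_parser_config(None, None)       # empty chunk_method falls back to the "naive" defaults
```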
+def get_data_openai(
+    id=None,
+    created=None,
+    model=None,
+    prompt_tokens=0,
+    completion_tokens=0,
+    content=None,
+    finish_reason=None,
+    object="chat.completion",
+    param=None,
+):
+    total_tokens = prompt_tokens + completion_tokens
+    return {
+        "id": f"{id}",
+        "object": object,
+        "created": int(time.time()) if created else None,
+        "model": model,
+        "param": param,
+        "usage": {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens,
+            "completion_tokens_details": {"reasoning_tokens": 0, "accepted_prediction_tokens": 0, "rejected_prediction_tokens": 0},
+        },
+        "choices": [{"message": {"role": "assistant", "content": content}, "logprobs": None, "finish_reason": finish_reason, "index": 0}],
+    }
+
+
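`get_data_openai` mirrors the OpenAI chat-completion envelope so RAGFlow responses can be consumed by OpenAI-compatible clients. A quick sketch of a call (values are illustrative; note that `created` acts only as a flag, the timestamp itself comes from `time.time()`):

```python
resp = get_data_openai(
    id="chatcmpl-123",   # illustrative id
    created=True,        # truthy flag -> "created" becomes int(time.time())
    model="some-model",  # illustrative model name
    prompt_tokens=12,
    completion_tokens=34,
    content="Hello!",
    finish_reason="stop",
)
assert resp["usage"]["total_tokens"] == 46
assert resp["choices"][0]["message"]["content"] == "Hello!"
```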
+def check_duplicate_ids(ids, id_type="item"):
+    """
+    Check for duplicate IDs in a list and return unique IDs and error messages.
+
+    Args:
+        ids (list): List of IDs to check for duplicates
+        id_type (str): Type of ID for error messages (e.g., 'document', 'dataset', 'chunk')
+
+    Returns:
+        tuple: (unique_ids, error_messages)
+            - unique_ids (list): List of unique IDs
+            - error_messages (list): List of error messages for duplicate IDs
+    """
+    id_count = {}
+    duplicate_messages = []
+
+    # Count occurrences of each ID
+    for id_value in ids:
+        id_count[id_value] = id_count.get(id_value, 0) + 1
+
+    # Check for duplicates
+    for id_value, count in id_count.items():
+        if count > 1:
+            duplicate_messages.append(f"Duplicate {id_type} ids: {id_value}")
+
+    # Return unique IDs and error messages
+    return list(set(ids)), duplicate_messages
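Worth noting: the deduplication goes through `list(set(ids))`, so the returned IDs are unique but not necessarily in input order. A usage sketch:

```python
unique_ids, errors = check_duplicate_ids(["d1", "d2", "d1"], id_type="document")
# errors == ["Duplicate document ids: d1"]
# sorted(unique_ids) == ["d1", "d2"]; the original ordering is not preserved
```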
+
+
+def verify_embedding_availability(embd_id: str, tenant_id: str) -> tuple[bool, Response | None]:
+    """
+    Verifies availability of an embedding model for a specific tenant.
+
+    Implements a four-stage validation process:
+    1. Model identifier parsing and validation
+    2. System support verification
+    3. Tenant authorization check
+    4. Database operation error handling
+
+    Args:
+        embd_id (str): Unique identifier for the embedding model in format "model_name@factory"
+        tenant_id (str): Tenant identifier for access control
+
+    Returns:
+        tuple[bool, Response | None]:
+            - First element (bool):
+                - True: Model is available and authorized
+                - False: Validation failed
+            - Second element contains:
+                - None on success
+                - Error detail dict on failure
+
+    Raises:
+        ValueError: When model identifier format is invalid
+        OperationalError: When database connection fails (auto-handled)
+
+    Examples:
+        >>> verify_embedding_availability("text-embedding@openai", "tenant_123")
+        (True, None)
+
+        >>> verify_embedding_availability("invalid_model", "tenant_123")
+        (False, {'code': 101, 'message': "Unsupported model: <invalid_model>"})
+    """
+    try:
+        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(embd_id)
+        if not LLMService.query(llm_name=llm_name, fid=llm_factory, model_type="embedding"):
+            return False, get_error_argument_result(f"Unsupported model: <{embd_id}>")
+
+        # Tongyi-Qianwen is added to TenantLLM by default, but remains unusable with empty api_key
+        tenant_llms = TenantLLMService.get_my_llms(tenant_id=tenant_id)
+        is_tenant_model = any(llm["llm_name"] == llm_name and llm["llm_factory"] == llm_factory and llm["model_type"] == "embedding" for llm in tenant_llms)
+
+        is_builtin_model = embd_id in settings.BUILTIN_EMBEDDING_MODELS
+        if not (is_builtin_model or is_tenant_model):
+            return False, get_error_argument_result(f"Unauthorized model: <{embd_id}>")
+    except OperationalError as e:
+        logging.exception(e)
+        return False, get_error_data_result(message="Database operation failed")
+
+    return True, None
+
+
+def deep_merge(default: dict, custom: dict) -> dict:
+    """
+    Recursively merges two dictionaries with priority given to `custom` values.
+
+    Creates a deep copy of the `default` dictionary and iteratively merges nested
+    dictionaries using a stack-based approach. Non-dict values in `custom` will
+    completely override corresponding entries in `default`.
+
+    Args:
+        default (dict): Base dictionary containing default values.
+        custom (dict): Dictionary containing overriding values.
+
+    Returns:
+        dict: New merged dictionary combining values from both inputs.
+
+    Example:
+        >>> from copy import deepcopy
+        >>> default = {"a": 1, "nested": {"x": 10, "y": 20}}
+        >>> custom = {"b": 2, "nested": {"y": 99, "z": 30}}
+        >>> deep_merge(default, custom)
+        {'a': 1, 'b': 2, 'nested': {'x': 10, 'y': 99, 'z': 30}}
+
+        >>> deep_merge({"config": {"mode": "auto"}}, {"config": "manual"})
+        {'config': 'manual'}
+
+    Notes:
+        1. Merge priority is always given to `custom` values at all nesting levels
+        2. Non-dict values (e.g. list, str) in `custom` will replace entire values
+           in `default`, even if the original value was a dictionary
+        3. Time complexity: O(N) where N is total key-value pairs in `custom`
+        4. Recommended for configuration merging and nested data updates
+    """
+    merged = deepcopy(default)
+    stack = [(merged, custom)]
+
+    while stack:
+        base_dict, override_dict = stack.pop()
+
+        for key, val in override_dict.items():
+            if key in base_dict and isinstance(val, dict) and isinstance(base_dict[key], dict):
+                stack.append((base_dict[key], val))
+            else:
+                base_dict[key] = val
+
+    return merged
+
+
+def remap_dictionary_keys(source_data: dict, key_aliases: dict = None) -> dict:
+    """
+    Transform dictionary keys using a configurable mapping schema.
+
+    Args:
+        source_data: Original dictionary to process
+        key_aliases: Custom key transformation rules (Optional)
+            When provided, overrides default key mapping
+            Format: {<original_key>: <new_key>, ...}
+
+    Returns:
+        dict: New dictionary with transformed keys preserving original values
+
+    Example:
+        >>> input_data = {"old_key": "value", "another_field": 42}
+        >>> remap_dictionary_keys(input_data, {"old_key": "new_key"})
+        {'new_key': 'value', 'another_field': 42}
+    """
+    DEFAULT_KEY_MAP = {
+        "chunk_num": "chunk_count",
+        "doc_num": "document_count",
+        "parser_id": "chunk_method",
+        "embd_id": "embedding_model",
+    }
+
+    transformed_data = {}
+    mapping = key_aliases or DEFAULT_KEY_MAP
+
+    for original_key, value in source_data.items():
+        mapped_key = mapping.get(original_key, original_key)
+        transformed_data[mapped_key] = value
+
+    return transformed_data
@@ -17,19 +17,28 @@ import base64
 import json
 import os
 import re
+import shutil
+import subprocess
 import sys
+import tempfile
+import threading
 from io import BytesIO

+import pdfplumber
-from PIL import Image
 from cachetools import LRUCache, cached
+from PIL import Image
 from ruamel.yaml import YAML

-from api.db import FileType
 from api.constants import IMG_BASE64_PREFIX
+from api.db import FileType

 PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
 RAG_BASE = os.getenv("RAG_BASE")

+LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
+if LOCK_KEY_pdfplumber not in sys.modules:
+    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
+

 def get_project_base_directory(*args):
     global PROJECT_BASE
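The `sys.modules` registration above is a small trick worth calling out: `sys.modules` is a single process-wide dict, so even if this module is imported under two different names (or reloaded), every copy finds the same `threading.Lock` under the string key instead of creating its own. Storing a non-module value in `sys.modules` is unusual, but it works because it is an ordinary dict. A sketch of the consumer side, as `thumbnail_img` uses it further down:

```python
import sys

# Assumes the registration above has already run on first import;
# any code in the process can then retrieve the shared lock by its key.
with sys.modules["global_shared_lock_pdfplumber"]:
    ...  # serialize pdfplumber access across threads here
```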
@@ -68,7 +77,7 @@ def get_rag_python_directory(*args):


 def get_home_cache_dir():
-    dir = os.path.join(os.path.expanduser('~'), ".ragflow")
+    dir = os.path.join(os.path.expanduser("~"), ".ragflow")
     try:
         os.mkdir(dir)
     except OSError:
@@ -86,9 +95,7 @@ def load_json_conf(conf_path):
         with open(json_conf_path) as f:
             return json.load(f)
     except BaseException:
-        raise EnvironmentError(
-            "loading json file config from '{}' failed!".format(json_conf_path)
-        )
+        raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))


 def dump_json_conf(config_data, conf_path):
@@ -100,9 +107,7 @@ def dump_json_conf(config_data, conf_path):
         with open(json_conf_path, "w") as f:
             json.dump(config_data, f, indent=4)
     except BaseException:
-        raise EnvironmentError(
-            "loading json file config from '{}' failed!".format(json_conf_path)
-        )
+        raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))


 def load_json_conf_real_time(conf_path):
@@ -114,9 +119,7 @@ def load_json_conf_real_time(conf_path):
         with open(json_conf_path) as f:
             return json.load(f)
     except BaseException:
-        raise EnvironmentError(
-            "loading json file config from '{}' failed!".format(json_conf_path)
-        )
+        raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))


 def load_yaml_conf(conf_path):
@@ -124,12 +127,10 @@ def load_yaml_conf(conf_path):
         conf_path = os.path.join(get_project_base_directory(), conf_path)
     try:
         with open(conf_path) as f:
-            yaml = YAML(typ='safe', pure=True)
+            yaml = YAML(typ="safe", pure=True)
             return yaml.load(f)
     except Exception as e:
-        raise EnvironmentError(
-            "loading yaml file config from {} failed:".format(conf_path), e
-        )
+        raise EnvironmentError("loading yaml file config from {} failed:".format(conf_path), e)


 def rewrite_yaml_conf(conf_path, config):
@@ -140,13 +141,11 @@ def rewrite_yaml_conf(conf_path, config):
             yaml = YAML(typ="safe")
             yaml.dump(config, f)
     except Exception as e:
-        raise EnvironmentError(
-            "rewrite yaml file config {} failed:".format(conf_path), e
-        )
+        raise EnvironmentError("rewrite yaml file config {} failed:".format(conf_path), e)


 def rewrite_json_file(filepath, json_data):
-    with open(filepath, "w", encoding='utf-8') as f:
+    with open(filepath, "w", encoding="utf-8") as f:
         json.dump(json_data, f, indent=4, separators=(",", ": "))
         f.close()

@@ -156,12 +155,10 @@ def filename_type(filename):
     if re.match(r".*\.pdf$", filename):
         return FileType.PDF.value

-    if re.match(
-            r".*\.(eml|doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html|sql)$", filename):
+    if re.match(r".*\.(eml|doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html|sql)$", filename):
         return FileType.DOC.value

-    if re.match(
-            r".*\.(wav|flac|ape|alac|wavpack|wv|mp3|aac|ogg|vorbis|opus|mp3)$", filename):
+    if re.match(r".*\.(wav|flac|ape|alac|wavpack|wv|mp3|aac|ogg|vorbis|opus|mp3)$", filename):
         return FileType.AURAL.value

     if re.match(r".*\.(jpg|jpeg|png|tif|gif|pcx|tga|exif|fpx|svg|psd|cdr|pcd|dxf|ufo|eps|ai|raw|WMF|webp|avif|apng|icon|ico|mpg|mpeg|avi|rm|rmvb|mov|wmv|asf|dat|asx|wvx|mpe|mpa|mp4)$", filename):
@@ -169,13 +166,16 @@ def filename_type(filename):

     return FileType.OTHER.value


 def thumbnail_img(filename, blob):
     """
     MySQL LongText max length is 65535
     """
+    filename = filename.lower()
     if re.match(r".*\.pdf$", filename):
-        pdf = pdfplumber.open(BytesIO(blob))
+        with sys.modules[LOCK_KEY_pdfplumber]:
+            pdf = pdfplumber.open(BytesIO(blob))
+
         buffered = BytesIO()
         resolution = 32
         img = None
@@ -188,6 +188,7 @@ def thumbnail_img(filename, blob):
                 buffered = BytesIO()
             else:
                 break
+        pdf.close()
         return img

     elif re.match(r".*\.(jpg|jpeg|png|tif|gif|icon|ico|webp)$", filename):
@@ -198,8 +199,9 @@ def thumbnail_img(filename, blob):
         return buffered.getvalue()

     elif re.match(r".*\.(ppt|pptx)$", filename):
-        import aspose.slides as slides
         import aspose.pydrawing as drawing
+        import aspose.slides as slides
+
         try:
             with slides.Presentation(BytesIO(blob)) as presentation:
                 buffered = BytesIO()
@@ -207,8 +209,7 @@ def thumbnail_img(filename, blob):
                 img = None
                 for _ in range(10):
                     # https://reference.aspose.com/slides/python-net/aspose.slides/slide/get_thumbnail/#float-float
-                    presentation.slides[0].get_thumbnail(scale, scale).save(
-                        buffered, drawing.imaging.ImageFormat.png)
+                    presentation.slides[0].get_thumbnail(scale, scale).save(buffered, drawing.imaging.ImageFormat.png)
                     img = buffered.getvalue()
                     if len(img) >= 64000:
                         scale = scale / 2.0
@@ -224,10 +225,9 @@ def thumbnail_img(filename, blob):
 def thumbnail(filename, blob):
     img = thumbnail_img(filename, blob)
     if img is not None:
-        return IMG_BASE64_PREFIX + \
-            base64.b64encode(img).decode("utf-8")
+        return IMG_BASE64_PREFIX + base64.b64encode(img).decode("utf-8")
     else:
-        return ''
+        return ""


 def traversal_files(base):
@@ -235,3 +235,52 @@ def traversal_files(base):
         for f in fs:
             fullname = os.path.join(root, f)
             yield fullname
+
+
+def repair_pdf_with_ghostscript(input_bytes):
+    if shutil.which("gs") is None:
+        return input_bytes
+
+    with tempfile.NamedTemporaryFile(suffix=".pdf") as temp_in, tempfile.NamedTemporaryFile(suffix=".pdf") as temp_out:
+        temp_in.write(input_bytes)
+        temp_in.flush()
+
+        cmd = [
+            "gs",
+            "-o",
+            temp_out.name,
+            "-sDEVICE=pdfwrite",
+            "-dPDFSETTINGS=/prepress",
+            temp_in.name,
+        ]
+        try:
+            proc = subprocess.run(cmd, capture_output=True, text=True)
+            if proc.returncode != 0:
+                return input_bytes
+        except Exception:
+            return input_bytes
+
+        temp_out.seek(0)
+        repaired_bytes = temp_out.read()
+
+    return repaired_bytes
+
+
+def read_potential_broken_pdf(blob):
+    def try_open(blob):
+        try:
+            with pdfplumber.open(BytesIO(blob)) as pdf:
+                if pdf.pages:
+                    return True
+        except Exception:
+            return False
+        return False
+
+    if try_open(blob):
+        return blob
+
+    repaired = repair_pdf_with_ghostscript(blob)
+    if try_open(repaired):
+        return repaired
+
+    return blob
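The two new helpers give a conservative repair path: try pdfplumber first, shell out to Ghostscript (`gs -o out.pdf -sDEVICE=pdfwrite -dPDFSETTINGS=/prepress in.pdf`) only if opening fails, and return the original bytes whenever repair does not help. A sketch of the intended use (file name illustrative):

```python
from io import BytesIO

import pdfplumber

with open("maybe_broken.pdf", "rb") as fh:  # illustrative path
    blob = fh.read()

usable = read_potential_broken_pdf(blob)    # repaired bytes, or the original
with pdfplumber.open(BytesIO(usable)) as pdf:
    print(f"{len(pdf.pages)} page(s)")
```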
@@ -18,6 +18,8 @@ import os.path
 import logging
 from logging.handlers import RotatingFileHandler

+initialized_root_logger = False
+
 def get_project_base_directory():
     PROJECT_BASE = os.path.abspath(
         os.path.join(
@@ -29,10 +31,13 @@ def get_project_base_directory():
     return PROJECT_BASE

 def initRootLogger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
-    logger = logging.getLogger()
-    if logger.hasHandlers():
+    global initialized_root_logger
+    if initialized_root_logger:
         return
+    initialized_root_logger = True
+
+    logger = logging.getLogger()
+    logger.handlers.clear()
     log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))

     os.makedirs(os.path.dirname(log_path), exist_ok=True)
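The guard change is subtle but deliberate: `hasHandlers()` returns True whenever any root handler exists, so a third-party library that touched the root logger used to suppress this setup entirely. The module-level flag makes `initRootLogger` idempotent per process, and `logger.handlers.clear()` evicts whatever was installed before. Illustrative:

```python
initRootLogger("ragflow_server")  # clears stale handlers, then configures logging
initRootLogger("ragflow_server")  # immediate return: the module flag is already set
```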
api/utils/validation_utils.py (new file, 653 lines)
@@ -0,0 +1,653 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import Counter
from enum import auto
from typing import Annotated, Any
from uuid import UUID

from flask import Request
from pydantic import BaseModel, Field, StringConstraints, ValidationError, field_validator
from pydantic_core import PydanticCustomError
from strenum import StrEnum
from werkzeug.exceptions import BadRequest, UnsupportedMediaType

from api.constants import DATASET_NAME_LIMIT


def validate_and_parse_json_request(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None, exclude_unset: bool = False) -> tuple[dict[str, Any] | None, str | None]:
    """
    Validates and parses JSON requests through a multi-stage validation pipeline.

    Implements a four-stage validation process:
    1. Content-Type verification (must be application/json)
    2. JSON syntax validation
    3. Payload structure type checking
    4. Pydantic model validation with error formatting

    Args:
        request (Request): Flask request object containing HTTP payload
        validator (type[BaseModel]): Pydantic model class for data validation
        extras (dict[str, Any] | None): Additional fields to merge into payload
            before validation. These fields will be removed from the final output
        exclude_unset (bool): Whether to exclude fields that have not been explicitly set

    Returns:
        tuple[Dict[str, Any] | None, str | None]:
            - First element:
                - Validated dictionary on success
                - None on validation failure
            - Second element:
                - None on success
                - Diagnostic error message on failure

    Raises:
        UnsupportedMediaType: When Content-Type header is not application/json
        BadRequest: For structural JSON syntax errors
        ValidationError: When payload violates Pydantic schema rules

    Examples:
        >>> validate_and_parse_json_request(valid_request, DatasetSchema)
        ({"name": "Dataset1", "format": "csv"}, None)

        >>> validate_and_parse_json_request(xml_request, DatasetSchema)
        (None, "Unsupported content type: Expected application/json, got text/xml")

        >>> validate_and_parse_json_request(bad_json_request, DatasetSchema)
        (None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding")

    Notes:
        1. Validation Priority:
            - Content-Type verification precedes JSON parsing
            - Structural validation occurs before schema validation
        2. Extra fields added via `extras` parameter are automatically removed
           from the final output after validation
    """
    try:
        payload = request.get_json() or {}
    except UnsupportedMediaType:
        return None, f"Unsupported content type: Expected application/json, got {request.content_type}"
    except BadRequest:
        return None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding"

    if not isinstance(payload, dict):
        return None, f"Invalid request payload: expected object, got {type(payload).__name__}"

    try:
        if extras is not None:
            payload.update(extras)
        validated_request = validator(**payload)
    except ValidationError as e:
        return None, format_validation_error_message(e)

    parsed_payload = validated_request.model_dump(by_alias=True, exclude_unset=exclude_unset)

    if extras is not None:
        for key in list(parsed_payload.keys()):
            if key in extras:
                del parsed_payload[key]

    return parsed_payload, None


def validate_and_parse_request_args(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None) -> tuple[dict[str, Any] | None, str | None]:
    """
    Validates and parses request arguments against a Pydantic model.

    This function performs a complete request validation workflow:
    1. Extracts query parameters from the request
    2. Merges with optional extra values (if provided)
    3. Validates against the specified Pydantic model
    4. Cleans the output by removing extra values
    5. Returns either parsed data or an error message

    Args:
        request (Request): Web framework request object containing query parameters
        validator (type[BaseModel]): Pydantic model class for validation
        extras (dict[str, Any] | None): Optional additional values to include in validation
            but exclude from final output. Defaults to None.

    Returns:
        tuple[dict[str, Any] | None, str | None]:
            - First element: Validated/parsed arguments as dict if successful, None otherwise
            - Second element: Formatted error message if validation failed, None otherwise

    Behavior:
        - Query parameters are merged with extras before validation
        - Extras are automatically removed from the final output
        - All validation errors are formatted into a human-readable string

    Raises:
        TypeError: If validator is not a Pydantic BaseModel subclass

    Examples:
        Successful validation:
        >>> validate_and_parse_request_args(request, MyValidator)
        ({'param1': 'value'}, None)

        Failed validation:
        >>> validate_and_parse_request_args(request, MyValidator)
        (None, "param1: Field required")

        With extras:
        >>> validate_and_parse_request_args(request, MyValidator, extras={'internal_id': 123})
        ({'param1': 'value'}, None)  # internal_id removed from output

    Notes:
        - Uses request.args.to_dict() for Flask-compatible parameter extraction
        - Maintains immutability of original request arguments
        - Preserves type conversion from Pydantic validation
    """
    args = request.args.to_dict(flat=True)
    try:
        if extras is not None:
            args.update(extras)
        validated_args = validator(**args)
    except ValidationError as e:
        return None, format_validation_error_message(e)

    parsed_args = validated_args.model_dump()
    if extras is not None:
        for key in list(parsed_args.keys()):
            if key in extras:
                del parsed_args[key]

    return parsed_args, None


def format_validation_error_message(e: ValidationError) -> str:
    """
    Formats validation errors into a standardized string format.

    Processes pydantic ValidationError objects to create human-readable error messages
    containing field locations, error descriptions, and input values.

    Args:
        e (ValidationError): The validation error instance containing error details

    Returns:
        str: Formatted error messages joined by newlines. Each line contains:
            - Field path (dot-separated)
            - Error message
            - Truncated input value (max 128 chars)

    Example:
        >>> try:
        ...     UserModel(name=123, email="invalid")
        ... except ValidationError as e:
        ...     print(format_validation_error_message(e))
        Field: <name> - Message: <Input should be a valid string> - Value: <123>
        Field: <email> - Message: <value is not a valid email address> - Value: <invalid>
    """
    error_messages = []

    for error in e.errors():
        field = ".".join(map(str, error["loc"]))
        msg = error["msg"]
        input_val = error["input"]
        input_str = str(input_val)

        if len(input_str) > 128:
            input_str = input_str[:125] + "..."

        error_msg = f"Field: <{field}> - Message: <{msg}> - Value: <{input_str}>"
        error_messages.append(error_msg)

    return "\n".join(error_messages)


def normalize_str(v: Any) -> Any:
    """
    Normalizes string values to a standard format while preserving non-string inputs.

    Performs the following transformations when input is a string:
    1. Trims leading/trailing whitespace (str.strip())
    2. Converts to lowercase (str.lower())

    Non-string inputs are returned unchanged, making this function safe for mixed-type
    processing pipelines.

    Args:
        v (Any): Input value to normalize. Accepts any Python object.

    Returns:
        Any: Normalized string if input was string-type, original value otherwise.

    Behavior Examples:
        String Input: "  Admin  " → "admin"
        Empty String: "   " → "" (empty string)
        Non-String:
            - 123 → 123
            - None → None
            - ["User"] → ["User"]

    Typical Use Cases:
        - Standardizing user input
        - Preparing data for case-insensitive comparison
        - Cleaning API parameters
        - Normalizing configuration values

    Edge Cases:
        - Unicode whitespace is handled by str.strip()
        - Locale-independent lowercasing (str.lower())
        - Preserves falsy values (0, False, etc.)

    Example:
        >>> normalize_str("  ReadOnly  ")
        'readonly'
        >>> normalize_str(42)
        42
    """
    if isinstance(v, str):
        stripped = v.strip()
        normalized = stripped.lower()
        return normalized
    return v


def validate_uuid1_hex(v: Any) -> str:
    """
    Validates and converts input to a UUID version 1 hexadecimal string.

    This function performs strict validation and normalization:
    1. Accepts either UUID objects or UUID-formatted strings
    2. Verifies the UUID is version 1 (time-based)
    3. Returns the 32-character hexadecimal representation

    Args:
        v (Any): Input value to validate. Can be:
            - UUID object (must be version 1)
            - String in UUID format (e.g. "550e8400-e29b-41d4-a716-446655440000")

    Returns:
        str: 32-character lowercase hexadecimal string without hyphens
            Example: "550e8400e29b41d4a716446655440000"

    Raises:
        PydanticCustomError: With code "invalid_UUID1_format" when:
            - Input is not a UUID object or valid UUID string
            - UUID version is not 1
            - String doesn't match UUID format

    Examples:
        Valid cases:
        >>> validate_uuid1_hex("550e8400-e29b-41d4-a716-446655440000")
        '550e8400e29b41d4a716446655440000'
        >>> validate_uuid1_hex(UUID('550e8400-e29b-41d4-a716-446655440000'))
        '550e8400e29b41d4a716446655440000'

        Invalid cases:
        >>> validate_uuid1_hex("not-a-uuid")  # raises PydanticCustomError
        >>> validate_uuid1_hex(12345)  # raises PydanticCustomError
        >>> validate_uuid1_hex(UUID(int=0))  # v4, raises PydanticCustomError

    Notes:
        - Uses Python's built-in UUID parser for format validation
        - Version check prevents accidental use of other UUID versions
        - Hyphens in input strings are automatically removed in output
    """
    try:
        uuid_obj = UUID(v) if isinstance(v, str) else v
        if uuid_obj.version != 1:
            raise PydanticCustomError("invalid_UUID1_format", "Must be a UUID1 format")
        return uuid_obj.hex
    except (AttributeError, ValueError, TypeError):
        raise PydanticCustomError("invalid_UUID1_format", "Invalid UUID1 format")


class PermissionEnum(StrEnum):
    me = auto()
    team = auto()


class ChunkMethodnEnum(StrEnum):
    naive = auto()
    book = auto()
    email = auto()
    laws = auto()
    manual = auto()
    one = auto()
    paper = auto()
    picture = auto()
    presentation = auto()
    qa = auto()
    table = auto()
    tag = auto()


class GraphragMethodEnum(StrEnum):
    light = auto()
    general = auto()


class Base(BaseModel):
    class Config:
        extra = "forbid"


class RaptorConfig(Base):
    use_raptor: bool = Field(default=False)
    prompt: Annotated[
        str,
        StringConstraints(strip_whitespace=True, min_length=1),
        Field(
            default="Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize."
        ),
    ]
    max_token: int = Field(default=256, ge=1, le=2048)
    threshold: float = Field(default=0.1, ge=0.0, le=1.0)
    max_cluster: int = Field(default=64, ge=1, le=1024)
    random_seed: int = Field(default=0, ge=0)


class GraphragConfig(Base):
    use_graphrag: bool = Field(default=False)
    entity_types: list[str] = Field(default_factory=lambda: ["organization", "person", "geo", "event", "category"])
    method: GraphragMethodEnum = Field(default=GraphragMethodEnum.light)
    community: bool = Field(default=False)
    resolution: bool = Field(default=False)


class ParserConfig(Base):
    auto_keywords: int = Field(default=0, ge=0, le=32)
    auto_questions: int = Field(default=0, ge=0, le=10)
    chunk_token_num: int = Field(default=128, ge=1, le=2048)
    delimiter: str = Field(default=r"\n", min_length=1)
    graphrag: GraphragConfig | None = None
    html4excel: bool = False
    layout_recognize: str = "DeepDOC"
    raptor: RaptorConfig | None = None
    tag_kb_ids: list[str] = Field(default_factory=list)
    topn_tags: int = Field(default=1, ge=1, le=10)
    filename_embd_weight: float | None = Field(default=None, ge=0.0, le=1.0)
    task_page_size: int | None = Field(default=None, ge=1)
    pages: list[list[int]] | None = None


class CreateDatasetReq(Base):
    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(...)]
    avatar: str | None = Field(default=None, max_length=65535)
    description: str | None = Field(default=None, max_length=65535)
    embedding_model: Annotated[str, StringConstraints(strip_whitespace=True, max_length=255), Field(default="", serialization_alias="embd_id")]
    permission: PermissionEnum = Field(default=PermissionEnum.me, min_length=1, max_length=16)
    chunk_method: ChunkMethodnEnum = Field(default=ChunkMethodnEnum.naive, min_length=1, max_length=32, serialization_alias="parser_id")
    pagerank: int = Field(default=0, ge=0, le=100)
    parser_config: ParserConfig | None = Field(default=None)

    @field_validator("avatar")
    @classmethod
    def validate_avatar_base64(cls, v: str | None) -> str | None:
        """
        Validates Base64-encoded avatar string format and MIME type compliance.

        Implements a three-stage validation workflow:
        1. MIME prefix existence check
        2. MIME type format validation
        3. Supported type verification

        Args:
            v (str): Raw avatar field value

        Returns:
            str: Validated Base64 string

        Raises:
            PydanticCustomError: For structural errors in these cases:
                - Missing MIME prefix header
                - Invalid MIME prefix format
                - Unsupported image MIME type

        Example:
            ```python
            # Valid case
            CreateDatasetReq(avatar="data:image/png;base64,iVBORw0KGg...")

            # Invalid cases
            CreateDatasetReq(avatar="image/jpeg;base64,...")  # Missing 'data:' prefix
            CreateDatasetReq(avatar="data:video/mp4;base64,...")  # Unsupported MIME type
            ```
        """
        if v is None:
            return v

        if "," in v:
            prefix, _ = v.split(",", 1)
            if not prefix.startswith("data:"):
                raise PydanticCustomError("format_invalid", "Invalid MIME prefix format. Must start with 'data:'")

            mime_type = prefix[5:].split(";")[0]
            supported_mime_types = ["image/jpeg", "image/png"]
            if mime_type not in supported_mime_types:
                raise PydanticCustomError("format_invalid", "Unsupported MIME type. Allowed: {supported_mime_types}", {"supported_mime_types": supported_mime_types})

            return v
        else:
            raise PydanticCustomError("format_invalid", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>")

    @field_validator("embedding_model", mode="after")
    @classmethod
    def validate_embedding_model(cls, v: str) -> str:
        """
        Validates embedding model identifier format compliance.

        Validation pipeline:
        1. Structural format verification
        2. Component non-empty check
        3. Value normalization

        Args:
            v (str): Raw model identifier

        Returns:
            str: Validated <model_name>@<provider> format

        Raises:
            PydanticCustomError: For these violations:
                - Missing @ separator
                - Empty model_name/provider
                - Invalid component structure

        Examples:
            Valid: "text-embedding-3-large@openai"
            Invalid: "invalid_model" (no @)
            Invalid: "@openai" (empty model_name)
            Invalid: "text-embedding-3-large@" (empty provider)
        """
        if "@" not in v:
            raise PydanticCustomError("format_invalid", "Embedding model identifier must follow <model_name>@<provider> format")

        components = v.split("@", 1)
        if len(components) != 2 or not all(components):
            raise PydanticCustomError("format_invalid", "Both model_name and provider must be non-empty strings")

        model_name, provider = components
        if not model_name.strip() or not provider.strip():
            raise PydanticCustomError("format_invalid", "Model name and provider cannot be whitespace-only strings")
        return v

    @field_validator("permission", mode="before")
    @classmethod
    def normalize_permission(cls, v: Any) -> Any:
        return normalize_str(v)

    @field_validator("parser_config", mode="before")
    @classmethod
    def normalize_empty_parser_config(cls, v: Any) -> Any:
        """
        Normalizes empty parser configuration by converting empty dictionaries to None.

        This validator ensures consistent handling of empty parser configurations across
        the application by converting empty dicts to None values.

        Args:
            v (Any): Raw input value for the parser config field

        Returns:
            Any: Returns None if input is an empty dict, otherwise returns the original value

        Example:
            >>> normalize_empty_parser_config({})
            None

            >>> normalize_empty_parser_config({"key": "value"})
            {"key": "value"}
        """
        if v == {}:
            return None
        return v

    @field_validator("parser_config", mode="after")
    @classmethod
    def validate_parser_config_json_length(cls, v: ParserConfig | None) -> ParserConfig | None:
        """
        Validates serialized JSON length constraints for parser configuration.

        Implements a two-stage validation workflow:
        1. Null check - bypass validation for empty configurations
        2. Model serialization - convert Pydantic model to JSON string
        3. Size verification - enforce maximum allowed payload size

        Args:
            v (ParserConfig | None): Raw parser configuration object

        Returns:
            ParserConfig | None: Validated configuration object

        Raises:
            PydanticCustomError: When serialized JSON exceeds 65,535 characters
        """
        if v is None:
            return None

        if (json_str := v.model_dump_json()) and len(json_str) > 65535:
            raise PydanticCustomError("string_too_long", "Parser config exceeds size limit (max 65,535 characters). Current size: {actual}", {"actual": len(json_str)})
        return v


class UpdateDatasetReq(CreateDatasetReq):
    dataset_id: str = Field(...)
    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(default="")]

    @field_validator("dataset_id", mode="before")
    @classmethod
    def validate_dataset_id(cls, v: Any) -> str:
        return validate_uuid1_hex(v)


class DeleteReq(Base):
    ids: list[str] | None = Field(...)

    @field_validator("ids", mode="after")
    @classmethod
    def validate_ids(cls, v_list: list[str] | None) -> list[str] | None:
        """
        Validates and normalizes a list of UUID strings with None handling.

        This post-processing validator performs:
        1. None input handling (pass-through)
        2. UUID version 1 validation for each list item
        3. Duplicate value detection
        4. Returns normalized UUID hex strings or None

        Args:
            v_list (list[str] | None): Input list that has passed initial validation.
                Either a list of UUID strings or None.

        Returns:
            list[str] | None:
                - None if input was None
                - List of normalized UUID hex strings otherwise:
                    * 32-character lowercase
                    * Valid UUID version 1
                    * Unique within list

        Raises:
            PydanticCustomError: With structured error details when:
                - "invalid_UUID1_format": Any string fails UUIDv1 validation
                - "duplicate_uuids": If duplicate IDs are detected

        Validation Rules:
            - None input returns None
            - Empty list returns empty list
            - All non-None items must be valid UUIDv1
            - No duplicates permitted
            - Original order preserved

        Examples:
            Valid cases:
            >>> validate_ids(None)
            None
            >>> validate_ids([])
            []
            >>> validate_ids(["550e8400-e29b-41d4-a716-446655440000"])
            ["550e8400e29b41d4a716446655440000"]

            Invalid cases:
            >>> validate_ids(["invalid"])
            # raises PydanticCustomError(invalid_UUID1_format)
            >>> validate_ids(["550e...", "550e..."])
            # raises PydanticCustomError(duplicate_uuids)

        Security Notes:
            - Validates UUID version to prevent version spoofing
            - Duplicate check prevents data injection
            - None handling maintains pipeline integrity
        """
        if v_list is None:
            return None

        ids_list = []
        for v in v_list:
            try:
                ids_list.append(validate_uuid1_hex(v))
            except PydanticCustomError as e:
                raise e

        duplicates = [item for item, count in Counter(ids_list).items() if count > 1]
        if duplicates:
            duplicates_str = ", ".join(duplicates)
            raise PydanticCustomError("duplicate_uuids", "Duplicate ids: '{duplicate_ids}'", {"duplicate_ids": duplicates_str})

        return ids_list


class DeleteDatasetReq(DeleteReq): ...


class OrderByEnum(StrEnum):
    create_time = auto()
    update_time = auto()


class BaseListReq(Base):
    id: str | None = None
    name: str | None = None
    page: int = Field(default=1, ge=1)
    page_size: int = Field(default=30, ge=1)
    orderby: OrderByEnum = Field(default=OrderByEnum.create_time)
    desc: bool = Field(default=True)

    @field_validator("id", mode="before")
    @classmethod
    def validate_id(cls, v: Any) -> str:
        return validate_uuid1_hex(v)

    @field_validator("orderby", mode="before")
    @classmethod
    def normalize_orderby(cls, v: Any) -> Any:
        return normalize_str(v)


class ListDatasetReq(BaseListReq): ...
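Taken together, the module gives API endpoints a uniform validate-then-branch pattern. A sketch of a plausible call site (the route and blueprint names are illustrative, not taken from this diff):

```python
from flask import request

@manager.route("/datasets", methods=["POST"])  # "manager" is an assumed blueprint
@token_required
def create_dataset(tenant_id):
    req, err = validate_and_parse_json_request(request, CreateDatasetReq)
    if err is not None:
        return get_error_argument_result(message=err)
    # req is a plain dict keyed by serialization aliases, e.g. req["parser_id"]
    # (from chunk_method) and req["embd_id"] (from embedding_model).
    ...
```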
@@ -5,14 +5,14 @@
     "create_time": {"type": "varchar", "default": ""},
     "create_timestamp_flt": {"type": "float", "default": 0.0},
     "img_id": {"type": "varchar", "default": ""},
-    "docnm_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+    "docnm_kwd": {"type": "varchar", "default": ""},
     "title_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
     "title_sm_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "name_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "tag_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+    "name_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+    "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+    "tag_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
     "important_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+    "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
     "question_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
     "content_with_weight": {"type": "varchar", "default": ""},
     "content_ltks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
@@ -27,16 +27,18 @@
     "rank_int": {"type": "integer", "default": 0},
     "rank_flt": {"type": "float", "default": 0},
     "available_int": {"type": "integer", "default": 1},
-    "knowledge_graph_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+    "knowledge_graph_kwd": {"type": "varchar", "default": ""},
+    "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
     "pagerank_fea": {"type": "integer", "default": 0},
     "tag_feas": {"type": "varchar", "default": ""},

-    "from_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "to_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-    "source_id": {"type": "varchar", "default": ""},
+    "from_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+    "to_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+    "entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+    "entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+    "source_id": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
     "n_hop_with_weight": {"type": "varchar", "default": ""},
-    "removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"}
+    "removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+
+    "doc_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"}
 }

File diff suppressed because it is too large.

conf/os_mapping.json (new file, 213 lines)
@@ -0,0 +1,213 @@
{
  "settings": {
    "index": {
      "number_of_shards": 2,
      "number_of_replicas": 0,
      "refresh_interval": "1000ms",
      "knn": true,
      "similarity": {
        "scripted_sim": {
          "type": "scripted",
          "script": {
            "source": "double idf = Math.log(1+(field.docCount-term.docFreq+0.5)/(term.docFreq + 0.5))/Math.log(1+((field.docCount-0.5)/1.5)); return query.boost * idf * Math.min(doc.freq, 1);"
          }
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "lat_lon": {
        "type": "geo_point",
        "store": "true"
      }
    },
    "date_detection": "true",
    "dynamic_templates": [
      {
        "int": {
          "match": "*_int",
          "mapping": {
            "type": "integer",
            "store": "true"
          }
        }
      },
      {
        "ulong": {
          "match": "*_ulong",
          "mapping": {
            "type": "unsigned_long",
            "store": "true"
          }
        }
      },
      {
        "long": {
          "match": "*_long",
          "mapping": {
            "type": "long",
            "store": "true"
          }
        }
      },
      {
        "short": {
          "match": "*_short",
          "mapping": {
            "type": "short",
            "store": "true"
          }
        }
      },
      {
        "numeric": {
          "match": "*_flt",
          "mapping": {
            "type": "float",
            "store": true
          }
        }
      },
      {
        "tks": {
          "match": "*_tks",
          "mapping": {
            "type": "text",
            "similarity": "scripted_sim",
            "analyzer": "whitespace",
            "store": true
          }
        }
      },
      {
        "ltks": {
          "match": "*_ltks",
          "mapping": {
            "type": "text",
            "analyzer": "whitespace",
            "store": true
          }
        }
      },
      {
        "kwd": {
          "match_pattern": "regex",
          "match": "^(.*_(kwd|id|ids|uid|uids)|uid)$",
          "mapping": {
            "type": "keyword",
            "similarity": "boolean",
            "store": true
          }
        }
      },
      {
        "dt": {
          "match_pattern": "regex",
          "match": "^.*(_dt|_time|_at)$",
          "mapping": {
            "type": "date",
            "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||yyyy-MM-dd_HH:mm:ss",
            "store": true
          }
        }
      },
      {
        "nested": {
          "match": "*_nst",
          "mapping": {
            "type": "nested"
          }
        }
      },
      {
        "object": {
          "match": "*_obj",
          "mapping": {
            "type": "object",
            "dynamic": "true"
          }
        }
      },
      {
        "string": {
          "match_pattern": "regex",
          "match": "^.*_(with_weight|list)$",
          "mapping": {
            "type": "text",
            "index": "false",
            "store": true
          }
        }
      },
      {
        "rank_feature": {
          "match": "*_fea",
          "mapping": {
            "type": "rank_feature"
          }
        }
      },
      {
        "rank_features": {
          "match": "*_feas",
          "mapping": {
            "type": "rank_features"
          }
        }
      },
      {
        "knn_vector": {
          "match": "*_512_vec",
          "mapping": {
            "type": "knn_vector",
            "index": true,
            "space_type": "cosinesimil",
            "dimension": 512
          }
        }
      },
      {
        "knn_vector": {
          "match": "*_768_vec",
          "mapping": {
            "type": "knn_vector",
            "index": true,
            "space_type": "cosinesimil",
            "dimension": 768
          }
        }
      },
      {
        "knn_vector": {
          "match": "*_1024_vec",
          "mapping": {
            "type": "knn_vector",
            "index": true,
            "space_type": "cosinesimil",
            "dimension": 1024
          }
        }
      },
      {
        "knn_vector": {
          "match": "*_1536_vec",
          "mapping": {
            "type": "knn_vector",
            "index": true,
            "space_type": "cosinesimil",
            "dimension": 1536
          }
        }
      },
      {
        "binary": {
          "match": "*_bin",
          "mapping": {
            "type": "binary"
          }
        }
      }
    ]
  }
}
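All typing in this mapping is driven by field-name suffixes, so new fields need no schema change as long as they follow the naming convention; the dynamic templates above resolve each name at index time. A small sketch (not from the repo) of how names resolve:

```python
# Order matters: check the more specific suffix "_ltks" before "_tks".
SUFFIX_TO_TYPE = {
    "_ltks": "text (whitespace analyzer)",
    "_tks": "text (whitespace analyzer, scripted similarity)",
    "_kwd": "keyword (boolean similarity)",
    "_int": "integer",
    "_flt": "float",
    "_feas": "rank_features",
    "_fea": "rank_feature",
    "_1024_vec": "knn_vector (cosinesimil, dimension 1024)",
    "_bin": "binary",
}

def resolve(field_name: str) -> str:
    for suffix, os_type in SUFFIX_TO_TYPE.items():
        if field_name.endswith(suffix):
            return os_type
    return "dynamic default"

print(resolve("title_tks"))   # text (whitespace analyzer, scripted similarity)
print(resolve("q_1024_vec"))  # knn_vector (cosinesimil, dimension 1024)
```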
Some files were not shown because too many files have changed in this diff.