Compare commits
Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00.
1520 commits compared; the per-file changes follow.
.gitattributes (vendored): 3 changes

@@ -1 +1,2 @@
 *.sh text eol=lf
+docker/entrypoint.sh text eol=lf executable
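A rule like this does not rewrite files that are already tracked until they are re-checked out. A minimal sketch, using the standard `git add --renormalize` command, to re-apply the new line-ending attributes locally and review what changed:

```bash
# Re-apply .gitattributes rules (eol=lf) to all tracked files in the index,
# then list the paths Git rewrote before committing the normalization.
git add --renormalize .
git status --short
```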
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 30 changes

@@ -1,30 +1,36 @@
-name: Bug Report
+name: "🐞 Bug Report"
 description: Create a bug issue for RAGFlow
 title: "[Bug]: "
-labels: [bug]
+labels: ["🐞 bug"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same bug?
-      description: Please check if an issue already exists for the bug you encountered.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
           required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: "Please provide the following information to help us understand the issue."
   - type: input
     attributes:
-      label: Branch name
-      description: Enter the name of the branch where you encountered the issue.
-      placeholder: e.g., main
+      label: RAGFlow workspace code commit ID
+      description: Enter the commit ID associated with the issue.
+      placeholder: e.g., 26d3480e
     validations:
       required: true
   - type: input
     attributes:
-      label: Commit ID
-      description: Enter the commit ID associated with the issue.
-      placeholder: e.g., c3b2a1
+      label: RAGFlow image version
+      description: Enter the image version(shown in RAGFlow UI, `System` page) associated with the issue.
+      placeholder: e.g., 26d3480e(v0.13.0~174)
     validations:
       required: true
   - type: textarea
.github/ISSUE_TEMPLATE/feature_request.md (vendored): 10 changes, file deleted (replaced by the YAML issue form below)

@@ -1,10 +0,0 @@
----
-name: Feature request
-title: '[Feature Request]: '
-about: Suggest an idea for RAGFlow
-labels: ''
----
-
-**Summary**
-
-Description for this feature.
.github/ISSUE_TEMPLATE/feature_request.yml (vendored): 16 changes

@@ -1,14 +1,20 @@
-name: Feature request
+name: "💞 Feature request"
 description: Propose a feature request for RAGFlow.
 title: "[Feature Request]: "
-labels: [feature request]
+labels: ["💞 feature"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same feature request?
-      description: Please check if an issue already exists for the feature you request.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
           required: true
   - type: textarea
     attributes:
.github/ISSUE_TEMPLATE/question.yml (vendored): 17 changes

@@ -1,8 +1,21 @@
-name: Question
+name: "🙋♀️ Question"
 description: Ask questions on RAGFlow
 title: "[Question]: "
-labels: [question]
+labels: ["🙋♀️ question"]
 body:
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: |
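All three issue forms now share the same Self Checks block. A quick local sanity check that the templates still parse as YAML, a sketch assuming Python 3 with PyYAML installed (the loop itself is illustrative, not part of the repo):

```bash
# Parse each issue form with PyYAML; a malformed template fails loudly here
# instead of silently breaking the "New issue" chooser on GitHub.
for f in .github/ISSUE_TEMPLATE/*.yml; do
    python3 -c "import sys, yaml; yaml.safe_load(open(sys.argv[1])); print(sys.argv[1], 'OK')" "$f"
done
```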
.github/workflows/release.yml (vendored): 118 changes, new file

@@ -0,0 +1,118 @@
+name: release
+
+on:
+  schedule:
+    - cron: '0 13 * * *'  # This schedule runs every 13:00:00Z(21:00:00+08:00)
+  # The "create tags" trigger is specifically focused on the creation of new tags, while the "push tags" trigger is activated when tags are pushed, including both new tag creations and updates to existing tags.
+  create:
+    tags:
+      - "v*.*.*"  # normal release
+      - "nightly"  # the only one mutable tag
+
+# https://docs.github.com/en/actions/using-jobs/using-concurrency
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  release:
+    runs-on: [ "self-hosted", "overseas" ]
+    steps:
+      - name: Ensure workspace ownership
+        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
+
+      # https://github.com/actions/checkout/blob/v3/README.md
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Prepare release body
+        run: |
+          if [[ $GITHUB_EVENT_NAME == 'create' ]]; then
+            RELEASE_TAG=${GITHUB_REF#refs/tags/}
+            if [[ $RELEASE_TAG == 'nightly' ]]; then
+              PRERELEASE=true
+            else
+              PRERELEASE=false
+            fi
+            echo "Workflow triggered by create tag: $RELEASE_TAG"
+          else
+            RELEASE_TAG=nightly
+            PRERELEASE=true
+            echo "Workflow triggered by schedule"
+          fi
+          echo "RELEASE_TAG=$RELEASE_TAG" >> $GITHUB_ENV
+          echo "PRERELEASE=$PRERELEASE" >> $GITHUB_ENV
+          RELEASE_DATETIME=$(date --rfc-3339=seconds)
+          echo Release $RELEASE_TAG created from $GITHUB_SHA at $RELEASE_DATETIME > release_body.md
+
+      - name: Move the existing mutable tag
+        # https://github.com/softprops/action-gh-release/issues/171
+        run: |
+          git fetch --tags
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            # Determine if a given tag exists and matches a specific Git commit.
+            # actions/checkout@v4 fetch-tags doesn't work when triggered by schedule
+            if [ "$(git rev-parse -q --verify "refs/tags/$RELEASE_TAG")" = "$GITHUB_SHA" ]; then
+              echo "mutable tag $RELEASE_TAG exists and matches $GITHUB_SHA"
+            else
+              git tag -f $RELEASE_TAG $GITHUB_SHA
+              git push -f origin $RELEASE_TAG:refs/tags/$RELEASE_TAG
+              echo "created/moved mutable tag $RELEASE_TAG to $GITHUB_SHA"
+            fi
+          fi
+
+      - name: Create or overwrite a release
+        # https://github.com/actions/upload-release-asset has been replaced by https://github.com/softprops/action-gh-release
+        uses: softprops/action-gh-release@v2
+        with:
+          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
+          prerelease: ${{ env.PRERELEASE }}
+          tag_name: ${{ env.RELEASE_TAG }}
+          # The body field does not support environment variable substitution directly.
+          body_path: release_body.md
+
+      # https://github.com/marketplace/actions/docker-login
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: infiniflow
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      # https://github.com/marketplace/actions/build-and-push-docker-images
+      - name: Build and push full image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          push: true
+          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}
+          file: Dockerfile
+          platforms: linux/amd64
+
+      # https://github.com/marketplace/actions/build-and-push-docker-images
+      - name: Build and push slim image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          push: true
+          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
+          file: Dockerfile
+          build-args: LIGHTEN=1
+          platforms: linux/amd64
+
+      - name: Build ragflow-sdk
+        if: startsWith(github.ref, 'refs/tags/v')
+        run: |
+          cd sdk/python && \
+          uv build
+
+      - name: Publish package distributions to PyPI
+        if: startsWith(github.ref, 'refs/tags/v')
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          packages-dir: sdk/python/dist/
+          password: ${{ secrets.PYPI_API_TOKEN }}
+          verbose: true
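The tag-moving step is the subtle part of this workflow: `nightly` is deliberately mutable, unlike the immutable `v*.*.*` tags. A minimal local equivalent of what the scheduled run does when the tag has drifted, assuming push rights on `origin`:

```bash
# Re-point the mutable "nightly" tag at HEAD and force-push it, mirroring the
# git sequence the workflow runs when the tag no longer matches $GITHUB_SHA.
git fetch --tags
git tag -f nightly HEAD
git push -f origin nightly:refs/tags/nightly
```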
.github/workflows/tests.yml (vendored): 113 changes

@@ -15,6 +15,8 @@ on:
       - 'docs/**'
       - '*.md'
       - '*.mdx'
+  schedule:
+    - cron: '0 16 * * *'  # This schedule runs every 16:00:00Z(00:00:00+08:00)

 # https://docs.github.com/en/actions/using-jobs/using-concurrency
 concurrency:
@@ -32,45 +34,53 @@ jobs:
       # https://github.com/hmarr/debug-action
       #- uses: hmarr/debug-action@v2

-      - name: Show PR labels
+      - name: Show who triggered this workflow
        run: |
           echo "Workflow triggered by ${{ github.event_name }}"
-          if [[ ${{ github.event_name }} == 'pull_request' ]]; then
-            echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
-          fi

       - name: Ensure workspace ownership
         run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

+      # https://github.com/actions/checkout/issues/1781
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true

-      - name: Build ragflow:dev-slim
+      # https://github.com/astral-sh/ruff-action
+      - name: Static check with Ruff
+        uses: astral-sh/ruff-action@v3
+        with:
+          version: ">=0.11.x"
+          args: "check"
+
+      - name: Build ragflow:nightly-slim
         run: |
           RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
-          cp -r ${RUNNER_WORKSPACE_PREFIX}/huggingface.co ${RUNNER_WORKSPACE_PREFIX}/nltk_data ${RUNNER_WORKSPACE_PREFIX}/libssl*.deb .
-          sudo docker pull ubuntu:24.04
-          sudo docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+          sudo docker pull ubuntu:22.04
+          sudo docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .

-      - name: Build ragflow:dev
+      - name: Build ragflow:nightly
         run: |
-          sudo docker build -f Dockerfile -t infiniflow/ragflow:dev .
+          sudo docker build --progress=plain --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .

-      - name: Start ragflow:dev-slim
+      - name: Start ragflow:nightly-slim
         run: |
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

-      - name: Stop ragflow:dev-slim
+      - name: Stop ragflow:nightly-slim
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo docker compose -f docker/docker-compose.yml down -v

-      - name: Start ragflow:dev
+      - name: Start ragflow:nightly
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:dev" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

-      - name: Run tests
+      - name: Run sdk tests against Elasticsearch
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
           export HOST_ADDRESS=http://host.docker.internal:9380
@@ -78,9 +88,78 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && poetry install && source .venv/bin/activate && cd test && pytest t_dataset.py t_chat.py t_session.py
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

-      - name: Stop ragflow:dev
+      - name: Run frontend api tests against Elasticsearch
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+      - name: Run http api tests against Elasticsearch
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL}
+
+      - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo docker compose -f docker/docker-compose.yml down -v
+
+      - name: Start ragflow:nightly
+        run: |
+          sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml up -d
+
+      - name: Run sdk tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+
+      - name: Run frontend api tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+      - name: Run http api tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          cd sdk/python && uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_http_api && DOC_ENGINE=infinity pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL}
+
+      - name: Stop ragflow:nightly
+        if: always()  # always run this step even if previous steps failed
+        run: |
+          sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml down -v
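Every test step in this workflow opens with the same readiness gate before running pytest. Extracted as a standalone sketch, with the service name and HOST_ADDRESS exactly as the workflow uses them:

```bash
# Poll the API from inside the ragflow-server container until it answers,
# the same `until` loop each CI step uses, then hand off to the test runner.
export HOST_ADDRESS=http://host.docker.internal:9380
until sudo docker exec ragflow-server curl -s --connect-timeout 5 "${HOST_ADDRESS}" > /dev/null; do
    echo "Waiting for service to be available..."
    sleep 5
done
echo "ragflow-server is up"
```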
.gitignore (vendored): 10 changes

@@ -35,4 +35,12 @@ rag/res/deepdoc
 sdk/python/ragflow.egg-info/
 sdk/python/build/
 sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
+huggingface.co/
+nltk_data/
+
+# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
+.lh/
+.venv
+docker/data
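The ten-hex-digit glob is easy to misread, so it is worth checking which rule actually matches a given path. `git check-ignore -v` (standard Git) prints the responsible .gitignore line, and exits non-zero for paths that are not ignored:

```bash
# Confirm the hash-like pattern catches tiktoken cache names, and that the
# new directory rules apply; -v shows the matching .gitignore line for each.
git check-ignore -v 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
git check-ignore -v huggingface.co/ nltk_data/ docker/data
```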
.pre-commit-config.yaml: 19 changes, new file

@@ -0,0 +1,19 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-yaml
+      - id: check-json
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: mixed-line-ending
+      - id: check-symlinks
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.6
+    hooks:
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
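This file is consumed by the pre-commit tool; the config does nothing until the hook runner is installed into the clone. The standard setup (pipx matches the tooling the Dockerfile below installs):

```bash
# Install the hook runner, wire it into .git/hooks, and run every hook once
# over the whole tree so pre-existing violations surface immediately.
pipx install pre-commit
pre-commit install
pre-commit run --all-files
```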
244
Dockerfile
244
Dockerfile
@ -1,41 +1,142 @@
|
|||||||
# base stage
|
# base stage
|
||||||
FROM ubuntu:24.04 AS base
|
FROM ubuntu:22.04 AS base
|
||||||
USER root
|
USER root
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
|
||||||
ARG ARCH=amd64
|
ARG NEED_MIRROR=0
|
||||||
ENV LIGHTEN=0
|
ARG LIGHTEN=0
|
||||||
|
ENV LIGHTEN=${LIGHTEN}
|
||||||
|
|
||||||
WORKDIR /ragflow
|
WORKDIR /ragflow
|
||||||
|
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
|
# Copy models downloaded via download_deps.py
|
||||||
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
|
||||||
|
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
|
||||||
|
cp /huggingface.co/InfiniFlow/huqie/huqie.txt.trie /ragflow/rag/res/ && \
|
||||||
|
tar --exclude='.*' -cf - \
|
||||||
|
/huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
|
||||||
|
/huggingface.co/InfiniFlow/deepdoc \
|
||||||
|
| tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
|
||||||
|
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
|
||||||
|
if [ "$LIGHTEN" != "1" ]; then \
|
||||||
|
(tar -cf - \
|
||||||
|
/huggingface.co/BAAI/bge-large-zh-v1.5 \
|
||||||
|
/huggingface.co/maidalun1020/bce-embedding-base_v1 \
|
||||||
|
| tar -xf - --strip-components=2 -C /root/.ragflow) \
|
||||||
|
fi
|
||||||
|
|
||||||
RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
|
# https://github.com/chrismattmann/tika-python
|
||||||
apt update && apt-get --no-install-recommends install -y ca-certificates
|
# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
|
||||||
|
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
|
||||||
|
cp -r /deps/nltk_data /root/ && \
|
||||||
|
cp /deps/tika-server-standard-3.0.0.jar /deps/tika-server-standard-3.0.0.jar.md5 /ragflow/ && \
|
||||||
|
cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
|
||||||
|
|
||||||
# If you download Python modules too slow, you can use a pip mirror site to speed up apt and poetry
|
ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
|
||||||
RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list.d/ubuntu.sources
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/
|
|
||||||
|
|
||||||
RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
|
# Setup apt
|
||||||
apt update && apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus python3-pip python3-poetry \
|
# Python package and implicit dependencies:
|
||||||
&& pip3 install --user --break-system-packages poetry-plugin-pypi-mirror --index-url https://pypi.tuna.tsinghua.edu.cn/simple/ \
|
# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
|
||||||
|
# python-pptx: default-jdk tika-server-standard-3.0.0.jar
|
||||||
|
# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
|
||||||
|
# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
|
||||||
|
RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
|
||||||
|
if [ "$NEED_MIRROR" == "1" ]; then \
|
||||||
|
sed -i 's|http://ports.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
|
||||||
|
sed -i 's|http://archive.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
|
||||||
|
fi; \
|
||||||
|
rm -f /etc/apt/apt.conf.d/docker-clean && \
|
||||||
|
echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
|
||||||
|
chmod 1777 /tmp && \
|
||||||
|
apt update && \
|
||||||
|
apt --no-install-recommends install -y ca-certificates && \
|
||||||
|
apt update && \
|
||||||
|
apt install -y libglib2.0-0 libglx-mesa0 libgl1 && \
|
||||||
|
apt install -y pkg-config libicu-dev libgdiplus && \
|
||||||
|
apt install -y default-jdk && \
|
||||||
|
apt install -y libatk-bridge2.0-0 && \
|
||||||
|
apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
|
||||||
|
apt install -y libjemalloc-dev && \
|
||||||
|
apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
|
||||||
|
apt install -y ghostscript
|
||||||
|
|
||||||
|
RUN if [ "$NEED_MIRROR" == "1" ]; then \
|
||||||
|
pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
|
||||||
|
pip3 config set global.trusted-host mirrors.aliyun.com; \
|
||||||
|
mkdir -p /etc/uv && \
|
||||||
|
echo "[[index]]" > /etc/uv/uv.toml && \
|
||||||
|
echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
|
||||||
|
echo "default = true" >> /etc/uv/uv.toml; \
|
||||||
|
fi; \
|
||||||
|
pipx install uv
|
||||||
|
|
||||||
+ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
+ENV PATH=/root/.local/bin:$PATH
+
+# nodejs 12.22 on Ubuntu 22.04 is too old
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
+    apt purge -y nodejs npm cargo && \
+    apt autoremove -y && \
+    apt update && \
+    apt install -y nodejs
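A quick way to confirm the NodeSource setup took effect (an illustrative check, not part of the Dockerfile):

```bash
$ node --version   # expect a 20.x release, e.g. v20.11.1
$ npm --version
```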
+
+# A modern version of cargo is needed for the latest version of the Rust compiler.
+RUN apt update && apt install -y curl build-essential \
+    && if [ "$NEED_MIRROR" == "1" ]; then \
+        # Use TUNA mirrors for rustup/rust dist files
+        export RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup"; \
+        export RUSTUP_UPDATE_ROOT="https://mirrors.tuna.tsinghua.edu.cn/rustup/rustup"; \
+        echo "Using TUNA mirrors for Rustup."; \
+    fi; \
+    # Force curl to use HTTP/1.1
+    curl --proto '=https' --tlsv1.2 --http1.1 -sSf https://sh.rustup.rs | bash -s -- -y --profile minimal \
+    && echo 'export PATH="/root/.cargo/bin:${PATH}"' >> /root/.bashrc
+
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+RUN cargo --version && rustc --version
+
+# Add MSSQL ODBC driver
+# macOS ARM64 environment, install msodbcsql18.
+# general x86_64 environment, install msodbcsql17.
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \
+    curl https://packages.microsoft.com/config/ubuntu/22.04/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
+    apt update && \
+    arch="$(uname -m)"; \
+    if [ "$arch" = "arm64" ] || [ "$arch" = "aarch64" ]; then \
+        # ARM64 (macOS/Apple Silicon or Linux aarch64)
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql18; \
+    else \
+        # x86_64 or others
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql17; \
+    fi || \
+    { echo "Failed to install ODBC driver"; exit 1; }
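To verify the driver registered in the resulting image (an illustrative check; `odbcinst` ships with unixODBC, which the step above pulls in):

```bash
$ odbcinst -q -d
[ODBC Driver 17 for SQL Server]   # or "ODBC Driver 18" on ARM64 builds
```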
+
+# Add dependencies of selenium
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
+    unzip /chrome-linux64.zip && \
+    mv chrome-linux64 /opt/chrome && \
+    ln -s /opt/chrome/chrome /usr/local/bin/
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
+    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
+    mv chromedriver /usr/local/bin/ && \
+    rm -f /usr/bin/google-chrome
 
 # https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
 # aspose-slides on linux/arm64 is unavailable
-RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
-    if [ "${ARCH}" = "amd64" ]; then \
+    if [ "$(uname -m)" = "x86_64" ]; then \
-        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+    elif [ "$(uname -m)" = "aarch64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
-    fi
+    fi
 
-ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
-
-# Configure Poetry
-ENV POETRY_NO_INTERACTION=1
-ENV POETRY_VIRTUALENVS_IN_PROJECT=true
-ENV POETRY_VIRTUALENVS_CREATE=true
-ENV POETRY_REQUESTS_TIMEOUT=15
 
 # builder stage
 FROM base AS builder
@@ -43,24 +144,38 @@ USER root
 
 WORKDIR /ragflow
 
-RUN --mount=type=cache,id=ragflow_builder_apt,target=/var/cache/apt,sharing=locked \
+# install dependencies from uv.lock file
-    apt update && apt install -y nodejs npm cargo && \
+COPY pyproject.toml uv.lock ./
-    rm -rf /var/lib/apt/lists/*
+
+# https://github.com/astral-sh/uv/issues/10462
+# uv records the index url into uv.lock but doesn't fail over among multiple indexes
+RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
+    else \
+        sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
+    fi; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        uv sync --python 3.10 --frozen; \
+    else \
+        uv sync --python 3.10 --frozen --all-extras; \
+    fi
 
 COPY web web
 COPY docs docs
-RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
+RUN --mount=type=cache,id=ragflow_npm,target=/root/.npm,sharing=locked \
-    cd web && npm i --force && npm run build
+    cd web && npm install && npm run build
 
-# install dependencies from poetry.lock file
+COPY .git /ragflow/.git
-COPY pyproject.toml poetry.toml poetry.lock ./
 
-RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
+RUN version_info=$(git describe --tags --match=v* --first-parent --always); \
-    if [ "$LIGHTEN" -eq 0 ]; then \
+    if [ "$LIGHTEN" == "1" ]; then \
-        poetry install --sync --no-root --with=full; \
+        version_info="$version_info slim"; \
-    else \
+    else \
-        poetry install --sync --no-root; \
+        version_info="$version_info full"; \
-    fi
+    fi; \
+    echo "RAGFlow version: $version_info"; \
+    echo $version_info > /ragflow/VERSION
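The builder stage now stamps the image with a version derived from git metadata plus an edition suffix; illustrative values of the two commands involved:

```bash
$ git describe --tags --match=v* --first-parent --always
v0.19.0                  # or e.g. v0.19.0-12-gabc1234 on a commit past the tag
$ cat /ragflow/VERSION
v0.19.0 full             # "slim" instead of "full" when LIGHTEN=1
```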
 
 # production stage
 FROM base AS production
@@ -68,42 +183,6 @@ USER root
 
 WORKDIR /ragflow
 
-# Install python packages' dependencies
-# cv2 requires libGL.so.1
-RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
-    rm -rf /var/lib/apt/lists/*
-
-COPY web web
-COPY api api
-COPY conf conf
-COPY deepdoc deepdoc
-COPY rag rag
-COPY agent agent
-COPY graphrag graphrag
-COPY pyproject.toml poetry.toml poetry.lock ./
-
-# Copy models downloaded via download_deps.py
-RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar --exclude='.*' -cf - \
-        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
-        /huggingface.co/InfiniFlow/deepdoc \
-    | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar -cf - \
-        /huggingface.co/BAAI/bge-large-zh-v1.5 \
-        /huggingface.co/BAAI/bge-reranker-v2-m3 \
-        /huggingface.co/maidalun1020/bce-embedding-base_v1 \
-        /huggingface.co/maidalun1020/bce-reranker-base_v1 \
-    | tar -xf - --strip-components=2 -C /root/.ragflow
-
-# Copy nltk data downloaded via download_deps.py
-COPY nltk_data /root/nltk_data
-
-# Copy compiled web pages
-COPY --from=builder /ragflow/web/dist /ragflow/web/dist
 
 # Copy Python environment and packages
 ENV VIRTUAL_ENV=/ragflow/.venv
 COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
@@ -111,7 +190,24 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
 
 ENV PYTHONPATH=/ragflow/
 
-COPY docker/entrypoint.sh ./entrypoint.sh
+COPY web web
-RUN chmod +x ./entrypoint.sh
+COPY api api
+COPY conf conf
+COPY deepdoc deepdoc
+COPY rag rag
+COPY agent agent
+COPY graphrag graphrag
+COPY agentic_reasoning agentic_reasoning
+COPY pyproject.toml uv.lock ./
+COPY mcp mcp
+COPY plugin plugin
+
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
+COPY docker/entrypoint.sh ./
+RUN chmod +x ./entrypoint*.sh
+
+# Copy compiled web pages
+COPY --from=builder /ragflow/web/dist /ragflow/web/dist
+
+COPY --from=builder /ragflow/VERSION /ragflow/VERSION
 
 ENTRYPOINT ["./entrypoint.sh"]
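Taken together, the new flow stages every third-party resource in a separate `infiniflow/ragflow_deps` image (defined in Dockerfile.deps below) and bind-mounts it during the main build. A local end-to-end sketch, assuming `download_deps.py` has already fetched the resources and that the deps image is tagged `latest`, as the `--mount=from=` lines above expect:

```bash
uv run download_deps.py   # fetch chrome, chromedriver, tika, libssl debs, nltk_data, models
docker build -f Dockerfile.deps -t infiniflow/ragflow_deps:latest .
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```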
Dockerfile.deps (new file, 10 lines)
@@ -0,0 +1,10 @@
# This builds an image that contains the resources needed by Dockerfile
#
FROM scratch

# Copy resources downloaded via download_deps.py
COPY chromedriver-linux64-121-0-6167-85 chrome-linux64-121-0-6167-85 cl100k_base.tiktoken libssl1.1_1.1.1f-1ubuntu2_amd64.deb libssl1.1_1.1.1f-1ubuntu2_arm64.deb tika-server-standard-3.0.0.jar tika-server-standard-3.0.0.jar.md5 libssl*.deb /

COPY nltk_data /nltk_data

COPY huggingface.co /huggingface.co
@@ -33,6 +33,7 @@ ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
 ADD ./agent ./agent
 ADD ./graphrag ./graphrag
+ADD ./plugin ./plugin
 
 RUN dnf install -y openmpi openmpi-devel python3-openmpi
 ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
@@ -53,6 +54,7 @@ RUN conda run -n py11 python -m nltk.downloader wordnet
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
Dockerfile.slim (deleted file; its former 109 lines follow)
@@ -1,109 +0,0 @@
# base stage
FROM ubuntu:24.04 AS base
USER root

ARG ARCH=amd64
ENV LIGHTEN=1

WORKDIR /ragflow

RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache

RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt-get --no-install-recommends install -y ca-certificates

# If you download Python modules too slow, you can use a pip mirror site to speed up apt and poetry
RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list.d/ubuntu.sources
ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/

RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus python3-pip python3-poetry \
    && pip3 install --user --break-system-packages poetry-plugin-pypi-mirror --index-url https://pypi.tuna.tsinghua.edu.cn/simple/ \
    && rm -rf /var/lib/apt/lists/*

# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
# aspose-slides on linux/arm64 is unavailable
RUN if [ "${ARCH}" = "amd64" ]; then \
        curl -o libssl1.deb http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb && dpkg -i libssl1.deb && rm -f libssl1.deb; \
    fi

ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1

# Configure Poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_REQUESTS_TIMEOUT=15

# builder stage
FROM base AS builder
USER root

WORKDIR /ragflow

RUN --mount=type=cache,id=ragflow_builder_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y nodejs npm cargo && \
    rm -rf /var/lib/apt/lists/*

COPY web web
COPY docs docs
RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
    cd web && npm i && npm run build

# install dependencies from poetry.lock file
COPY pyproject.toml poetry.toml poetry.lock ./

RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
    if [ "$LIGHTEN" -eq 0 ]; then \
        poetry install --sync --no-root --with=full; \
    else \
        poetry install --sync --no-root; \
    fi

# production stage
FROM base AS production
USER root

WORKDIR /ragflow

# Install python packages' dependencies
# cv2 requires libGL.so.1
RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
    rm -rf /var/lib/apt/lists/*

COPY web web
COPY api api
COPY conf conf
COPY deepdoc deepdoc
COPY rag rag
COPY agent agent
COPY graphrag graphrag
COPY pyproject.toml poetry.toml poetry.lock ./

# Copy models downloaded via download_deps.py
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
    tar --exclude='.*' -cf - \
        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
        /huggingface.co/InfiniFlow/deepdoc \
    | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc

# Copy nltk data downloaded via download_deps.py
COPY nltk_data /root/nltk_data

# Copy compiled web pages
COPY --from=builder /ragflow/web/dist /ragflow/web/dist

# Copy Python environment and packages
ENV VIRTUAL_ENV=/ragflow/.venv
COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

ENV PYTHONPATH=/ragflow/

COPY docker/entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
README.md (198 changes)
@@ -7,8 +7,11 @@
 <p align="center">
   <a href="./README.md">English</a> |
   <a href="./README_zh.md">简体中文</a> |
+  <a href="./README_tzh.md">繁体中文</a> |
   <a href="./README_ja.md">日本語</a> |
-  <a href="./README_ko.md">한국어</a>
+  <a href="./README_ko.md">한국어</a> |
+  <a href="./README_id.md">Bahasa Indonesia</a> |
+  <a href="/README_pt_br.md">Português (Brasil)</a>
 </p>
 
 <p align="center">
@@ -19,7 +22,7 @@
     <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
   </a>
   <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
+    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
   </a>
   <a href="https://github.com/infiniflow/ragflow/releases/latest">
     <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -31,14 +34,14 @@
 
 <h4 align="center">
   <a href="https://ragflow.io/docs/dev/">Document</a> |
-  <a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
+  <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
   <a href="https://twitter.com/infiniflowai">Twitter</a> |
-  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+  <a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
   <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
 <details open>
-<summary></b>📕 Table of Contents</b></summary>
+<summary><b>📕 Table of Contents</b></summary>
 
 - 💡 [What is RAGFlow?](#-what-is-ragflow)
 - 🎮 [Demo](#-demo)
@@ -67,23 +70,26 @@ data.
 ## 🎮 Demo
 
 Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
   <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-  <img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+  <img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>
 
 ## 🔥 Latest Updates
 
-- 2024-09-29 Optimizes multi-round conversations.
-- 2024-09-13 Adds search mode for knowledge base Q&A.
-- 2024-09-09 Adds a medical consultant agent template.
+- 2025-03-19 Supports using a multi-modal model to make sense of images within PDF or DOCX files.
+- 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
+- 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
+- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
+- 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
 - 2024-08-22 Support text to SQL statements through RAG.
-- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
 
 ## 🎉 Stay Tuned
 
 ⭐️ Star our repository to stay up-to-date with exciting new features and improvements! Get instant notifications for new
 releases! 🌟
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
   <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
 </div>
@@ -131,8 +137,10 @@ releases! 🌟
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-  > If you have not installed Docker on your local machine (Windows, Mac, or Linux),
-  > see [Install Docker Engine](https://docs.docker.com/engine/install/).
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use the code executor (sandbox) feature of RAGFlow.
+
+> [!TIP]
+> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).
 
 ### 🚀 Start up the server
 
@@ -152,7 +160,7 @@ releases! 🌟
 > ```
 >
 > This change will be reset after a system reboot. To ensure your change remains permanent, add or update the
-`vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
+> `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
 >
 > ```bash
 > vm.max_map_count=262144
@@ -164,29 +172,29 @@ releases! 🌟
    $ git clone https://github.com/infiniflow/ragflow.git
   ```
 
-3. Build the pre-built Docker images and start up the server:
+3. Start up the server using the pre-built Docker images:
 
-   > The command below downloads the dev version Docker image for RAGFlow slim (`dev-slim`). Note that RAGFlow slim
-   > Docker images do not include embedding models or Python libraries and hence are approximately 1GB in size.
+   > [!CAUTION]
+   > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
+   > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.
+
+   > The command below downloads the `v0.19.0-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.19.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` for the full edition `v0.19.0`.
 
    ```bash
    $ cd ragflow/docker
+   # Use CPU for embedding and DeepDoc tasks:
    $ docker compose -f docker-compose.yml up -d
+
+   # To use GPU to accelerate embedding and DeepDoc tasks:
+   # docker compose -f docker-compose-gpu.yml up -d
   ```
 
-   > - To download a RAGFlow slim Docker image of a specific version, update the `RAGFlow_IMAGE` variable in **docker/.env** to your desired version. For example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0-slim`. After making this change, rerun the command above to initiate the download.
-   > - To download the dev version of RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFlow_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`. After making this change, rerun the command above to initiate the download.
-   > - To download a specific version of RAGFlow Docker image *including* embedding models and Python libraries, update the `RAGFlow_IMAGE` variable in **docker/.env** to your desired version. For example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`. After making this change, rerun the command above to initiate the download.
-
-   > **NOTE:** A RAGFlow Docker image that includes embedding models and Python libraries is approximately 9GB in size and may take significantly longer time to load.
+   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+   |-------------------|-----------------|-----------------------|--------------------------|
+   | v0.19.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+   | v0.19.0-slim      | ≈2              | ❌                    | Stable release           |
+   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
+   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
 
 4. Check the server status after having the server up and running:
 
@@ -198,24 +206,22 @@ releases! 🌟
 
    ```bash
 
        ____   ___    ______ ______ __
      / __ \ /   |  / ____// ____// /____  _      __
     / /_/ // /| | / / __ / /_   / // __ \| | /| / /
    / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
   /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/
 
    * Running on all addresses (0.0.0.0)
-   * Running on http://127.0.0.1:9380
-   * Running on http://x.x.x.x:9380
-   INFO:werkzeug:Press CTRL+C to quit
   ```
 
-   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal`
-   error because, at that moment, your RAGFlow may not be fully initialized.
+   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal`
+   > error because, at that moment, your RAGFlow may not be fully initialized.
 
 5. In your web browser, enter the IP address of your server and log in to RAGFlow.
   > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default
-   HTTP serving port `80` can be omitted when using the default configurations.
+   > HTTP serving port `80` can be omitted when using the default configurations.
-6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update
+6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update
   the `API_KEY` field with the corresponding API key.
 
   > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.
@@ -228,17 +234,11 @@ When it comes to system configurations, you will need to manage the following fi
 
 - [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and
   `MINIO_PASSWORD`.
-- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services. The environment variables in this file will be automatically populated when the Docker container starts. Any environment variables set within the Docker container will be available for use, allowing you to customize service behavior based on the deployment environment.
-- [docker-compose.yml](./docker/docker-compose.yml): The system relies
-  on [docker-compose.yml](./docker/docker-compose.yml) to start up.
+- [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.
 
-You must ensure that changes to the [.env](./docker/.env) file are in line with what are in
-the [service_conf.yaml](./docker/service_conf.yaml) file.
-
 > The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service
-> configurations, and you are REQUIRED to ensure that all environment settings listed in
-> the [./docker/README](./docker/README.md) file are aligned with the corresponding configurations in
-> the [service_conf.yaml](./docker/service_conf.yaml) file.
+> configurations which can be used as `${ENV_VARS}` in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file.
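The move from a static `service_conf.yaml` to a template is the substantive change here: placeholders in the template are filled from the container environment at startup. A minimal sketch of the mechanism (the fragment in the comment is hypothetical; the real keys live in **docker/service_conf.yaml.template**):

```bash
# A template line such as:
#   password: '${MYSQL_PASSWORD}'
# is rendered with whatever value the container was started with, which you can inspect:
docker exec ragflow-server printenv MYSQL_PASSWORD SVR_HTTP_PORT
```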
 To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80`
 to `<YOUR_SERVING_PORT>:80`.
@@ -246,19 +246,41 @@ to `<YOUR_SERVING_PORT>:80`.
 Updates to the above configurations require a reboot of all containers to take effect:
 
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```
 
+### Switch doc engine from Elasticsearch to Infinity
+
+RAGFlow uses Elasticsearch by default for storing full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:
+
+1. Stop all running containers:
+
+   ```bash
+   $ docker compose -f docker/docker-compose.yml down -v
+   ```
+
+   > [!WARNING]
+   > `-v` will delete the docker container volumes, and the existing data will be cleared.
+
+2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
+
+3. Start the containers:
+
+   ```bash
+   $ docker compose -f docker-compose.yml up -d
+   ```
+
+   > [!WARNING]
+   > Switching to Infinity on a Linux/arm64 machine is not yet officially supported.
+
 ## 🔧 Build a Docker image without embedding models
 
-This image is approximately 1 GB in size and relies on external LLM and embedding services.
+This image is approximately 2 GB in size and relies on external LLM and embedding services.
 
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```
 
 ## 🔧 Build a Docker image including embedding models
@@ -268,36 +290,38 @@ This image is approximately 9 GB in size. As it includes embedding models, it re
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```
 
 ## 🔨 Launch service from source for development
 
-1. Install Poetry, or skip this step if it is already installed:
+1. Install uv, or skip this step if it is already installed:
 
    ```bash
-   curl -sSL https://install.python-poetry.org | python3 -
+   pipx install uv pre-commit
   ```
 
 2. Clone the source code and install Python dependencies:
 
   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
-   export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
-   ~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
+   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+   uv run download_deps.py
+   pre-commit install
   ```
 
 3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:
 
  ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```
 
-   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/service_conf.yaml** to `127.0.0.1`:
+   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:
 
+   ```
+   127.0.0.1       es01 infinity mysql minio redis sandbox-executor-manager
   ```
-   127.0.0.1       es01 mysql minio redis
-   ```
-   In **docker/service_conf.yaml**, update mysql port to `5455` and es port to `1200`, as specified in **docker/.env**.
 
 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
 
@@ -305,46 +329,68 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .
   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```
 
-5. Launch backend service:
+5. If your operating system does not have jemalloc, please install it as follows:
+
+   ```bash
+   # ubuntu
+   sudo apt-get install libjemalloc-dev
+   # centos
+   sudo yum install jemalloc
+   ```
+
+6. Launch backend service:
 
   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```
 
-6. Install frontend dependencies:
+7. Install frontend dependencies:
 
  ```bash
   cd web
-   npm install --force
+   npm install
   ```
-7. Configure frontend to update `proxy.target` in **.umirc.ts** to `http://127.0.0.1:9380`:
 8. Launch frontend service:
 
  ```bash
   npm run dev
   ```
 
   _The following output confirms a successful launch of the system:_
 
  
+
+9. Stop RAGFlow front-end and back-end service after development is complete:
+
+   ```bash
+   pkill -f "ragflow_server.py|task_executor.py"
+   ```
 
 ## 📚 Documentation
 
 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/guides)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
 - [References](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [FAQs](https://ragflow.io/docs/dev/faq)
 
 ## 📜 Roadmap
 
-See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)
+See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
 
 ## 🏄 Community
 
-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/NjYzJD3GM3)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
 
 ## 🙌 Contributing
 
 RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community.
-If you would like to be a part, review our [Contribution Guidelines](./CONTRIBUTING.md) first.
+If you would like to be a part, review our [Contribution Guidelines](https://ragflow.io/docs/dev/contributing) first.
README_id.md (new file, 365 lines)
@@ -0,0 +1,365 @@
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="Logo ragflow">
</a>
</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>

<p align="center">
    <a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
        <img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="Ikuti di X (Twitter)">
    </a>
    <a href="https://demo.ragflow.io" target="_blank">
        <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
    </a>
    <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
    </a>
    <a href="https://github.com/infiniflow/ragflow/releases/latest">
        <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
    </a>
    <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
        <img height="21" src="https://img.shields.io/badge/Lisensi-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="Lisensi">
    </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary><b>📕 Daftar Isi</b></summary>

- 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
- 🎮 [Demo](#-demo)
- 📌 [Pembaruan Terbaru](#-pembaruan-terbaru)
- 🌟 [Fitur Utama](#-fitur-utama)
- 🔎 [Arsitektur Sistem](#-arsitektur-sistem)
- 🎬 [Mulai](#-mulai)
- 🔧 [Konfigurasi](#-konfigurasi)
- 🔧 [Membangun Image Docker tanpa Model Embedding](#-membangun-image-docker-tanpa-model-embedding)
- 🔧 [Membangun Image Docker dengan Model Embedding](#-membangun-image-docker-dengan-model-embedding)
- 🔨 [Meluncurkan aplikasi dari Sumber untuk Pengembangan](#-meluncurkan-aplikasi-dari-sumber-untuk-pengembangan)
- 📚 [Dokumentasi](#-dokumentasi)
- 📜 [Peta Jalan](#-peta-jalan)
- 🏄 [Komunitas](#-komunitas)
- 🙌 [Kontribusi](#-kontribusi)

</details>

## 💡 Apa Itu RAGFlow?

[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source berbasis pemahaman dokumen yang mendalam. Platform ini menyediakan alur kerja RAG yang efisien untuk bisnis dengan berbagai skala, menggabungkan LLM (Large Language Models) untuk menyediakan kemampuan tanya-jawab yang benar dan didukung oleh referensi dari data terstruktur kompleks.

## 🎮 Demo

Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
  <img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Pembaruan Terbaru

- 2025-03-19 Mendukung penggunaan model multi-modal untuk memahami gambar di dalam file PDF atau DOCX.
- 2025-02-28 Dikombinasikan dengan pencarian Internet (Tavily), mendukung penelitian mendalam untuk LLM apa pun.
- 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
- 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.

## 🎉 Tetap Terkini

⭐️ Star repositori kami untuk tetap mendapat informasi tentang fitur baru dan peningkatan menarik! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 Fitur Utama

### 🍭 **"Kualitas Masuk, Kualitas Keluar"**

- Ekstraksi pengetahuan berbasis pemahaman dokumen mendalam dari data tidak terstruktur dengan format yang rumit.
- Menemukan "jarum di tumpukan data" dengan token yang hampir tidak terbatas.

### 🍱 **Pemotongan Berbasis Template**

- Cerdas dan dapat dijelaskan.
- Banyak pilihan template yang tersedia.

### 🌱 **Referensi yang Didasarkan pada Data untuk Mengurangi Halusinasi**

- Visualisasi pemotongan teks memungkinkan intervensi manusia.
- Tampilan cepat referensi kunci dan referensi yang dapat dilacak untuk mendukung jawaban yang didasarkan pada fakta.

### 🍔 **Kompatibilitas dengan Sumber Data Heterogen**

- Mendukung Word, slide, excel, txt, gambar, salinan hasil scan, data terstruktur, halaman web, dan banyak lagi.

### 🛀 **Alur Kerja RAG yang Otomatis dan Mudah**

- Orkestrasi RAG yang ramping untuk bisnis kecil dan besar.
- LLM yang dapat dikonfigurasi serta model embedding.
- Peringkat ulang berpasangan dengan beberapa pengambilan ulang.
- API intuitif untuk integrasi yang mudah dengan bisnis.

## 🔎 Arsitektur Sistem

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Mulai

### 📝 Prasyarat

- CPU >= 4 inti
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Hanya diperlukan jika Anda ingin menggunakan fitur eksekutor kode (sandbox) dari RAGFlow.

> [!TIP]
> Jika Anda belum menginstal Docker di komputer lokal Anda (Windows, Mac, atau Linux), lihat [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Menjalankan Server

1. Pastikan `vm.max_map_count` >= 262144:

   > Untuk memeriksa nilai `vm.max_map_count`:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
   >
   > Jika nilainya kurang dari 262144, setel ulang `vm.max_map_count` ke setidaknya 262144:
   >
   > ```bash
   > # Dalam contoh ini, kita atur menjadi 262144:
   > $ sudo sysctl -w vm.max_map_count=262144
   > ```
   >
   > Perubahan ini akan hilang setelah sistem direboot. Untuk membuat perubahan ini permanen, tambahkan atau perbarui nilai
   > `vm.max_map_count` di **/etc/sysctl.conf**:
   >
   > ```bash
   > vm.max_map_count=262144
   > ```

2. Clone repositori:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```

3. Bangun image Docker pre-built dan jalankan server:

   > [!CAUTION]
   > Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
   > Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).

   > Perintah di bawah ini mengunduh edisi v0.19.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.19.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0 untuk edisi lengkap v0.19.0.

   ```bash
   $ cd ragflow/docker
   # Use CPU for embedding and DeepDoc tasks:
   $ docker compose -f docker-compose.yml up -d

   # To use GPU to accelerate embedding and DeepDoc tasks:
   # docker compose -f docker-compose-gpu.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.19.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.19.0-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

1. Periksa status server setelah server aktif dan berjalan:

   ```bash
   $ docker logs -f ragflow-server
   ```

   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   ```bash

        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
   ```

   > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network abnormal`
   > karena RAGFlow mungkin belum sepenuhnya siap.

2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
   > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
   > port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
3. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
   bidang `API_KEY` dengan kunci API yang sesuai.

   > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.

   _Sistem telah siap digunakan!_

## 🔧 Konfigurasi

Untuk konfigurasi sistem, Anda perlu mengelola file-file berikut:

- [.env](./docker/.env): Menyimpan pengaturan dasar sistem, seperti `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, dan
  `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Mengonfigurasi aplikasi backend.
- [docker-compose.yml](./docker/docker-compose.yml): Sistem ini bergantung pada [docker-compose.yml](./docker/docker-compose.yml) untuk memulai.

Untuk memperbarui port HTTP default (80), buka [docker-compose.yml](./docker/docker-compose.yml) dan ubah `80:80`
menjadi `<YOUR_SERVING_PORT>:80`.

Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:

> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```

## 🔧 Membangun Docker Image tanpa Model Embedding

Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan embedding.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Membangun Docker Image Termasuk Model Embedding

Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hanya bergantung pada aplikasi LLM eksternal.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Menjalankan Aplikasi dari Sumber untuk Pengembangan

1. Instal uv, atau lewati langkah ini jika sudah terinstal:

   ```bash
   pipx install uv pre-commit
   ```

2. Clone kode sumber dan instal dependensi Python:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:

   ```
   127.0.0.1       es01 infinity mysql minio redis sandbox-executor-manager
   ```

4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:

   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Jika sistem operasi Anda tidak memiliki jemalloc, instal sebagai berikut:

   ```bash
   # ubuntu
   sudo apt-get install libjemalloc-dev
   # centos
   sudo yum install jemalloc
   ```

6. Jalankan aplikasi backend:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```

7. Instal dependensi frontend:

   ```bash
   cd web
   npm install
   ```

8. Jalankan aplikasi frontend:

   ```bash
   npm run dev
   ```

   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   

9. Hentikan layanan front-end dan back-end RAGFlow setelah pengembangan selesai:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 Dokumentasi

- [Quickstart](https://ragflow.io/docs/dev/)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Komunitas

- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Kontribusi

RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](https://ragflow.io/docs/dev/contributing).
179
README_ja.md
179
README_ja.md
@@ -7,8 +7,11 @@
 <p align="center">
 <a href="./README.md">English</a> |
 <a href="./README_zh.md">简体中文</a> |
+<a href="./README_tzh.md">繁体中文</a> |
 <a href="./README_ja.md">日本語</a> |
-<a href="./README_ko.md">한국어</a>
+<a href="./README_ko.md">한국어</a> |
+<a href="./README_id.md">Bahasa Indonesia</a> |
+<a href="/README_pt_br.md">Português (Brasil)</a>
 </p>

 <p align="center">
@@ -19,7 +22,7 @@
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -29,12 +32,11 @@
 </a>
 </p>

 <h4 align="center">
 <a href="https://ragflow.io/docs/dev/">Document</a> |
-<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
+<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
-<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>

@@ -45,22 +47,25 @@
 ## 🎮 デモ

 デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。

 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>

 ## 🔥 最新情報

-- 2024-09-29 マルチラウンドダイアログを最適化。
-- 2024-09-13 ナレッジベース Q&A の検索モードを追加しました。
-- 2024-09-09 エージェントに医療相談テンプレートを追加しました。
+- 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
+- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
+- 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
+- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
+- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
 - 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
-- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインド マップをサポートします。

 ## 🎉 続きを楽しみに

 ⭐️ リポジトリをスター登録して、エキサイティングな新機能やアップデートを最新の状態に保ちましょう!すべての新しいリリースに関する即時通知を受け取れます! 🌟

 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
 </div>
@@ -107,7 +112,10 @@
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlowのコード実行(サンドボックス)機能を利用する場合のみ必要です。
+
+> [!TIP]
+> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

 ### 🚀 サーバーを起動

@@ -140,20 +148,29 @@

 3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:

-> 以下のコマンドは、RAGFlow slim(`dev-slim`)の開発版Dockerイメージをダウンロードします。RAGFlow slimのDockerイメージには、埋め込みモデルやPythonライブラリが含まれていないため、サイズは約1GBです。
+> [!CAUTION]
+> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
+> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
+
+> 以下のコマンドは、RAGFlow Docker イメージの v0.19.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.19.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.19.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0 と設定します。

 ```bash
 $ cd ragflow/docker
+# Use CPU for embedding and DeepDoc tasks:
 $ docker compose -f docker-compose.yml up -d
+
+# To use GPU to accelerate embedding and DeepDoc tasks:
+# docker compose -f docker-compose-gpu.yml up -d
 ```

-> - 特定のバージョンのRAGFlow slim Dockerイメージをダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を希望のバージョンに更新します。例えば、`RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`とします。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
-> - RAGFlowの埋め込みモデルとPythonライブラリを含む開発版Dockerイメージをダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を`RAGFLOW_IMAGE=infiniflow/ragflow:dev`に更新します。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
-> - 特定のバージョンのRAGFlow Dockerイメージ(埋め込みモデルとPythonライブラリを含む)をダウンロードするには、**docker/.env**内の`RAGFlow_IMAGE`変数を希望のバージョンに更新します。例えば、`RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`とします。この変更を行った後、上記のコマンドを再実行してダウンロードを開始してください。
-> **NOTE:** 埋め込みモデルとPythonライブラリを含むRAGFlow Dockerイメージのサイズは約9GBであり、読み込みにかなりの時間がかかる場合があります。
+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.19.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.0-slim      | ≈2              | ❌                    | Stable release           |
+| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
+| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

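The note above tells you to edit `RAGFLOW_IMAGE` in **docker/.env** by hand before starting the server; the edit itself can also be scripted. A sketch, assuming the default **docker/.env** layout where the variable is set on a single line:

```bash
# Switch to the full v0.19.0 edition, then recreate the server with the new image.
cd ragflow/docker
sed -i 's|^RAGFLOW_IMAGE=.*|RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0|' .env
docker compose -f docker-compose.yml up -d
```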
-4. サーバーを立ち上げた後、サーバーの状態を確認する:
+1. サーバーを立ち上げた後、サーバーの状態を確認する:

 ```bash
 $ docker logs -f ragflow-server
@@ -162,22 +179,20 @@
 _以下の出力は、システムが正常に起動したことを確認するものです:_

 ```bash
         ____   ___    ______ ______ __
        / __ \ /   |  / ____// ____// /____  _      __
       / /_/ // /| | / / __ / /_   / // __ \| | /| / /
      / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
     /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

 * Running on all addresses (0.0.0.0)
-* Running on http://127.0.0.1:9380
-* Running on http://x.x.x.x:9380
-INFO:werkzeug:Press CTRL+C to quit
 ```

 > もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

-5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
+2. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
 > デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
-6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
+3. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

 > 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。

@@ -188,116 +203,156 @@
 システムコンフィグに関しては、以下のファイルを管理する必要がある:

 - [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
-- [service_conf.yaml](./docker/service_conf.yaml): バックエンドのサービスを設定します。
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): バックエンドのサービスを設定します。
 - [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存している。

-[.env](./docker/.env) ファイルの変更が [service_conf.yaml](./docker/service_conf.yaml) ファイルの内容と一致していることを確認する必要があります。
+[.env](./docker/.env) ファイルの変更が [service_conf.yaml.template](./docker/service_conf.yaml.template) ファイルの内容と一致していることを確認する必要があります。

-> [./docker/README](./docker/README.md) ファイルは環境設定とサービスコンフィグの詳細な説明を提供し、[./docker/README](./docker/README.md) ファイルに記載されている全ての環境設定が [service_conf.yaml](./docker/service_conf.yaml) ファイルの対応するコンフィグと一致していることを確認することが義務付けられています。
+> [./docker/README](./docker/README.md) ファイルには、service_conf.yaml.template ファイルで ${ENV_VARS} として使用できる環境設定とサービス構成の詳細な説明が含まれています。

 デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。

 > すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
 >
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

-## 🔧 ソースコードでDockerイメージを作成(埋め込みモデルなし)
+### Elasticsearch から Infinity にドキュメントエンジンを切り替えます
+
+RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトルを保存します。[Infinity](https://github.com/infiniflow/infinity/) に切り替えるには、次の手順に従います。
+
+1. 実行中のすべてのコンテナを停止するには:
+   ```bash
+   $ docker compose -f docker/docker-compose.yml down -v
+   ```
+   Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。
+2. **docker/.env** の `DOC_ENGINE` を `infinity` に設定します。
+3. コンテナを起動:
+   ```bash
+   $ docker compose -f docker-compose.yml up -d
+   ```
+   > [!WARNING]
+   > Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。

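Step 2 of the section added above is a one-line edit to **docker/.env**. Scripted, under the assumption that the file already contains a `DOC_ENGINE=` line, it could look like:

```bash
# Point the document engine at Infinity, then bring the stack back up.
sed -i 's|^DOC_ENGINE=.*|DOC_ENGINE=infinity|' docker/.env
docker compose -f docker/docker-compose.yml up -d
```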
+## 🔧 ソースコードで Docker イメージを作成(埋め込みモデルなし)

 この Docker イメージのサイズは約 1GB で、外部の大モデルと埋め込みサービスに依存しています。

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

-## 🔧 ソースコードをコンパイルしたDockerイメージ(埋め込みモデルを含む)
+## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)

 この Docker のサイズは約 9GB で、埋め込みモデルを含むため、外部の大モデルサービスのみが必要です。

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```

 ## 🔨 ソースコードからサービスを起動する方法

-1. Poetry をインストールする。すでにインストールされている場合は、このステップをスキップしてください:
+1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:

 ```bash
-curl -sSL https://install.python-poetry.org | python3 -
+pipx install uv pre-commit
 ```

 2. ソースコードをクローンし、Python の依存関係をインストールする:

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
-~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
+uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+uv run download_deps.py
+pre-commit install
 ```

 3. Docker Compose を使用して依存サービス(MinIO、Elasticsearch、Redis、MySQL)を起動する:

 ```bash
 docker compose -f docker/docker-compose-base.yml up -d
 ```

-`/etc/hosts` に以下の行を追加して、**docker/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:
+`/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:

 ```
-127.0.0.1       es01 mysql minio redis
+127.0.0.1       es01 infinity mysql minio redis sandbox-executor-manager
 ```
-
-**docker/service_conf.yaml** で mysql のポートを `5455` に、es のポートを `1200` に更新します(**docker/.env** に指定された通り).

 4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:

 ```bash
 export HF_ENDPOINT=https://hf-mirror.com
 ```

-5. バックエンドサービスを起動する:
+5. オペレーティングシステムにjemallocがない場合は、次のようにインストールします:
+
+```bash
+# ubuntu
+sudo apt-get install libjemalloc-dev
+# centos
+sudo yum install jemalloc
+```
+
+6. バックエンドサービスを起動する:

 ```bash
 source .venv/bin/activate
 export PYTHONPATH=$(pwd)
 bash docker/launch_backend_service.sh
 ```

-6. フロントエンドの依存関係をインストールする:
+7. フロントエンドの依存関係をインストールする:

 ```bash
 cd web
-npm install --force
+npm install
 ```

-7. フロントエンドを設定し、**.umirc.ts** の `proxy.target` を `http://127.0.0.1:9380` に更新します:
-
 8. フロントエンドサービスを起動する:

 ```bash
 npm run dev
 ```

 _以下の画面で、システムが正常に起動したことを示します:_

-![](https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb)
+![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

+9. 開発が完了したら、RAGFlow のフロントエンド サービスとバックエンド サービスを停止します:
+
+```bash
+pkill -f "ragflow_server.py|task_executor.py"
+```

 ## 📚 ドキュメンテーション

 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/guides)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
 - [References](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [FAQs](https://ragflow.io/docs/dev/faq)

 ## 📜 ロードマップ

-[RAGFlow ロードマップ 2024](https://github.com/infiniflow/ragflow/issues/162) を参照
+[RAGFlow ロードマップ 2025](https://github.com/infiniflow/ragflow/issues/4214) を参照

 ## 🏄 コミュニティ

-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/NjYzJD3GM3)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

 ## 🙌 コントリビュート

-RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](./CONTRIBUTING.md)をご覧ください。
+RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](https://ragflow.io/docs/dev/contributing)をご覧ください。

README_ko.md (197 lines changed)
@@ -7,8 +7,11 @@
 <p align="center">
 <a href="./README.md">English</a> |
 <a href="./README_zh.md">简体中文</a> |
+<a href="./README_tzh.md">繁体中文</a> |
 <a href="./README_ja.md">日本語</a> |
 <a href="./README_ko.md">한국어</a> |
+<a href="./README_id.md">Bahasa Indonesia</a> |
+<a href="/README_pt_br.md">Português (Brasil)</a>
 </p>

 <p align="center">
@@ -19,7 +22,7 @@
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -29,76 +32,72 @@
 </a>
 </p>

 <h4 align="center">
 <a href="https://ragflow.io/docs/dev/">Document</a> |
-<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
+<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
-<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>

 ## 💡 RAGFlow란?

 [RAGFlow](https://ragflow.io/)는 심층 문서 이해에 기반한 오픈소스 RAG (Retrieval-Augmented Generation) 엔진입니다. 이 엔진은 대규모 언어 모델(LLM)과 결합하여 정확한 질문 응답 기능을 제공하며, 다양한 복잡한 형식의 데이터에서 신뢰할 수 있는 출처를 바탕으로 한 인용을 통해 이를 뒷받침합니다. RAGFlow는 규모에 상관없이 모든 기업에 최적화된 RAG 워크플로우를 제공합니다.

 ## 🎮 데모

 데모를 [https://demo.ragflow.io](https://demo.ragflow.io)에서 실행해 보세요.

 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>

 ## 🔥 업데이트

-- 2024-09-29 다단계 대화를 최적화합니다.
-- 2024-09-13 지식베이스 Q&A 검색 모드를 추가합니다.
-- 2024-09-09 Agent에 의료상담 템플릿을 추가하였습니다.
+- 2025-03-19 PDF 또는 DOCX 파일 내의 이미지를 이해하기 위해 다중 모드 모델을 사용하는 것을 지원합니다.
+- 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
+- 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
+- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
+- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
 - 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.
-- 2024-08-02: [graphrag](https://github.com/microsoft/graphrag)와 마인드맵에서 영감을 받은 GraphRAG를 지원합니다.

 ## 🎉 계속 지켜봐 주세요

 ⭐️우리의 저장소를 즐겨찾기에 등록하여 흥미로운 새로운 기능과 업데이트를 최신 상태로 유지하세요! 모든 새로운 릴리스에 대한 즉시 알림을 받으세요! 🌟

 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
 </div>

 ## 🌟 주요 기능

 ### 🍭 **"Quality in, quality out"**

 - [심층 문서 이해](./deepdoc/README.md)를 기반으로 복잡한 형식의 비정형 데이터에서 지식을 추출합니다.
 - 문자 그대로 무한한 토큰에서 "데이터 속의 바늘"을 찾아냅니다.

 ### 🍱 **템플릿 기반의 chunking**

 - 똑똑하고 설명 가능한 방식.
 - 다양한 템플릿 옵션을 제공합니다.

 ### 🌱 **할루시네이션을 줄인 신뢰할 수 있는 인용**

 - 텍스트 청킹을 시각화하여 사용자가 개입할 수 있도록 합니다.
 - 중요한 참고 자료와 추적 가능한 인용을 빠르게 확인하여 신뢰할 수 있는 답변을 지원합니다.

 ### 🍔 **다른 종류의 데이터 소스와의 호환성**

 - 워드, 슬라이드, 엑셀, 텍스트 파일, 이미지, 스캔본, 구조화된 데이터, 웹 페이지 등을 지원합니다.

 ### 🛀 **자동화되고 손쉬운 RAG 워크플로우**

 - 개인 및 대규모 비즈니스에 맞춘 효율적인 RAG 오케스트레이션.
 - 구성 가능한 LLM 및 임베딩 모델.
 - 다중 검색과 결합된 re-ranking.
 - 비즈니스와 원활하게 통합할 수 있는 직관적인 API.

 ## 🔎 시스템 아키텍처

 <div align="center" style="margin-top:20px;margin-bottom:20px;">
@@ -106,17 +105,22 @@
 </div>

 ## 🎬 시작하기

 ### 📝 사전 준비 사항

 - CPU >= 4 cores
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlow의 코드 실행기(샌드박스) 기능을 사용하려는 경우에만 필요합니다.
+
+> [!TIP]
+> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.

 ### 🚀 서버 시작하기

 1. `vm.max_map_count`가 262144 이상인지 확인하세요:

 > `vm.max_map_count`의 값을 아래 명령어를 통해 확인하세요:
 >
 > ```bash
@@ -144,21 +148,29 @@

 3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:

-> 아래의 명령은 RAGFlow slim(dev-slim)의 개발 버전 Docker 이미지를 다운로드합니다. RAGFlow slim Docker 이미지에는 임베딩 모델이나 Python 라이브러리가 포함되어 있지 않으므로 크기는 약 1GB입니다.
+> [!CAUTION]
+> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
+> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).
+
+> 아래 명령어는 RAGFlow Docker 이미지의 v0.19.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.19.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.19.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0로 설정합니다.

 ```bash
 $ cd ragflow/docker
+# Use CPU for embedding and DeepDoc tasks:
 $ docker compose -f docker-compose.yml up -d
+
+# To use GPU to accelerate embedding and DeepDoc tasks:
+# docker compose -f docker-compose-gpu.yml up -d
 ```

-> - 특정 버전의 RAGFlow slim Docker 이미지를 다운로드하려면, **docker/.env**에서 `RAGFlow_IMAGE` 변수를 원하는 버전으로 업데이트하세요. 예를 들어, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0-slim`으로 설정합니다. 이 변경을 완료한 후, 위의 명령을 다시 실행하여 다운로드를 시작하세요.
-> - RAGFlow의 임베딩 모델과 Python 라이브러리를 포함한 개발 버전 Docker 이미지를 다운로드하려면, **docker/.env**에서 `RAGFlow_IMAGE` 변수를 `RAGFLOW_IMAGE=infiniflow/ragflow:dev`로 업데이트하세요. 이 변경을 완료한 후, 위의 명령을 다시 실행하여 다운로드를 시작하세요.
-> - 특정 버전의 RAGFlow Docker 이미지를 임베딩 모델과 Python 라이브러리를 포함하여 다운로드하려면, **docker/.env**에서 `RAGFlow_IMAGE` 변수를 원하는 버전으로 업데이트하세요. 예를 들어, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0` 로 설정합니다. 이 변경을 완료한 후, 위의 명령을 다시 실행하여 다운로드를 시작하세요.
-> **NOTE:** 임베딩 모델과 Python 라이브러리를 포함한 RAGFlow Docker 이미지의 크기는 약 9GB이며, 로드하는 데 상당히 오랜 시간이 걸릴 수 있습니다.
+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.19.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.0-slim      | ≈2              | ❌                    | Stable release           |
+| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
+| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

-4. 서버가 시작된 후 서버 상태를 확인하세요:
+1. 서버가 시작된 후 서버 상태를 확인하세요:

 ```bash
 $ docker logs -f ragflow-server
@@ -167,22 +179,21 @@
 _다음 출력 결과로 시스템이 성공적으로 시작되었음을 확인합니다:_

 ```bash
         ____   ___    ______ ______ __
        / __ \ /   |  / ____// ____// /____  _      __
       / /_/ // /| | / / __ / /_   / // __ \| | /| / /
      / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
     /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

 * Running on all addresses (0.0.0.0)
-* Running on http://127.0.0.1:9380
-* Running on http://x.x.x.x:9380
-INFO:werkzeug:Press CTRL+C to quit
 ```

-> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network abnormal` 오류가 발생할 수 있습니다.
+> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.

-5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
+2. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
 > 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
-6. [service_conf.yaml](./docker/service_conf.yaml) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
+3. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.

 > 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.

 _이제 쇼가 시작됩니다!_
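If you script the deployment, you can also wait for the condition described above instead of watching the logs by hand. A sketch that polls the default HTTP serving port 80 until the web UI answers; the port is an assumption taken from the default configuration:

```bash
# Poll until RAGFlow responds; give up after roughly two minutes.
for i in $(seq 1 60); do
  curl -sf http://127.0.0.1:80/ >/dev/null && { echo "RAGFlow is up"; break; }
  sleep 2
done
```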
@@ -192,21 +203,38 @@
 시스템 설정과 관련하여 다음 파일들을 관리해야 합니다:

 - [.env](./docker/.env): `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, `MINIO_PASSWORD`와 같은 시스템의 기본 설정을 포함합니다.
-- [service_conf.yaml](./docker/service_conf.yaml): 백엔드 서비스를 구성합니다.
+- [service_conf.yaml.template](./docker/service_conf.yaml.template): 백엔드 서비스를 구성합니다.
 - [docker-compose.yml](./docker/docker-compose.yml): 시스템은 [docker-compose.yml](./docker/docker-compose.yml)을 사용하여 시작됩니다.

-[.env](./docker/.env) 파일의 변경 사항이 [service_conf.yaml](./docker/service_conf.yaml) 파일의 내용과 일치하도록 해야 합니다.
+[.env](./docker/.env) 파일의 변경 사항이 [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일의 내용과 일치하도록 해야 합니다.

-> [./docker/README](./docker/README.md) 파일에는 환경 설정과 서비스 구성에 대한 자세한 설명이 있으며, [./docker/README](./docker/README.md) 파일에 나열된 모든 환경 설정이 [service_conf.yaml](./docker/service_conf.yaml) 파일의 해당 구성과 일치하도록 해야 합니다.
+> [./docker/README](./docker/README.md) 파일은 service_conf.yaml.template 파일에서 ${ENV_VARS}로 사용할 수 있는 환경 설정과 서비스 구성에 대한 자세한 설명을 제공합니다.

 기본 HTTP 서비스 포트(80)를 업데이트하려면 [docker-compose.yml](./docker/docker-compose.yml) 파일에서 `80:80`을 `<YOUR_SERVING_PORT>:80`으로 변경하세요.

 > 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
 >
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```

+### Elasticsearch 에서 Infinity 로 문서 엔진 전환
+
+RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및 벡터를 저장합니다. [Infinity](https://github.com/infiniflow/infinity/) 로 전환하려면 다음 절차를 따르십시오.
+
+1. 실행 중인 모든 컨테이너를 중지합니다.
+   ```bash
+   $ docker compose -f docker/docker-compose.yml down -v
+   ```
+   Note: `-v` 는 docker 컨테이너의 볼륨을 삭제하고 기존 데이터를 지웁니다.
+2. **docker/.env**의 "DOC_ENGINE" 을 "infinity" 로 설정합니다.
+3. 컨테이너 부팅:
+   ```bash
+   $ docker compose -f docker/docker-compose.yml up -d
+   ```
+   > [!WARNING]
+   > Linux/arm64 시스템에서 Infinity로 전환하는 것은 공식적으로 지원되지 않습니다.

 ## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함하지 않음)

 이 Docker 이미지의 크기는 약 1GB이며, 외부 대형 모델과 임베딩 서비스에 의존합니다.

@@ -214,9 +242,7 @@
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```

 ## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)

@@ -226,82 +252,107 @@ docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```

 ## 🔨 소스 코드로 서비스를 시작합니다.

-1. Poetry를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:
+1. uv를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:

 ```bash
-curl -sSL https://install.python-poetry.org | python3 -
+pipx install uv pre-commit
 ```

 2. 소스 코드를 클론하고 Python 의존성을 설치합니다:

 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
-~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
+uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+uv run download_deps.py
+pre-commit install
 ```

 3. Docker Compose를 사용하여 의존 서비스(MinIO, Elasticsearch, Redis 및 MySQL)를 시작합니다:

 ```bash
 docker compose -f docker/docker-compose-base.yml up -d
 ```

-`/etc/hosts` 에 다음 줄을 추가하여 **docker/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:
+`/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:

 ```
-127.0.0.1       es01 mysql minio redis
+127.0.0.1       es01 infinity mysql minio redis sandbox-executor-manager
 ```
-
-**docker/service_conf.yaml** 에서 mysql 포트를 `5455` 로, es 포트를 `1200` 으로 업데이트합니다( **docker/.env** 에 지정된 대로).

 4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:

 ```bash
 export HF_ENDPOINT=https://hf-mirror.com
 ```

-5. 백엔드 서비스를 시작합니다:
+5. 만약 운영 체제에 jemalloc이 없으면 다음 방식으로 설치하세요:
+
+```bash
+# ubuntu
+sudo apt-get install libjemalloc-dev
+# centos
+sudo yum install jemalloc
+```
+
+6. 백엔드 서비스를 시작합니다:

 ```bash
 source .venv/bin/activate
 export PYTHONPATH=$(pwd)
 bash docker/launch_backend_service.sh
 ```

-6. 프론트엔드 의존성을 설치합니다:
+7. 프론트엔드 의존성을 설치합니다:

 ```bash
 cd web
-npm install --force
+npm install
 ```

-7. **.umirc.ts** 에서 `proxy.target` 을 `http://127.0.0.1:9380` 으로 업데이트합니다:
-
 8. 프론트엔드 서비스를 시작합니다:

 ```bash
 npm run dev
 ```

 _다음 인터페이스는 시스템이 성공적으로 시작되었음을 나타냅니다:_

-![](https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb)
+![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

+9. 개발이 완료된 후 RAGFlow 프론트엔드 및 백엔드 서비스를 중지합니다.
+
+```bash
+pkill -f "ragflow_server.py|task_executor.py"
+```

 ## 📚 문서

 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/guides)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
 - [References](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [FAQs](https://ragflow.io/docs/dev/faq)

 ## 📜 로드맵

-[RAGFlow 로드맵 2024](https://github.com/infiniflow/ragflow/issues/162)을 확인하세요.
+[RAGFlow 로드맵 2025](https://github.com/infiniflow/ragflow/issues/4214)을 확인하세요.

 ## 🏄 커뮤니티

-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/NjYzJD3GM3)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

 ## 🙌 컨트리뷰션

-RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](./CONTRIBUTING.md)을 검토해 주세요.
+RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](https://ragflow.io/docs/dev/contributing)을 검토해 주세요.

README_pt_br.md (new file, 382 lines)
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_tzh.md">繁体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a> |
<a href="/README_pt_br.md">Português (Brasil)</a>
</p>

<p align="center">
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="seguir no X(Twitter)">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
</a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="licença">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary><b>📕 Índice</b></summary>

- 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
- 🎮 [Demo](#-demo)
- 📌 [Últimas Atualizações](#-últimas-atualizações)
- 🌟 [Principais Funcionalidades](#-principais-funcionalidades)
- 🔎 [Arquitetura do Sistema](#-arquitetura-do-sistema)
- 🎬 [Primeiros Passos](#-primeiros-passos)
- 🔧 [Configurações](#-configurações)
- 🔧 [Construir uma imagem docker sem incorporar modelos](#-construir-uma-imagem-docker-sem-incorporar-modelos)
- 🔧 [Construir uma imagem docker incluindo modelos](#-construir-uma-imagem-docker-incluindo-modelos)
- 🔨 [Lançar serviço a partir do código-fonte para desenvolvimento](#-lançar-serviço-a-partir-do-código-fonte-para-desenvolvimento)
- 📚 [Documentação](#-documentação)
- 📜 [Roadmap](#-roadmap)
- 🏄 [Comunidade](#-comunidade)
- 🙌 [Contribuindo](#-contribuindo)

</details>

## 💡 O que é o RAGFlow?

[RAGFlow](https://ragflow.io/) é um mecanismo RAG (Geração Aumentada por Recuperação) de código aberto baseado em entendimento profundo de documentos. Ele oferece um fluxo de trabalho RAG simplificado para empresas de qualquer porte, combinando LLMs (Modelos de Linguagem de Grande Escala) para fornecer capacidades de perguntas e respostas verídicas, respaldadas por citações bem fundamentadas de diversos dados complexos formatados.

## 🎮 Demo

Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Últimas Atualizações

- 19-03-2025 Suporta o uso de um modelo multi-modal para entender imagens dentro de arquivos PDF ou DOCX.
- 28-02-2025 Combinado com a pesquisa na Internet (TAVILY), suporta pesquisas profundas para qualquer LLM.
- 26-01-2025 Otimiza a extração e aplicação de gráficos de conhecimento e fornece uma variedade de opções de configuração.
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
- 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
- 22-08-2024 Suporta conversão de texto para comandos SQL via RAG.

## 🎉 Fique Ligado

⭐️ Dê uma estrela no nosso repositório para se manter atualizado com novas funcionalidades e melhorias empolgantes! Receba notificações instantâneas sobre novos lançamentos! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 Principais Funcionalidades

### 🍭 **"Qualidade entra, qualidade sai"**

- Extração de conhecimento baseada em [entendimento profundo de documentos](./deepdoc/README.md) a partir de dados não estruturados com formatos complicados.
- Encontra a "agulha no palheiro de dados" de literalmente tokens ilimitados.

### 🍱 **Fragmentação baseada em templates**

- Inteligente e explicável.
- Muitas opções de templates para escolher.

### 🌱 **Citações fundamentadas com menos alucinações**

- Visualização da fragmentação de texto para permitir intervenção humana.
- Visualização rápida das referências chave e citações rastreáveis para apoiar respostas fundamentadas.

### 🍔 **Compatibilidade com fontes de dados heterogêneas**

- Suporta Word, apresentações, excel, txt, imagens, cópias digitalizadas, dados estruturados, páginas da web e mais.

### 🛀 **Fluxo de trabalho RAG automatizado e sem esforço**

- Orquestração RAG simplificada voltada tanto para negócios pessoais quanto grandes empresas.
- Modelos LLM e de incorporação configuráveis.
- Múltiplas recuperações emparelhadas com reclassificação fundida.
- APIs intuitivas para integração sem problemas com os negócios.

## 🔎 Arquitetura do Sistema

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Primeiros Passos

### 📝 Pré-requisitos

- CPU >= 4 núcleos
- RAM >= 16 GB
- Disco >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Necessário apenas se você pretende usar o recurso de executor de código (sandbox) do RAGFlow.

> [!TIP]
> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Iniciar o servidor

1. Certifique-se de que `vm.max_map_count` >= 262144:

> Para verificar o valor de `vm.max_map_count`:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> Se necessário, redefina `vm.max_map_count` para um valor de pelo menos 262144:
>
> ```bash
> # Neste caso, defina para 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> Essa mudança será resetada após a reinicialização do sistema. Para garantir que a alteração permaneça permanente, adicione ou atualize o valor de `vm.max_map_count` em **/etc/sysctl.conf**:
>
> ```bash
> vm.max_map_count=262144
> ```

2. Clone o repositório:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. Inicie o servidor usando as imagens Docker pré-compiladas:

> [!CAUTION]
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.

> O comando abaixo baixa a edição `v0.19.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.19.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` para a edição completa `v0.19.0`.

```bash
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável?                 |
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
| v0.19.0               | ~9                     | :heavy_check_mark:              | Lançamento estável       |
| v0.19.0-slim          | ~2                     | ❌                              | Lançamento estável       |
| nightly               | ~9                     | :heavy_check_mark:              | _Instável_ build noturno |
| nightly-slim          | ~2                     | ❌                              | _Instável_ build noturno |

4. Verifique o status do servidor após tê-lo iniciado:

```bash
$ docker logs -f ragflow-server
```

_O seguinte resultado confirma o lançamento bem-sucedido do sistema:_

```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

 * Rodando em todos os endereços (0.0.0.0)
```

> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.

5. No seu navegador, insira o endereço IP do seu servidor e faça login no RAGFlow.

> Com as configurações padrão, você só precisa digitar `http://IP_DO_SEU_MÁQUINA` (**sem** o número da porta), pois a porta HTTP padrão `80` pode ser omitida ao usar as configurações padrão.

6. Em [service_conf.yaml.template](./docker/service_conf.yaml.template), selecione a fábrica LLM desejada em `user_default_llm` e atualize o campo `API_KEY` com a chave de API correspondente.

> Consulte [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) para mais informações.

_O show está no ar!_

## 🔧 Configurações

Quando se trata de configurações do sistema, você precisará gerenciar os seguintes arquivos:

- [.env](./docker/.env): Contém as configurações fundamentais para o sistema, como `SVR_HTTP_PORT`, `MYSQL_PASSWORD` e `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configura os serviços de back-end. As variáveis de ambiente neste arquivo serão automaticamente preenchidas quando o contêiner Docker for iniciado. Quaisquer variáveis de ambiente definidas dentro do contêiner Docker estarão disponíveis para uso, permitindo personalizar o comportamento do serviço com base no ambiente de implantação.
- [docker-compose.yml](./docker/docker-compose.yml): O sistema depende do [docker-compose.yml](./docker/docker-compose.yml) para iniciar.

> O arquivo [./docker/README](./docker/README.md) fornece uma descrição detalhada das configurações do ambiente e dos serviços, que podem ser usadas como `${ENV_VARS}` no arquivo [service_conf.yaml.template](./docker/service_conf.yaml.template).

Para atualizar a porta HTTP de serviço padrão (80), vá até [docker-compose.yml](./docker/docker-compose.yml) e altere `80:80` para `<SUA_PORTA_DE_SERVIÇO>:80`.

Atualizações nas configurações acima exigem um reinício de todos os contêineres para que tenham efeito, como no exemplo a seguir:

> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```

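As a concrete instance of the workflow described above: change one of the fundamental settings in **docker/.env**, then restart so it takes effect. `MYSQL_PASSWORD` comes from the list above; the value here is a placeholder, and the `sed` edit assumes the variable is defined on a single line:

```bash
cd ragflow/docker
# Set a new MySQL password in .env.
sed -i 's|^MYSQL_PASSWORD=.*|MYSQL_PASSWORD=change_me_please|' .env
# Recreate the containers so every service picks up the updated configuration.
docker compose -f docker-compose.yml up -d
```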
### Mudar o mecanismo de documentos de Elasticsearch para Infinity

O RAGFlow usa o Elasticsearch por padrão para armazenar texto completo e vetores. Para mudar para o [Infinity](https://github.com/infiniflow/infinity/), siga estas etapas:

1. Pare todos os contêineres em execução:

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` irá deletar os volumes do contêiner, e os dados existentes serão apagados.

2. Defina `DOC_ENGINE` no **docker/.env** para `infinity`.

3. Inicie os contêineres:

```bash
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> A mudança para o Infinity em uma máquina Linux/arm64 ainda não é oficialmente suportada.

## 🔧 Criar uma imagem Docker sem modelos de incorporação

Esta imagem tem cerca de 2 GB de tamanho e depende de serviços externos de LLM e incorporação.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

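After the build finishes, a quick check that the image was produced and tagged as expected:

```bash
# Lists the freshly built tag with its size (roughly 2 GB for the slim build).
docker images infiniflow/ragflow:nightly-slim
```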
|
|
||||||
|
## 🔧 Criar uma imagem Docker incluindo modelos de incorporação
|
||||||
|
|
||||||
|
Esta imagem tem cerca de 9 GB de tamanho. Como inclui modelos de incorporação, depende apenas de serviços externos de LLM.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
|
cd ragflow/
|
||||||
|
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||||
|
```
|
||||||
|
|
||||||
|
## 🔨 Lançar o serviço a partir do código-fonte para desenvolvimento
|
||||||
|
|
||||||
|
1. Instale o `uv`, ou pule esta etapa se ele já estiver instalado:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pipx install uv pre-commit
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Clone o código-fonte e instale as dependências Python:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
|
cd ragflow/
|
||||||
|
uv sync --python 3.10 --all-extras # instala os módulos Python dependentes do RAGFlow
|
||||||
|
uv run download_deps.py
|
||||||
|
pre-commit install
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Inicie os serviços dependentes (MinIO, Elasticsearch, Redis e MySQL) usando Docker Compose:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose -f docker/docker-compose-base.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
Adicione a seguinte linha ao arquivo `/etc/hosts` para resolver todos os hosts especificados em **docker/.env** para `127.0.0.1`:
|
||||||
|
|
||||||
|
```
|
||||||
|
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
|
||||||
|
```
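
Verificação opcional (esboço): cada host acima deve resolver para `127.0.0.1`:

```bash
getent hosts es01 mysql minio redis
```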

4. Se não conseguir acessar o HuggingFace, defina a variável de ambiente `HF_ENDPOINT` para usar um site espelho:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. Se o seu sistema operacional não tiver jemalloc, instale-o da seguinte maneira:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. Lance o serviço de back-end:

```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

7. Instale as dependências do front-end:

```bash
cd web
npm install
```

8. Lance o serviço de front-end:

```bash
npm run dev
```

_O seguinte resultado confirma o lançamento bem-sucedido do sistema:_

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

9. Pare os serviços de front-end e back-end do RAGFlow após a conclusão do desenvolvimento:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 Documentação

- [Quickstart](https://ragflow.io/docs/dev/)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

Veja o [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214).

## 🏄 Comunidade

- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contribuindo

O RAGFlow prospera por meio da colaboração de código aberto. Com esse espírito, abraçamos contribuições diversas da comunidade. Se você deseja fazer parte, primeiro revise nossas [Diretrizes de Contribuição](https://ragflow.io/docs/dev/contributing).

README_tzh.md (new file, 383 lines)
@@ -0,0 +1,383 @@
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a> |
<a href="/README_pt_br.md">Português (Brasil)</a>
</p>

<p align="center">
<a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
<img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow 是什麼?

[RAGFlow](https://ragflow.io/) 是一款基於深度文件理解所建構的開源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以為各種規模的企業及個人提供一套精簡的 RAG 工作流程,結合大語言模型(LLM)針對用戶各類不同的複雜格式數據提供可靠的問答以及有理有據的引用。

## 🎮 Demo 試用

請登入網址 [https://demo.ragflow.io](https://demo.ragflow.io) 試用 demo。

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 近期更新

- 2025-03-19 PDF 和 DOCX 中的圖支持用多模態大模型去解析得到描述。
- 2025-02-28 結合網路搜尋(Tavily),對於任意大模型實現類似 Deep Research 的推理功能。
- 2025-01-26 最佳化知識圖譜的擷取與應用,提供了多種配置選擇。
- 2024-12-18 升級了 DeepDoc 的文檔佈局分析模型。
- 2024-11-01 對解析後的 chunk 加入關鍵字抽取和相關問題產生以提高回想的準確度。
- 2024-08-22 支援用 RAG 技術實現從自然語言到 SQL 語句的轉換。

## 🎉 關注項目

⭐️ 點擊右上角的 Star 追蹤 RAGFlow,可以取得最新發布的即時通知!🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 主要功能

### 🍭 **"Quality in, quality out"**

- 基於[深度文件理解](./deepdoc/README.md),能夠從各類複雜格式的非結構化資料中提取真知灼見。
- 真正在無限上下文(token)的場景下快速完成大海撈針測試。

### 🍱 **基於模板的文字切片**

- 不只是智能,更重要的是可控可解釋。
- 多種文字範本可供選擇。

### 🌱 **有理有據、最大程度降低幻覺(hallucination)**

- 文字切片過程視覺化,支援手動調整。
- 有理有據:答案提供關鍵引用的快照並支持追根溯源。

### 🍔 **相容各類異質資料來源**

- 支援豐富的文件類型,包括 Word 文件、PPT、excel 表格、txt 檔案、圖片、PDF、影印件、結構化資料、網頁等。

### 🛀 **全程無憂、自動化的 RAG 工作流程**

- 全面優化的 RAG 工作流程可以支援從個人應用乃至超大型企業的各類生態系統。
- 大語言模型 LLM 以及向量模型皆支援配置。
- 基於多路召回、融合重排序。
- 提供易用的 API,可輕鬆整合到各類企業系統。

## 🔎 系統架構

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 快速開始

### 📝 前提條件

- CPU >= 4 核
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
- [gVisor](https://gvisor.dev/docs/user_guide/install/): 僅在您打算使用 RAGFlow 的代碼執行器(沙箱)功能時才需要安裝。

> [!TIP]
> 如果你並沒有在本機安裝 Docker(Windows、Mac,或 Linux),可以參考文件 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安裝。

### 🚀 啟動伺服器

1. 確保 `vm.max_map_count` 不小於 262144:

> 如需確認 `vm.max_map_count` 的大小:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> 如果 `vm.max_map_count` 的值小於 262144,可以進行重設:
>
> ```bash
> # 這裡我們設為 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> 你的改動會在下次系統重新啟動時被重置。如果希望做永久改動,還需要在 **/etc/sysctl.conf** 檔案裡把 `vm.max_map_count` 的值再相應更新一遍:
>
> ```bash
> vm.max_map_count=262144
> ```

2. 克隆倉庫:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:

> [!CAUTION]
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。

> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.19.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.19.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` 來下載 RAGFlow 鏡像的 `v0.19.0` 完整發行版。

```bash
$ cd ragflow/docker
# Use CPU for embedding and DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

# To use GPU to accelerate embedding and DeepDoc tasks:
# docker compose -f docker-compose-gpu.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.19.0           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.19.0-slim      | ≈2              | ❌                    | Stable release           |
| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

> [!TIP]
> 如果你遇到 Docker 映像檔拉不下來的問題,可以在 **docker/.env** 檔案內根據變數 `RAGFLOW_IMAGE` 的註解提示選擇華為雲或阿里雲的對應映像。
>
> - 華為雲鏡像名:`swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
> - 阿里雲鏡像名:`registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`

4. 伺服器啟動成功後再次確認伺服器狀態:

```bash
$ docker logs -f ragflow-server
```

_出現以下介面提示說明伺服器啟動成功:_

```bash
    ____   ___    ______ ______ __
   / __ \ /   |  / ____// ____// /____  _      __
  / /_/ // /| | / / __ / /_   / // __ \| | /| / /
 / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
/_/ |_|/_/  |_|\____//_/    /_/  \____/ |__/|__/

 * Running on all addresses (0.0.0.0)
```

> 如果您跳過這一步系統確認步驟就登入 RAGFlow,你的瀏覽器有可能會提示 `network abnormal` 或 `網路異常`,因為 RAGFlow 可能並未完全啟動成功。

5. 在你的瀏覽器中輸入你的伺服器對應的 IP 位址並登入 RAGFlow。

> 上面這個範例中,您只需輸入 http://IP_OF_YOUR_MACHINE 即可:未改動過設定則無需輸入連接埠(預設的 HTTP 服務連接埠 80)。

6. 在 [service_conf.yaml.template](./docker/service_conf.yaml.template) 檔案的 `user_default_llm` 欄位設定 LLM factory,並在 `API_KEY` 欄填入和你選擇的大模型相對應的 API key。

> 詳見 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)。
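
下面是一個僅供參考的最小示意(供應商與 key 均為假設的佔位值,實際欄位請以模板檔案為準):

```yaml
user_default_llm:
  factory: 'OpenAI'      # 假設選用的 LLM 供應商
  api_key: 'sk-xxxxxx'   # 假設的佔位 API key,請替換為真實值
```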

_好戲開始,接著奏樂接著舞!_

## 🔧 系統配置

系統配置涉及以下三份文件:

- [.env](./docker/.env):存放一些系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
- [service_conf.yaml.template](./docker/service_conf.yaml.template):設定各類後台服務。
- [docker-compose.yml](./docker/docker-compose.yml):系統依賴該檔案完成啟動。

請務必確保 [.env](./docker/.env) 檔案中的變數設定與 [service_conf.yaml.template](./docker/service_conf.yaml.template) 檔案中的設定保持一致!

如果無法存取映像網站 hub.docker.com 或模型網站 huggingface.co,請依照 [.env](./docker/.env) 註解修改 `RAGFLOW_IMAGE` 和 `HF_ENDPOINT`。

> [./docker/README](./docker/README.md) 解釋了 [service_conf.yaml.template](./docker/service_conf.yaml.template) 用到的環境變數設定和服務配置。

如需更新預設的 HTTP 服務連接埠(80),可以在 [docker-compose.yml](./docker/docker-compose.yml) 檔案中將配置 `80:80` 改為 `<YOUR_SERVING_PORT>:80`。

> 所有系統配置都需要透過系統重新啟動生效:
>
> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```

### 把文檔引擎從 Elasticsearch 切換成為 Infinity

RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料。如果要切換為 [Infinity](https://github.com/infiniflow/infinity/),可以按照下面步驟進行:

1. 停止所有容器運作:

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

注意:`-v` 將會刪除 docker 容器的 volumes,已有的資料會被清空。

2. 設定 **docker/.env** 檔案中的 `DOC_ENGINE` 為 `infinity`。
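
以下為僅供參考的指令草稿(假設 **docker/.env** 中已存在 `DOC_ENGINE` 一行;直接手動編輯亦可):

```bash
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' docker/.env
```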

3. 啟動容器:

```bash
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> Infinity 目前官方並未正式支援在 Linux/arm64 架構下的機器上運行。

## 🔧 原始碼編譯 Docker 映像(不含 embedding 模型)

本 Docker 映像大小約 2 GB 左右並且依賴外部的大模型和 embedding 服務。

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 原始碼編譯 Docker 映像(包含 embedding 模型)

本 Docker 映像大小約 9 GB 左右。由於已包含 embedding 模型,所以只需依賴外部的大模型服務即可。

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 以原始碼啟動服務

1. 安裝 uv。如已安裝,可跳過此步驟:

```bash
pipx install uv pre-commit
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
```

2. 下載原始碼並安裝 Python 依賴:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```

3. 透過 Docker Compose 啟動依賴的服務(MinIO、Elasticsearch、Redis 和 MySQL):

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

在 `/etc/hosts` 中加入以下內容,將 **conf/service_conf.yaml** 檔案中的所有 host 位址都解析為 `127.0.0.1`:

```
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```
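
可選的檢查草稿:確認上述主機名稱都已解析到本機:

```bash
getent hosts es01 mysql minio redis
```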

4. 如果無法存取 HuggingFace,可以把環境變數 `HF_ENDPOINT` 設為對應的鏡像網站:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. 如果你的作業系統沒有 jemalloc,請按照如下方式安裝:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
```

6. 啟動後端服務:

```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

7. 安裝前端依賴:

```bash
cd web
npm install
```

8. 啟動前端服務:

```bash
npm run dev
```

_以下介面說明系統已成功啟動:_

![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)

9. 開發完成後停止 RAGFlow 前端和後端服務:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```

## 📚 技術文檔

- [Quickstart](https://ragflow.io/docs/dev/)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 路線圖

詳見 [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)。

## 🏄 開源社群

- [Discord](https://discord.gg/zd4qPW6t)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 貢獻指南

RAGFlow 只有透過開源協作才能蓬勃發展。秉持這項精神,我們歡迎來自社區的各種貢獻。如果您有意參與其中,請查閱我們的 [貢獻者指南](https://ragflow.io/docs/dev/contributing)。

## 🤝 商務合作

- [預約諮詢](https://aao615odquw.feishu.cn/share/base/form/shrcnjw7QleretCLqh1nuPo1xxh)

## 👥 加入社區

掃二維碼加入 RAGFlow 小助手,進 RAGFlow 交流群。

<p align="center">
<img src="https://github.com/infiniflow/ragflow/assets/7248/bccf284f-46f2-4445-9809-8f1030fb7585" width=50% height=50%>
</p>

README_zh.md (200 lines changed)
@@ -7,8 +7,11 @@
 <p align="center">
 <a href="./README.md">English</a> |
 <a href="./README_zh.md">简体中文</a> |
+<a href="./README_tzh.md">繁体中文</a> |
 <a href="./README_ja.md">日本語</a> |
-<a href="./README_ko.md">한국어</a>
+<a href="./README_ko.md">한국어</a> |
+<a href="./README_id.md">Bahasa Indonesia</a> |
+<a href="/README_pt_br.md">Português (Brasil)</a>
 </p>
 
 <p align="center">
@@ -19,7 +22,7 @@
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.13.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.13.0">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.19.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.19.0">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -29,12 +32,11 @@
 </a>
 </p>
 
-
 <h4 align="center">
 <a href="https://ragflow.io/docs/dev/">Document</a> |
-<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
+<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
-<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
@@ -45,27 +47,29 @@
 ## 🎮 Demo 试用
 
 请登录网址 [https://demo.ragflow.io](https://demo.ragflow.io) 试用 demo。
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
+<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
 </div>
 
 
 ## 🔥 近期更新
 
-- 2024-09-29 优化多轮对话.
-- 2024-09-13 增加知识库问答搜索模式。
-- 2024-09-09 在 Agent 中加入医疗问诊模板。
+- 2025-03-19 PDF和DOCX中的图支持用多模态大模型去解析得到描述.
+- 2025-02-28 结合互联网搜索(Tavily),对于任意大模型实现类似 Deep Research 的推理功能.
+- 2025-01-26 优化知识图谱的提取和应用,提供了多种配置选择。
+- 2024-12-18 升级了 DeepDoc 的文档布局分析模型。
+- 2024-11-01 对解析后的 chunk 加入关键词抽取和相关问题生成以提高召回的准确度。
 - 2024-08-22 支持用 RAG 技术实现从自然语言到 SQL 语句的转换。
-- 2024-08-02 支持 GraphRAG 启发于 [graphrag](https://github.com/microsoft/graphrag) 和思维导图。
 
 ## 🎉 关注项目
-⭐️点击右上角的 Star 关注RAGFlow,可以获取最新发布的实时通知 !🌟
+
+⭐️ 点击右上角的 Star 关注 RAGFlow,可以获取最新发布的实时通知 !🌟
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
 <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
 </div>
 
 
 ## 🌟 主要功能
 
 ### 🍭 **"Quality in, quality out"**
@@ -108,7 +112,10 @@
 - RAM >= 16 GB
 - Disk >= 50 GB
 - Docker >= 24.0.0 & Docker Compose >= v2.26.1
-> 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。
+- [gVisor](https://gvisor.dev/docs/user_guide/install/): 仅在你打算使用 RAGFlow 的代码执行器(沙箱)功能时才需要安装。
+
+> [!TIP]
+> 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。
 
 ### 🚀 启动服务器
 
@@ -141,18 +148,34 @@
 
 3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:
 
-> 运行以下命令会自动下载 dev 版的 RAGFlow slim Docker 镜像(`dev-slim`),该镜像并不包含 embedding 模型以及一些 Python 库,因此镜像大小约 1GB。
+> [!CAUTION]
+> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
+> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。
+
+> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.19.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.19.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.19.0` 来下载 RAGFlow 镜像的 `v0.19.0` 完整发行版。
 
 ```bash
 $ cd ragflow/docker
+# Use CPU for embedding and DeepDoc tasks:
 $ docker compose -f docker-compose.yml up -d
+
+# To use GPU to accelerate embedding and DeepDoc tasks:
+# docker compose -f docker-compose-gpu.yml up -d
 ```
 
-> - 如果你想下载并运行特定版本的 RAGFlow slim Docker 镜像,请在 **docker/.env** 文件中找到 `RAGFLOW_IMAGE` 变量,将其改为对应版本。例如 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0-slim`,然后再运行上述命令。
-> - 如果您想安装内置 embedding 模型和 Python 库的 dev 版本的 Docker 镜像,需要将 **docker/.env** 文件中的 `RAGFLOW_IMAGE` 变量修改为: `RAGFLOW_IMAGE=infiniflow/ragflow:dev`。
-> - 如果您想安装内置 embedding 模型和 Python 库的指定版本的 RAGFlow Docker 镜像,需要将 **docker/.env** 文件中的 `RAGFLOW_IMAGE` 变量修改为: `RAGFLOW_IMAGE=infiniflow/ragflow:v0.13.0`。修改后,再运行上面的命令。
-> **注意:** 安装内置 embedding 模型和 Python 库的指定版本的 RAGFlow Docker 镜像大小约 9 GB,可能需要更长时间下载,请耐心等待。
+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.19.0           | ≈9              | :heavy_check_mark:    | Stable release           |
+| v0.19.0-slim      | ≈2              | ❌                    | Stable release           |
+| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
+| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
+
+> [!TIP]
+> 如果你遇到 Docker 镜像拉不下来的问题,可以在 **docker/.env** 文件内根据变量 `RAGFLOW_IMAGE` 的注释提示选择华为云或者阿里云的相应镜像。
+>
+> - 华为云镜像名:`swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
+> - 阿里云镜像名:`registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`
 
 4. 服务器启动成功后再次确认服务器状态:
 
 ```bash
@@ -162,22 +185,20 @@
 _出现以下界面提示说明服务器启动成功:_
 
 ```bash
     ____   ___    ______ ______ __
    / __ \ /   |  / ____// ____// /____  _      __
  / /_/ // /| | / / __ / /_   / // __ \| | /| / /
 / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
 /_/ |_|/_/  |_|\____//_/    /_/  \____/ |__/|__/
 
  * Running on all addresses (0.0.0.0)
- * Running on http://127.0.0.1:9380
- * Running on http://x.x.x.x:9380
- INFO:werkzeug:Press CTRL+C to quit
 ```
 
-> 如果您在没有看到上面的提示信息出来之前,就尝试登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`。
+> 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network abnormal` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。
 
 5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
 > 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
-6. 在 [service_conf.yaml](./docker/service_conf.yaml) 文件的 `user_default_llm` 栏配置 LLM factory,并在 `API_KEY` 栏填写和你选择的大模型相对应的 API key。
+6. 在 [service_conf.yaml.template](./docker/service_conf.yaml.template) 文件的 `user_default_llm` 栏配置 LLM factory,并在 `API_KEY` 栏填写和你选择的大模型相对应的 API key。
 
 > 详见 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)。
 
@@ -188,14 +209,14 @@
 系统配置涉及以下三份文件:
 
 - [.env](./docker/.env):存放一些基本的系统环境变量,比如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
-- [service_conf.yaml](./docker/service_conf.yaml):配置各类后台服务。
+- [service_conf.yaml.template](./docker/service_conf.yaml.template):配置各类后台服务。
 - [docker-compose.yml](./docker/docker-compose.yml): 系统依赖该文件完成启动。
 
-请务必确保 [.env](./docker/.env) 文件中的变量设置与 [service_conf.yaml](./docker/service_conf.yaml) 文件中的配置保持一致!
+请务必确保 [.env](./docker/.env) 文件中的变量设置与 [service_conf.yaml.template](./docker/service_conf.yaml.template) 文件中的配置保持一致!
 
-如果不能访问镜像站点hub.docker.com或者模型站点huggingface.co,请按照[.env](./docker/.env)注释修改`RAGFLOW_IMAGE`和`HF_ENDPOINT`。
+如果不能访问镜像站点 hub.docker.com 或者模型站点 huggingface.co,请按照 [.env](./docker/.env) 注释修改 `RAGFLOW_IMAGE` 和 `HF_ENDPOINT`。
 
-> [./docker/README](./docker/README.md) 文件提供了环境变量设置和服务配置的详细信息。请**一定要**确保 [./docker/README](./docker/README.md) 文件当中列出来的环境变量的值与 [service_conf.yaml](./docker/service_conf.yaml) 文件当中的系统配置保持一致。
+> [./docker/README](./docker/README.md) 解释了 [service_conf.yaml.template](./docker/service_conf.yaml.template) 用到的环境变量设置和服务配置。
 
 如需更新默认的 HTTP 服务端口(80), 可以在 [docker-compose.yml](./docker/docker-compose.yml) 文件中将配置 `80:80` 改为 `<YOUR_SERVING_PORT>:80`。
 
@@ -205,16 +226,36 @@
 > $ docker compose -f docker-compose.yml up -d
 > ```
 
+### 把文档引擎从 Elasticsearch 切换成为 Infinity
+
+RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换为 [Infinity](https://github.com/infiniflow/infinity/), 可以按照下面步骤进行:
+
+1. 停止所有容器运行:
+
+```bash
+$ docker compose -f docker/docker-compose.yml down -v
+```
+
+Note: `-v` 将会删除 docker 容器的 volumes,已有的数据会被清空。
+
+2. 设置 **docker/.env** 目录中的 `DOC_ENGINE` 为 `infinity`.
+
+3. 启动容器:
+
+```bash
+$ docker compose -f docker-compose.yml up -d
+```
+
+> [!WARNING]
+> Infinity 目前官方并未正式支持在 Linux/arm64 架构下的机器上运行.
+
 ## 🔧 源码编译 Docker 镜像(不含 embedding 模型)
 
-本 Docker 镜像大小约 1 GB 左右并且依赖外部的大模型和 embedding 服务。
+本 Docker 镜像大小约 2 GB 左右并且依赖外部的大模型和 embedding 服务。
 
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```
 
 ## 🔧 源码编译 Docker 镜像(包含 embedding 模型)
@@ -224,85 +265,109 @@ docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-pip3 install huggingface-hub nltk
-python3 download_deps.py
-docker build -f Dockerfile -t infiniflow/ragflow:dev .
+docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```
 
 ## 🔨 以源代码启动服务
 
-1. 安装 Poetry。如已经安装,可跳过本步骤:
+1. 安装 uv。如已经安装,可跳过本步骤:
 
 ```bash
-curl -sSL https://install.python-poetry.org | python3 -
+pipx install uv pre-commit
+export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
 ```
 
 2. 下载源代码并安装 Python 依赖:
 
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
-~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
+uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+uv run download_deps.py
+pre-commit install
 ```
 
 3. 通过 Docker Compose 启动依赖的服务(MinIO, Elasticsearch, Redis, and MySQL):
 
 ```bash
 docker compose -f docker/docker-compose-base.yml up -d
 ```
 
-在 `/etc/hosts` 中添加以下代码,将 **docker/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
+在 `/etc/hosts` 中添加以下代码,目的是将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
+
 ```
-127.0.0.1       es01 mysql minio redis
+127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
 ```
-在文件 **docker/service_conf.yaml** 中,对照 **docker/.env** 的配置将 mysql 端口更新为 `5455`,es 端口更新为 `1200`。
 
 4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
 
 ```bash
 export HF_ENDPOINT=https://hf-mirror.com
 ```
 
-5. 启动后端服务:
+5. 如果你的操作系统没有 jemalloc,请按照如下方式安装:
+
+```bash
+# ubuntu
+sudo apt-get install libjemalloc-dev
+# centos
+sudo yum install jemalloc
+```
+
+6. 启动后端服务:
 
 ```bash
 source .venv/bin/activate
 export PYTHONPATH=$(pwd)
 bash docker/launch_backend_service.sh
 ```
 
-6. 安装前端依赖:
+7. 安装前端依赖:
 
 ```bash
 cd web
-npm install --force
+npm install
 ```
-7. 配置前端,将 **.umirc.ts** 的 `proxy.target` 更新为 `http://127.0.0.1:9380`:
-8. 启动前端服务:
+
+8. 启动前端服务:
 
 ```bash
 npm run dev
 ```
 
 _以下界面说明系统已经成功启动:_
 
-![](https://github.com/infiniflow/ragflow/assets/12318111/08a62bdc-905e-4e48-bb51-4bd0faa326d9)
+![](https://github.com/user-attachments/assets/0daf462c-a24d-4496-a66f-92533534e187)
+
+9. 开发完成后停止 RAGFlow 前端和后端服务:
+
+```bash
+pkill -f "ragflow_server.py|task_executor.py"
+```
 
 ## 📚 技术文档
 
 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/guides)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
 - [References](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [FAQs](https://ragflow.io/docs/dev/faq)
 
 ## 📜 路线图
 
-详见 [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162) 。
+详见 [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214) 。
 
 ## 🏄 开源社区
 
-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/zd4qPW6t)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
 
 ## 🙌 贡献指南
 
-RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](./CONTRIBUTING.md) 。
+RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](https://ragflow.io/docs/dev/contributing) 。
 
 ## 🤝 商务合作
 
@@ -315,4 +380,3 @@ RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们
 <p align="center">
 <img src="https://github.com/infiniflow/ragflow/assets/7248/bccf284f-46f2-4445-9809-8f1030fb7585" width=50% height=50%>
 </p>
-

@@ -10,7 +10,7 @@ It is used to compose a complex work flow or agent.
 And this graph is beyond the DAG that we can use circles to describe our agent or work flow.
 Under this folder, we propose a test tool ./test/client.py which can test the DSLs such as json files in folder ./test/dsl_examples.
 Please use this client at the same folder you start RAGFlow. If it's run by Docker, please go into the container before running the client.
-Otherwise, correct configurations in conf/service_conf.yaml is essential.
+Otherwise, correct configurations in service_conf.yaml is essential.
 
 ```bash
 PYTHONPATH=path/to/ragflow python graph/test/client.py -h

@@ -11,7 +11,7 @@
 在这个文件夹下,我们提出了一个测试工具 ./test/client.py,
 它可以测试像文件夹./test/dsl_examples下一样的DSL文件。
 请在启动 RAGFlow 的同一文件夹中使用此客户端。如果它是通过 Docker 运行的,请在运行客户端之前进入容器。
-否则,正确配置 conf/service_conf.yaml 文件是必不可少的。
+否则,正确配置 service_conf.yaml 文件是必不可少的。
 
 ```bash
 PYTHONPATH=path/to/ragflow python graph/test/client.py -h

@@ -0,0 +1,18 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from beartype.claw import beartype_this_package
beartype_this_package()

agent/canvas.py (179 lines changed)
@@ -13,21 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import importlib
+import logging
 import json
-import traceback
-from abc import ABC
 from copy import deepcopy
 from functools import partial
 
 import pandas as pd
 
 from agent.component import component_class
 from agent.component.base import ComponentBase
-from agent.settings import flow_logger, DEBUG
 
 
-class Canvas(ABC):
+class Canvas:
     """
     dsl = {
         "components": {
@@ -88,7 +84,8 @@ class Canvas(ABC):
                     }
                 },
                 "downstream": [],
-                "upstream": []
+                "upstream": [],
+                "parent_id": ""
             }
         },
         "history": [],
@@ -139,7 +136,8 @@ class Canvas(ABC):
             "components": {}
         }
         for k in self.dsl.keys():
-            if k in ["components"]:continue
+            if k in ["components"]:
+                continue
             dsl[k] = deepcopy(self.dsl[k])
 
         for k, cpn in self.components.items():
@@ -162,8 +160,16 @@ class Canvas(ABC):
             self.components[k]["obj"].reset()
         self._embed_id = ""
 
-    def run(self, **kwargs):
-        ans = ""
+    def get_component_name(self, cid):
+        for n in self.dsl["graph"]["nodes"]:
+            if cid == n["id"]:
+                return n["data"]["name"]
+        return ""
+
+    def run(self, running_hint_text = "is running...🕞", **kwargs):
+        if not running_hint_text or not isinstance(running_hint_text, str):
+            running_hint_text = "is running...🕞"
+
         if self.answer:
             cpn_id = self.answer[0]
             self.answer.pop(0)
@@ -173,71 +179,107 @@ class Canvas(ABC):
                 ans = ComponentBase.be_output(str(e))
             self.path[-1].append(cpn_id)
             if kwargs.get("stream"):
-                assert isinstance(ans, partial)
-                return ans
-            self.history.append(("assistant", ans.to_dict("records")))
-            return ans
+                for an in ans():
+                    yield an
+            else:
+                yield ans
+            return
 
         if not self.path:
             self.components["begin"]["obj"].run(self.history, **kwargs)
             self.path.append(["begin"])
 
         self.path.append([])
 
         ran = -1
+        waiting = []
+        without_dependent_checking = []
 
         def prepare2run(cpns):
             nonlocal ran, ans
             for c in cpns:
-                if self.path[-1] and c == self.path[-1][-1]: continue
+                if self.path[-1] and c == self.path[-1][-1]:
+                    continue
                 cpn = self.components[c]["obj"]
                 if cpn.component_name == "Answer":
                     self.answer.append(c)
                 else:
-                    if DEBUG: print("RUN: ", c)
-                    if cpn.component_name == "Generate":
-                        cpids = cpn.get_dependent_components()
-                        if any([c not in self.path[-1] for c in cpids]):
-                            continue
-                    ans = cpn.run(self.history, **kwargs)
+                    logging.debug(f"Canvas.prepare2run: {c}")
+                    if c not in without_dependent_checking:
+                        cpids = cpn.get_dependent_components()
+                        if any([cc not in self.path[-1] for cc in cpids]):
+                            if c not in waiting:
+                                waiting.append(c)
+                            continue
+                    yield "*'{}'* {}".format(self.get_component_name(c), running_hint_text)
+
+                    if cpn.component_name.lower() == "iteration":
+                        st_cpn = cpn.get_start()
+                        assert st_cpn, "Start component not found for Iteration."
+                        if not st_cpn["obj"].end():
+                            cpn = st_cpn["obj"]
+                            c = cpn._id
+
+                    try:
+                        ans = cpn.run(self.history, **kwargs)
+                    except Exception as e:
+                        logging.exception(f"Canvas.run got exception: {e}")
+                        self.path[-1].append(c)
+                        ran += 1
+                        raise e
                 self.path[-1].append(c)
+
             ran += 1
 
-        prepare2run(self.components[self.path[-2][-1]]["downstream"])
+        downstream = self.components[self.path[-2][-1]]["downstream"]
+        if not downstream and self.components[self.path[-2][-1]].get("parent_id"):
+            cid = self.path[-2][-1]
+            pid = self.components[cid]["parent_id"]
+            o, _ = self.components[cid]["obj"].output(allow_partial=False)
+            oo, _ = self.components[pid]["obj"].output(allow_partial=False)
+            self.components[pid]["obj"].set_output(pd.concat([oo, o], ignore_index=True).dropna())
+            downstream = [pid]
+
+        for m in prepare2run(downstream):
+            yield {"content": m, "running_status": True}
 
         while 0 <= ran < len(self.path[-1]):
-            if DEBUG: print(ran, self.path)
+            logging.debug(f"Canvas.run: {ran} {self.path}")
             cpn_id = self.path[-1][ran]
             cpn = self.get_component(cpn_id)
-            if not cpn["downstream"]: break
+            if not any([cpn["downstream"], cpn.get("parent_id"), waiting]):
+                break
 
             loop = self._find_loop()
-            if loop: raise OverflowError(f"Too much loops: {loop}")
+            if loop:
+                raise OverflowError(f"Too much loops: {loop}")
 
+            downstream = []
             if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                 switch_out = cpn["obj"].output()[1].iloc[0, 0]
                 assert switch_out in self.components, \
                     "{}'s output: {} not valid.".format(cpn_id, switch_out)
-                try:
-                    prepare2run([switch_out])
-                except Exception as e:
-                    for p in [c for p in self.path for c in p][::-1]:
-                        if p.lower().find("answer") >= 0:
-                            self.get_component(p)["obj"].set_exception(e)
-                            prepare2run([p])
-                            break
-                    traceback.print_exc()
-                    break
-                continue
+                downstream = [switch_out]
+            else:
+                downstream = cpn["downstream"]
 
-            try:
-                prepare2run(cpn["downstream"])
-            except Exception as e:
-                for p in [c for p in self.path for c in p][::-1]:
-                    if p.lower().find("answer") >= 0:
-                        self.get_component(p)["obj"].set_exception(e)
-                        prepare2run([p])
-                        break
-                traceback.print_exc()
-                break
+            if not downstream and cpn.get("parent_id"):
+                pid = cpn["parent_id"]
+                _, o = cpn["obj"].output(allow_partial=False)
+                _, oo = self.components[pid]["obj"].output(allow_partial=False)
+                self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True).dropna())
+                downstream = [pid]
+
+            for m in prepare2run(downstream):
+                yield {"content": m, "running_status": True}
+
+            if ran >= len(self.path[-1]) and waiting:
+                without_dependent_checking = waiting
+                waiting = []
+                for m in prepare2run(without_dependent_checking):
+                    yield {"content": m, "running_status": True}
+                without_dependent_checking = []
+                ran -= 1
 
         if self.answer:
             cpn_id = self.answer[0]
@@ -246,11 +288,13 @@ class Canvas(ABC):
             self.path[-1].append(cpn_id)
             if kwargs.get("stream"):
                 assert isinstance(ans, partial)
-                return ans
-
-            self.history.append(("assistant", ans.to_dict("records")))
-            return ans
+                for an in ans():
+                    yield an
+            else:
+                yield ans
+
+            self.history.append(("assistant", ans.to_dict("records")))
+        else:
+            raise Exception("The dialog flow has no way to interact with you. Please add an 'Interact' component to the end of the flow.")
 
     def get_component(self, cpn_id):
         return self.components[cpn_id]
@@ -261,8 +305,10 @@ class Canvas(ABC):
     def get_history(self, window_size):
         convs = []
         for role, obj in self.history[window_size * -1:]:
-            convs.append({"role": role, "content": (obj if role == "user" else
-                '\n'.join([str(s) for s in pd.DataFrame(obj)['content']]))})
+            if isinstance(obj, list) and obj and all([isinstance(o, dict) for o in obj]):
+                convs.append({"role": role, "content": '\n'.join([str(s.get("content", "")) for s in obj])})
+            else:
+                convs.append({"role": role, "content": str(obj)})
         return convs
 
     def add_user_input(self, question):
@@ -276,19 +322,22 @@ class Canvas(ABC):
 
     def _find_loop(self, max_loops=6):
         path = self.path[-1][::-1]
-        if len(path) < 2: return False
+        if len(path) < 2:
+            return False
 
         for i in range(len(path)):
-            if path[i].lower().find("answer") >= 0:
+            if path[i].lower().find("answer") == 0 or path[i].lower().find("iterationitem") == 0:
                 path = path[:i]
                 break
 
-        if len(path) < 2: return False
+        if len(path) < 2:
+            return False
 
-        for l in range(2, len(path) // 2):
-            pat = ",".join(path[0:l])
+        for loc in range(2, len(path) // 2):
+            pat = ",".join(path[0:loc])
             path_str = ",".join(path)
-            if len(pat) >= len(path_str): return False
+            if len(pat) >= len(path_str):
+                return False
             loop = max_loops
             while path_str.find(pat) == 0 and loop >= 0:
                 loop -= 1
@@ -296,10 +345,26 @@ class Canvas(ABC):
                 return False
             path_str = path_str[len(pat)+1:]
         if loop < 0:
-            pat = " => ".join([p.split(":")[0] for p in path[0:l]])
+            pat = " => ".join([p.split(":")[0] for p in path[0:loc]])
            return pat + " => " + pat
 
         return False
 
     def get_prologue(self):
         return self.components["begin"]["obj"]._param.prologue
+
+    def set_global_param(self, **kwargs):
+        for k, v in kwargs.items():
+            for q in self.components["begin"]["obj"]._param.query:
+                if k != q["key"]:
+                    continue
+                q["value"] = v
+
+    def get_preset_param(self):
+        return self.components["begin"]["obj"]._param.query
+
+    def get_component_input_elements(self, cpnnm):
+        return self.components[cpnnm]["obj"].get_input_elements()
+
+    def set_component_infor(self, cpn_id, infor):
+        self.components[cpn_id]["obj"].set_infor(infor)

@@ -1,3 +1,19 @@
+#
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
 import importlib
 from .begin import Begin, BeginParam
 from .generate import Generate, GenerateParam
@@ -30,9 +46,91 @@ from .tushare import TuShare, TuShareParam
 from .akshare import AkShare, AkShareParam
 from .crawler import Crawler, CrawlerParam
 from .invoke import Invoke, InvokeParam
+from .template import Template, TemplateParam
+from .email import Email, EmailParam
+from .iteration import Iteration, IterationParam
+from .iterationitem import IterationItem, IterationItemParam
+from .code import Code, CodeParam
+
 
 def component_class(class_name):
     m = importlib.import_module("agent.component")
     c = getattr(m, class_name)
     return c
+
+
+__all__ = [
+    "Begin",
+    "BeginParam",
+    "Generate",
+    "GenerateParam",
+    "Retrieval",
+    "RetrievalParam",
+    "Answer",
+    "AnswerParam",
+    "Categorize",
+    "CategorizeParam",
+    "Switch",
+    "SwitchParam",
+    "Relevant",
+    "RelevantParam",
+    "Message",
+    "MessageParam",
+    "RewriteQuestion",
+    "RewriteQuestionParam",
+    "KeywordExtract",
+    "KeywordExtractParam",
+    "Concentrator",
+    "ConcentratorParam",
+    "Baidu",
+    "BaiduParam",
+    "DuckDuckGo",
+    "DuckDuckGoParam",
+    "Wikipedia",
+    "WikipediaParam",
+    "PubMed",
+    "PubMedParam",
+    "ArXiv",
+    "ArXivParam",
+    "Google",
+    "GoogleParam",
+    "Bing",
+    "BingParam",
+    "GoogleScholar",
+    "GoogleScholarParam",
+    "DeepL",
+    "DeepLParam",
+    "GitHub",
+    "GitHubParam",
+    "BaiduFanyi",
+    "BaiduFanyiParam",
+    "QWeather",
+    "QWeatherParam",
+    "ExeSQL",
+    "ExeSQLParam",
+    "YahooFinance",
+    "YahooFinanceParam",
+    "WenCai",
+    "WenCaiParam",
+    "Jin10",
+    "Jin10Param",
+    "TuShare",
+    "TuShareParam",
+    "AkShare",
+    "AkShareParam",
+    "Crawler",
+    "CrawlerParam",
+    "Invoke",
+    "InvokeParam",
+    "Iteration",
+    "IterationParam",
+    "IterationItem",
+    "IterationItemParam",
+    "Template",
+    "TemplateParam",
+    "Email",
+    "EmailParam",
+    "Code",
+    "CodeParam",
+    "component_class"
+]
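
The agent/canvas.py hunks above turn `Canvas.run` into a generator that yields progress dicts (`{"content": ..., "running_status": True}`) before the final answer. A minimal consumption sketch, with the constructor call and the DSL value assumed for illustration:

```python
# Hypothetical caller for the generator-style Canvas.run shown in the hunks above.
# The constructor arguments and the dsl_json value are assumptions for illustration.
from agent.canvas import Canvas

canvas = Canvas(dsl_json, tenant_id)       # dsl_json: a DSL string like those in test/dsl_examples
canvas.add_user_input("What is RAGFlow?")
for chunk in canvas.run(stream=True):
    if isinstance(chunk, dict) and chunk.get("running_status"):
        print("progress:", chunk["content"])   # e.g. "*'Generate'* is running...🕞"
    else:
        print("answer:", chunk)                # a partial (stream) or a DataFrame-backed output
```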
@ -1,56 +1,56 @@
|
|||||||
#
|
#
|
||||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
# You may obtain a copy of the License at
|
# You may obtain a copy of the License at
|
||||||
#
|
#
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
#
|
#
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
from abc import ABC
|
from abc import ABC
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
import akshare as ak
|
from agent.component.base import ComponentBase, ComponentParamBase
|
||||||
from agent.component.base import ComponentBase, ComponentParamBase
|
|
||||||
|
|
||||||
|
class AkShareParam(ComponentParamBase):
|
||||||
class AkShareParam(ComponentParamBase):
|
"""
|
||||||
"""
|
Define the AkShare component parameters.
|
||||||
Define the AkShare component parameters.
|
"""
|
||||||
"""
|
|
||||||
|
def __init__(self):
|
||||||
def __init__(self):
|
super().__init__()
|
||||||
super().__init__()
|
self.top_n = 10
|
||||||
self.top_n = 10
|
|
||||||
|
def check(self):
|
||||||
def check(self):
|
self.check_positive_integer(self.top_n, "Top N")
|
||||||
self.check_positive_integer(self.top_n, "Top N")
|
|
||||||
|
|
||||||
|
class AkShare(ComponentBase, ABC):
|
||||||
class AkShare(ComponentBase, ABC):
|
component_name = "AkShare"
|
||||||
component_name = "AkShare"
|
|
||||||
|
def _run(self, history, **kwargs):
|
||||||
def _run(self, history, **kwargs):
|
import akshare as ak
|
||||||
ans = self.get_input()
|
ans = self.get_input()
|
||||||
ans = ",".join(ans["content"]) if "content" in ans else ""
|
ans = ",".join(ans["content"]) if "content" in ans else ""
|
||||||
if not ans:
|
if not ans:
|
||||||
return AkShare.be_output("")
|
return AkShare.be_output("")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
ak_res = []
|
ak_res = []
|
||||||
stock_news_em_df = ak.stock_news_em(symbol=ans)
|
stock_news_em_df = ak.stock_news_em(symbol=ans)
|
||||||
stock_news_em_df = stock_news_em_df.head(self._param.top_n)
|
stock_news_em_df = stock_news_em_df.head(self._param.top_n)
|
||||||
ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
|
ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
|
||||||
"新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
|
"新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return AkShare.be_output("**ERROR**: " + str(e))
|
return AkShare.be_output("**ERROR**: " + str(e))
|
||||||
|
|
||||||
if not ak_res:
|
if not ak_res:
|
||||||
return AkShare.be_output("")
|
return AkShare.be_output("")
|
||||||
|
|
||||||
return pd.DataFrame(ak_res)
|
return pd.DataFrame(ak_res)
|
||||||
|
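The AkShare hunk above moves `import akshare as ak` from module scope into `_run`, so importing `agent.component` no longer requires akshare to be installed; the dependency is only needed when the component actually executes. A minimal sketch of the same lazy-import pattern (the function name is illustrative):

    def fetch_news(symbol: str, top_n: int = 10):
        # Deferred import: a missing optional dependency fails at call time,
        # not when the package is imported.
        import akshare as ak
        df = ak.stock_news_em(symbol=symbol)  # same call the component makes
        return df.head(top_n)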

@@ -16,6 +16,7 @@
 import random
 from abc import ABC
 from functools import partial
+from typing import Tuple, Union

 import pandas as pd

@@ -76,4 +77,13 @@ class Answer(ComponentBase, ABC):
     def set_exception(self, e):
         self.exception = e

+    def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
+        if allow_partial:
+            return super().output()
+
+        for r, c in self._canvas.history[::-1]:
+            if r == "user":
+                return self._param.output_var_name, pd.DataFrame([{"content": c}])
+
+        return self._param.output_var_name, pd.DataFrame([])

@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import arxiv
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


 class ArXivParam(ComponentParamBase):
     """
     Define the ArXiv component parameters.
@@ -65,5 +64,5 @@ class ArXiv(ComponentBase, ABC):
             return ArXiv.be_output("")

         df = pd.DataFrame(arxiv_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {str(df)}")
         return df

@@ -13,13 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import random
+import logging
 from abc import ABC
-from functools import partial
 import pandas as pd
 import requests
 import re
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -46,7 +44,7 @@ class Baidu(ComponentBase, ABC):
             return Baidu.be_output("")

         try:
-            url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
+            url = 'http://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
             headers = {
                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
             response = requests.get(url=url, headers=headers)
@@ -64,6 +62,6 @@ class Baidu(ComponentBase, ABC):
             return Baidu.be_output("")

         df = pd.DataFrame(baidu_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {str(df)}")
         return df

@@ -39,9 +39,6 @@ class BaiduFanyiParam(ComponentParamBase):
         self.check_empty(self.appid, "BaiduFanyi APPID")
         self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
         self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
-        self.check_valid_value(self.trans_type, "Translate domain",
-                               ['it', 'finance', 'machinery', 'senimed', 'novel', 'academic', 'aerospace', 'wiki',
-                                'news', 'law', 'contract'])
         self.check_valid_value(self.source_lang, "Source language",
                                ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                 'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
@@ -96,3 +93,4 @@ class BaiduFanyi(ComponentBase, ABC):

         except Exception as e:
             BaiduFanyi.be_output("**Error**:" + str(e))
+

@@ -17,14 +17,13 @@ from abc import ABC
 import builtins
 import json
 import os
-from copy import deepcopy
+import logging
 from functools import partial
-from typing import List, Dict, Tuple, Union
+from typing import Any, Tuple, Union

 import pandas as pd

 from agent import settings
-from agent.settings import flow_logger, DEBUG

 _FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
 _DEPRECATED_PARAMS = "_deprecated_params"
@@ -35,7 +34,11 @@ _IS_RAW_CONF = "_is_raw_conf"
 class ComponentParamBase(ABC):
     def __init__(self):
         self.output_var_name = "output"
+        self.infor_var_name = "infor"
         self.message_history_window_size = 22
+        self.query = []
+        self.inputs = []
+        self.debug_inputs = []

     def set_name(self, name: str):
         self._name = name
@@ -81,7 +84,6 @@ class ComponentParamBase(ABC):
         return {name: True for name in self.get_feeded_deprecated_params()}

     def __str__(self):
-
         return json.dumps(self.as_dict(), ensure_ascii=False)

     def as_dict(self):
@@ -359,13 +361,13 @@ class ComponentParamBase(ABC):

     def _warn_deprecated_param(self, param_name, descr):
         if self._deprecated_params_set.get(param_name):
-            flow_logger.warning(
+            logging.warning(
                 f"{descr} {param_name} is deprecated and ignored in this version."
             )

     def _warn_to_deprecate_param(self, param_name, descr, new_param):
         if self._deprecated_params_set.get(param_name):
-            flow_logger.warning(
+            logging.warning(
                 f"{descr} {param_name} will be deprecated in future release; "
                 f"please use {new_param} instead."
             )
@@ -383,22 +385,41 @@ class ComponentBase(ABC):
         "params": {}
     }
     """
+        out = getattr(self._param, self._param.output_var_name)
+        if isinstance(out, pd.DataFrame) and "chunks" in out:
+            del out["chunks"]
+            setattr(self._param, self._param.output_var_name, out)
+
         return """{{
             "component_name": "{}",
-            "params": {}
+            "params": {},
+            "output": {},
+            "inputs": {}
         }}""".format(self.component_name,
-                     self._param
-                     )
+                     self._param,
+                     json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
+                     json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False)
+                     )

     def __init__(self, canvas, id, param: ComponentParamBase):
+        from agent.canvas import Canvas  # Local import to avoid cyclic dependency
+        assert isinstance(canvas, Canvas), "canvas must be an instance of Canvas"
         self._canvas = canvas
         self._id = id
         self._param = param
         self._param.check()

+    def get_dependent_components(self):
+        cpnts = set([para["component_id"].split("@")[0] for para in self._param.query \
+                     if para.get("component_id") \
+                     and para["component_id"].lower().find("answer") < 0 \
+                     and para["component_id"].lower().find("begin") < 0])
+        return list(cpnts)
+
     def run(self, history, **kwargs):
-        flow_logger.info("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
-                                                              json.dumps(kwargs, ensure_ascii=False)))
+        logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
+                                                           json.dumps(kwargs, ensure_ascii=False)))
+        self._param.debug_inputs = []
         try:
             res = self._run(history, **kwargs)
             self.set_output(res)
@@ -413,75 +434,169 @@ class ComponentBase(ABC):

     def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
         o = getattr(self._param, self._param.output_var_name)
-        if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
-            if not isinstance(o, list): o = [o]
-            o = pd.DataFrame(o)
+        if not isinstance(o, partial):
+            if not isinstance(o, pd.DataFrame):
+                if isinstance(o, list):
+                    return self._param.output_var_name, pd.DataFrame(o).dropna()
+                if o is None:
+                    return self._param.output_var_name, pd.DataFrame()
+                return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
+            return self._param.output_var_name, o

         if allow_partial or not isinstance(o, partial):
             if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
-                return pd.DataFrame(o if isinstance(o, list) else [o])
+                return pd.DataFrame(o if isinstance(o, list) else [o]).dropna()
             return self._param.output_var_name, o

         outs = None
         for oo in o():
             if not isinstance(oo, pd.DataFrame):
-                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
-            else: outs = oo
+                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo]).dropna()
+            else:
+                outs = oo.dropna()
         return self._param.output_var_name, outs

     def reset(self):
         setattr(self._param, self._param.output_var_name, None)
+        self._param.inputs = []

-    def set_output(self, v: pd.DataFrame):
+    def set_output(self, v):
         setattr(self._param, self._param.output_var_name, v)

+    def set_infor(self, v):
+        setattr(self._param, self._param.infor_var_name, v)
+
+    def _fetch_outputs_from(self, sources: list[dict[str, Any]]) -> list[pd.DataFrame]:
+        outs = []
+        for q in sources:
+            if q.get("component_id"):
+                if "@" in q["component_id"] and q["component_id"].split("@")[0].lower().find("begin") >= 0:
+                    cpn_id, key = q["component_id"].split("@")
+                    for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                        if p["key"] == key:
+                            outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
+                            break
+                    else:
+                        assert False, f"Can't find parameter '{key}' for {cpn_id}"
+                    continue
+
+                if q["component_id"].lower().find("answer") == 0:
+                    txt = []
+                    for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
+                        txt.append(f"{r.upper()}:{c}")
+                    txt = "\n".join(txt)
+                    outs.append(pd.DataFrame([{"content": txt}]))
+                    continue
+
+                outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
+            elif q.get("value"):
+                outs.append(pd.DataFrame([{"content": q["value"]}]))
+        return outs
+
     def get_input(self):
-        upstream_outs = []
+        if self._param.debug_inputs:
+            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])
+
         reversed_cpnts = []
         if len(self._canvas.path) > 1:
             reversed_cpnts.extend(self._canvas.path[-2])
         reversed_cpnts.extend(self._canvas.path[-1])
+        up_cpns = self.get_upstream()
+        reversed_up_cpnts = [cpn for cpn in reversed_cpnts if cpn in up_cpns]

-        if DEBUG: print(self.component_name, reversed_cpnts[::-1])
-        for u in reversed_cpnts[::-1]:
-            if self.get_component_name(u) in ["switch", "concentrator"]: continue
+        if self._param.query:
+            self._param.inputs = []
+            outs = self._fetch_outputs_from(self._param.query)
+
+            for out in outs:
+                records = out.to_dict("records")
+                content: str
+
+                if len(records) > 1:
+                    content = "\n".join(
+                        [str(d["content"]) for d in records]
+                    )
+                else:
+                    content = records[0]["content"]
+
+                self._param.inputs.append({
+                    "component_id": records[0].get("component_id"),
+                    "content": content
+                })
+
+            if outs:
+                df = pd.concat(outs, ignore_index=True)
+                if "content" in df:
+                    df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
+                return df
+
+        upstream_outs = []
+
+        for u in reversed_up_cpnts[::-1]:
+            if self.get_component_name(u) in ["switch", "concentrator"]:
+                continue
             if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
                 o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
                 if o is not None:
+                    o["component_id"] = u
                     upstream_outs.append(o)
                 continue
-            if u not in self._canvas.get_component(self._id)["upstream"]: continue
+            #if self.component_name.lower()!="answer" and u not in self._canvas.get_component(self._id)["upstream"]: continue
             if self.component_name.lower().find("switch") < 0 \
                     and self.get_component_name(u) in ["relevant", "categorize"]:
                 continue
             if u.lower().find("answer") >= 0:
                 for r, c in self._canvas.history[::-1]:
                     if r == "user":
-                        upstream_outs.append(pd.DataFrame([{"content": c}]))
+                        upstream_outs.append(pd.DataFrame([{"content": c, "component_id": u}]))
                         break
                 break
             if self.component_name.lower().find("answer") >= 0 and self.get_component_name(u) in ["relevant"]:
                 continue
             o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
             if o is not None:
+                o["component_id"] = u
                 upstream_outs.append(o)
             break

-        if upstream_outs:
-            df = pd.concat(upstream_outs, ignore_index=True)
-            if "content" in df:
-                df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
-            return df
-
-        return pd.DataFrame(self._canvas.get_history(3)[-1:])
+        assert upstream_outs, "Can't infer where the component input is. Please identify whose output is this component's input."
+
+        df = pd.concat(upstream_outs, ignore_index=True)
+        if "content" in df:
+            df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
+
+        self._param.inputs = []
+        for _, r in df.iterrows():
+            self._param.inputs.append({"component_id": r["component_id"], "content": r["content"]})
+
+        return df
+
+    def get_input_elements(self):
+        assert self._param.query, "Please verify the input parameters first."
+        eles = []
+        for q in self._param.query:
+            if q.get("component_id"):
+                cpn_id = q["component_id"]
+                if cpn_id.split("@")[0].lower().find("begin") >= 0:
+                    cpn_id, key = cpn_id.split("@")
+                    eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
+                    continue
+
+                eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
+            else:
+                eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
+        return eles
+
     def get_stream_input(self):
         reversed_cpnts = []
         if len(self._canvas.path) > 1:
             reversed_cpnts.extend(self._canvas.path[-2])
         reversed_cpnts.extend(self._canvas.path[-1])
+        up_cpns = self.get_upstream()
+        reversed_up_cpnts = [cpn for cpn in reversed_cpnts if cpn in up_cpns]

-        for u in reversed_cpnts[::-1]:
-            if self.get_component_name(u) in ["switch", "answer"]: continue
+        for u in reversed_up_cpnts[::-1]:
+            if self.get_component_name(u) in ["switch", "answer"]:
+                continue
             return self._canvas.get_component(u)["obj"].output()[1]

     @staticmethod
@@ -490,3 +605,14 @@ class ComponentBase(ABC):

     def get_component_name(self, cpn_id):
         return self._canvas.get_component(cpn_id)["obj"].component_name.lower()
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)
+
+    def get_parent(self):
+        pid = self._canvas.get_component(self._id)["parent_id"]
+        return self._canvas.get_component(pid)["obj"]
+
+    def get_upstream(self):
+        cpn_nms = self._canvas.get_component(self._id)['upstream']
+        return cpn_nms
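The reworked `get_input`/`_fetch_outputs_from` path above resolves `self._param.query` entries of three shapes: a `begin@<key>` parameter reference, a plain upstream component id, or an inline `value`. A standalone sketch of that dispatch, with illustrative ids:

    query = [
        {"component_id": "begin@user_name"},  # parameter attached to the Begin component
        {"component_id": "retrieval_0"},      # output of an upstream component (illustrative id)
        {"value": "a literal input"},         # inline constant
    ]

    for q in query:
        if q.get("component_id") and "@" in q["component_id"]:
            cpn_id, key = q["component_id"].split("@")
            print("look up parameter", key, "on component", cpn_id)
        elif q.get("component_id"):
            print("pull the DataFrame output of", q["component_id"])
        elif q.get("value"):
            print("wrap constant:", q["value"])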

@@ -26,6 +26,7 @@ class BeginParam(ComponentParamBase):
     def __init__(self):
         super().__init__()
         self.prologue = "Hi! I'm your smart assistant. What can I do for you?"
+        self.query = []

     def check(self):
         return True
@@ -42,7 +43,7 @@ class Begin(ComponentBase):
     def stream_output(self):
         res = {"content": self._param.prologue}
         yield res
-        self.set_output(res)
+        self.set_output(self.be_output(res))


@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import requests
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


 class BingParam(ComponentParamBase):
     """
     Define the Bing component parameters.
@@ -81,5 +80,5 @@ class Bing(ComponentBase, ABC):
             return Bing.be_output("")

         df = pd.DataFrame(bing_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {str(df)}")
         return df

@@ -13,11 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
-from agent.settings import DEBUG


 class CategorizeParam(GenerateParam):
@@ -34,36 +34,46 @@ class CategorizeParam(GenerateParam):
         super().check()
         self.check_empty(self.category_description, "[Categorize] Category examples")
         for k, v in self.category_description.items():
-            if not k: raise ValueError(f"[Categorize] Category name can not be empty!")
-            if not v.get("to"): raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")
+            if not k:
+                raise ValueError("[Categorize] Category name can not be empty!")
+            if not v.get("to"):
+                raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

-    def get_prompt(self):
+    def get_prompt(self, chat_hist):
         cate_lines = []
         for c, desc in self.category_description.items():
-            for l in desc.get("examples", "").split("\n"):
-                if not l: continue
-                cate_lines.append("Question: {}\tCategory: {}".format(l, c))
+            for line in desc.get("examples", "").split("\n"):
+                if not line:
+                    continue
+                cate_lines.append("USER: {}\nCategory: {}".format(line, c))
         descriptions = []
         for c, desc in self.category_description.items():
             if desc.get("description"):
                 descriptions.append(
-                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))
+                    "\nCategory: {}\nDescription: {}".format(c, desc["description"]))

         self.prompt = """
-You're a text classifier. You need to categorize the user’s questions into {} categories,
-namely: {}
-Here's description of each category:
-{}
-
-You could learn from the following examples:
-{}
-You could learn from the above examples.
-Just mention the category names, no need for any additional words.
+Role: You're a text classifier.
+Task: You need to categorize the user’s questions into {} categories, namely: {}
+
+Here's description of each category:
+{}
+
+You could learn from the following examples:
+{}
+You could learn from the above examples.
+
+Requirements:
+- Just mention the category names, no need for any additional words.
+
+---- Real Data ----
+USER: {}\n
 """.format(
             len(self.category_description.keys()),
             "/".join(list(self.category_description.keys())),
             "\n".join(descriptions),
-            "- ".join(cate_lines)
+            "\n\n- ".join(cate_lines),
+            chat_hist
         )
         return self.prompt

@@ -73,15 +83,28 @@ class Categorize(Generate, ABC):

     def _run(self, history, **kwargs):
         input = self.get_input()
-        input = "Question: " + (list(input["content"])[-1] if "content" in input else "") + "\tCategory: "
+        input = " - ".join(input["content"]) if "content" in input else ""
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
-                            self._param.gen_conf())
-        if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::", input)
+        self._canvas.set_component_infor(self._id, {"prompt": self._param.get_prompt(input), "messages": [{"role": "user", "content": "\nCategory: "}], "conf": self._param.gen_conf()})
+
+        ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
+                            self._param.gen_conf())
+        logging.debug(f"input: {input}, answer: {str(ans)}")
+        # Count the number of times each category appears in the answer.
+        category_counts = {}
         for c in self._param.category_description.keys():
-            if ans.lower().find(c.lower()) >= 0:
-                return Categorize.be_output(self._param.category_description[c]["to"])
+            count = ans.lower().count(c.lower())
+            category_counts[c] = count
+
+        # If a category is found, return the category with the highest count.
+        if any(category_counts.values()):
+            max_category = max(category_counts.items(), key=lambda x: x[1])
+            return Categorize.be_output(self._param.category_description[max_category[0]]["to"])
+
         return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
+
+    def debug(self, **kwargs):
+        df = self._run([], **kwargs)
+        cpn_id = df.iloc[0, 0]
+        return Categorize.be_output(self._canvas.get_component_name(cpn_id))
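The new `Categorize._run` above replaces first-match category selection with frequency counting over the model's answer. A standalone sketch of the selection step, using made-up data:

    ans = "Category: refund. Yes, refund."
    category_description = {"refund": {"to": "refund_flow"}, "other": {"to": "fallback"}}

    category_counts = {c: ans.lower().count(c.lower()) for c in category_description}
    if any(category_counts.values()):
        best = max(category_counts.items(), key=lambda x: x[1])[0]
        print(category_description[best]["to"])  # -> refund_flow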

@@ -1,75 +0,0 @@
-#
-# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from abc import ABC
-
-import pandas as pd
-
-from api.db import LLMType
-from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler
-from agent.component.base import ComponentBase, ComponentParamBase
-
-
-class CiteParam(ComponentParamBase):
-
-    """
-    Define the Retrieval component parameters.
-    """
-    def __init__(self):
-        super().__init__()
-        self.cite_sources = []
-
-    def check(self):
-        self.check_empty(self.cite_source, "Please specify where you want to cite from.")
-
-
-class Cite(ComponentBase, ABC):
-    component_name = "Cite"
-
-    def _run(self, history, **kwargs):
-        input = "\n- ".join(self.get_input()["content"])
-        sources = [self._canvas.get_component(cpn_id).output()[1] for cpn_id in self._param.cite_source]
-        query = []
-        for role, cnt in history[::-1][:self._param.message_history_window_size]:
-            if role != "user": continue
-            query.append(cnt)
-        query = "\n".join(query)
-
-        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
-        if not kbs:
-            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
-        embd_nms = list(set([kb.embd_id for kb in kbs]))
-        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."
-
-        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, embd_nms[0])
-
-        rerank_mdl = None
-        if self._param.rerank_id:
-            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)
-
-        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
-                                        1, self._param.top_n,
-                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
-                                        aggs=False, rerank_mdl=rerank_mdl)
-
-        if not kbinfos["chunks"]: return pd.DataFrame()
-        df = pd.DataFrame(kbinfos["chunks"])
-        df["content"] = df["content_with_weight"]
-        del df["content_with_weight"]
-        return df
-
-

agent/component/code.py  (new file, 138 lines)
@@ -0,0 +1,138 @@
+#
+# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import base64
+from abc import ABC
+from enum import Enum
+from typing import Optional
+
+from pydantic import BaseModel, Field, field_validator
+
+from agent.component.base import ComponentBase, ComponentParamBase
+from api import settings
+
+
+class Language(str, Enum):
+    PYTHON = "python"
+    NODEJS = "nodejs"
+
+
+class CodeExecutionRequest(BaseModel):
+    code_b64: str = Field(..., description="Base64 encoded code string")
+    language: Language = Field(default=Language.PYTHON, description="Programming language")
+    arguments: Optional[dict] = Field(default={}, description="Arguments")
+
+    @field_validator("code_b64")
+    @classmethod
+    def validate_base64(cls, v: str) -> str:
+        try:
+            base64.b64decode(v, validate=True)
+            return v
+        except Exception as e:
+            raise ValueError(f"Invalid base64 encoding: {str(e)}")
+
+    @field_validator("language", mode="before")
+    @classmethod
+    def normalize_language(cls, v) -> str:
+        if isinstance(v, str):
+            low = v.lower()
+            if low in ("python", "python3"):
+                return "python"
+            elif low in ("javascript", "nodejs"):
+                return "nodejs"
+        raise ValueError(f"Unsupported language: {v}")
+
+
+class CodeParam(ComponentParamBase):
+    """
+    Define the code sandbox component parameters.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.lang = "python"
+        self.script = ""
+        self.arguments = []
+        self.address = f"http://{settings.SANDBOX_HOST}:9385/run"
+        self.enable_network = True
+
+    def check(self):
+        self.check_valid_value(self.lang, "Support languages", ["python", "python3", "nodejs", "javascript"])
+        self.check_defined_type(self.enable_network, "Enable network", ["bool"])
+
+
+class Code(ComponentBase, ABC):
+    component_name = "Code"
+
+    def _run(self, history, **kwargs):
+        arguments = {}
+        for input in self._param.arguments:
+            if "@" in input["component_id"]:
+                component_id = input["component_id"].split("@")[0]
+                refered_component_key = input["component_id"].split("@")[1]
+                refered_component = self._canvas.get_component(component_id)["obj"]
+
+                for param in refered_component._param.query:
+                    if param["key"] == refered_component_key:
+                        if "value" in param:
+                            arguments[input["name"]] = param["value"]
+            else:
+                cpn = self._canvas.get_component(input["component_id"])["obj"]
+                if cpn.component_name.lower() == "answer":
+                    arguments[input["name"]] = self._canvas.get_history(1)[0]["content"]
+                    continue
+                _, out = cpn.output(allow_partial=False)
+                if not out.empty:
+                    arguments[input["name"]] = "\n".join(out["content"])
+
+        return self._execute_code(
+            language=self._param.lang,
+            code=self._param.script,
+            arguments=arguments,
+            address=self._param.address,
+            enable_network=self._param.enable_network,
+        )
+
+    def _execute_code(self, language: str, code: str, arguments: dict, address: str, enable_network: bool):
+        import requests
+
+        try:
+            code_b64 = self._encode_code(code)
+            code_req = CodeExecutionRequest(code_b64=code_b64, language=language, arguments=arguments).model_dump()
+        except Exception as e:
+            return Code.be_output("**Error**: construct code request error: " + str(e))
+
+        try:
+            resp = requests.post(url=address, json=code_req, timeout=10)
+            body = resp.json()
+            if body:
+                stdout = body.get("stdout")
+                stderr = body.get("stderr")
+                return Code.be_output(stdout or stderr)
+            else:
+                return Code.be_output("**Error**: There is no response from sandbox")
+
+        except Exception as e:
+            return Code.be_output("**Error**: Internal error in sandbox: " + str(e))
+
+    def _encode_code(self, code: str) -> str:
+        return base64.b64encode(code.encode("utf-8")).decode("utf-8")
+
+    def get_input_elements(self):
+        elements = []
+        for input in self._param.arguments:
+            cpn_id = input["component_id"]
+            elements.append({"key": cpn_id, "name": input["name"]})
+        return elements
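The new Code component above ships scripts to a sandbox service as base64. A minimal request with the same shape; the `code_b64`/`language`/`arguments` fields and the `stdout`/`stderr` response keys come from the code above, while the host is an assumption standing in for `settings.SANDBOX_HOST`:

    import base64
    import requests

    script = "print('hello from the sandbox')"
    payload = {
        "code_b64": base64.b64encode(script.encode("utf-8")).decode("utf-8"),
        "language": "python",
        "arguments": {},
    }
    resp = requests.post("http://localhost:9385/run", json=payload, timeout=10)  # host is illustrative
    body = resp.json()
    print(body.get("stdout") or body.get("stderr"))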

@@ -1,36 +1,36 @@
 #
 # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
 from abc import ABC
 from agent.component.base import ComponentBase, ComponentParamBase


 class ConcentratorParam(ComponentParamBase):
     """
     Define the Concentrator component parameters.
     """

     def __init__(self):
         super().__init__()

     def check(self):
         return True


 class Concentrator(ComponentBase, ABC):
     component_name = "Concentrator"

     def _run(self, history, **kwargs):
         return Concentrator.be_output("")

@@ -17,6 +17,7 @@ from abc import ABC
 import asyncio
 from crawl4ai import AsyncWebCrawler
 from agent.component.base import ComponentBase, ComponentParamBase
+from api.utils.web_utils import is_valid_url


 class CrawlerParam(ComponentParamBase):
@@ -39,8 +40,8 @@ class Crawler(ComponentBase, ABC):
     def _run(self, history, **kwargs):
         ans = self.get_input()
         ans = " - ".join(ans["content"]) if "content" in ans else ""
-        if not ans:
-            return Crawler.be_output("")
+        if not is_valid_url(ans):
+            return Crawler.be_output("URL not valid")
         try:
             result = asyncio.run(self.get_web(ans))

@@ -64,7 +65,3 @@ class Crawler(ComponentBase, ABC):
         elif self._param.extract_type == 'content':
             result.extracted_content
         return result.markdown
-
-
-
-

@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 from abc import ABC
-import re
 from agent.component.base import ComponentBase, ComponentParamBase
 import deepl


@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from duckduckgo_search import DDGS
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -62,5 +62,5 @@ class DuckDuckGo(ComponentBase, ABC):
             return DuckDuckGo.be_output("")

         df = pd.DataFrame(duck_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df

agent/component/email.py  (new file, 141 lines)
@@ -0,0 +1,141 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from abc import ABC
+import json
+import smtplib
+import logging
+from email.mime.text import MIMEText
+from email.mime.multipart import MIMEMultipart
+from email.header import Header
+from email.utils import formataddr
+from agent.component.base import ComponentBase, ComponentParamBase
+
+class EmailParam(ComponentParamBase):
+    """
+    Define the Email component parameters.
+    """
+    def __init__(self):
+        super().__init__()
+        # Fixed configuration parameters
+        self.smtp_server = ""  # SMTP server address
+        self.smtp_port = 465  # SMTP port
+        self.email = ""  # Sender email
+        self.password = ""  # Email authorization code
+        self.sender_name = ""  # Sender name
+
+    def check(self):
+        # Check required parameters
+        self.check_empty(self.smtp_server, "SMTP Server")
+        self.check_empty(self.email, "Email")
+        self.check_empty(self.password, "Password")
+        self.check_empty(self.sender_name, "Sender Name")
+
+class Email(ComponentBase, ABC):
+    component_name = "Email"
+
+    def _run(self, history, **kwargs):
+        # Get upstream component output and parse JSON
+        ans = self.get_input()
+        content = "".join(ans["content"]) if "content" in ans else ""
+        if not content:
+            return Email.be_output("No content to send")
+
+        success = False
+        try:
+            # Parse JSON string passed from upstream
+            email_data = json.loads(content)
+
+            # Validate required fields
+            if "to_email" not in email_data:
+                return Email.be_output("Missing required field: to_email")
+
+            # Create email object
+            msg = MIMEMultipart('alternative')
+
+            # Properly handle sender name encoding
+            msg['From'] = formataddr((str(Header(self._param.sender_name, 'utf-8')), self._param.email))
+            msg['To'] = email_data["to_email"]
+            if "cc_email" in email_data and email_data["cc_email"]:
+                msg['Cc'] = email_data["cc_email"]
+            msg['Subject'] = Header(email_data.get("subject", "No Subject"), 'utf-8').encode()
+
+            # Use content from email_data or default content
+            email_content = email_data.get("content", "No content provided")
+            # msg.attach(MIMEText(email_content, 'plain', 'utf-8'))
+            msg.attach(MIMEText(email_content, 'html', 'utf-8'))
+
+            # Connect to SMTP server and send
+            logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")
+
+            context = smtplib.ssl.create_default_context()
+            with smtplib.SMTP(self._param.smtp_server, self._param.smtp_port) as server:
+                server.ehlo()
+                server.starttls(context=context)
+                server.ehlo()
+                # Login
+                logging.info(f"Attempting to login with email: {self._param.email}")
+                server.login(self._param.email, self._param.password)
+
+                # Get all recipient list
+                recipients = [email_data["to_email"]]
+                if "cc_email" in email_data and email_data["cc_email"]:
+                    recipients.extend(email_data["cc_email"].split(','))
+
+                # Send email
+                logging.info(f"Sending email to recipients: {recipients}")
+                try:
+                    server.send_message(msg, self._param.email, recipients)
+                    success = True
+                except Exception as e:
+                    logging.error(f"Error during send_message: {str(e)}")
+                    # Try alternative method
+                    server.sendmail(self._param.email, recipients, msg.as_string())
+                    success = True
+
+                try:
+                    server.quit()
+                except Exception as e:
+                    # Ignore errors when closing connection
+                    logging.warning(f"Non-fatal error during connection close: {str(e)}")
+
+            if success:
+                return Email.be_output("Email sent successfully")
+
+        except json.JSONDecodeError:
+            error_msg = "Invalid JSON format in input"
+            logging.error(error_msg)
+            return Email.be_output(error_msg)
+
+        except smtplib.SMTPAuthenticationError:
+            error_msg = "SMTP Authentication failed. Please check your email and authorization code."
+            logging.error(error_msg)
+            return Email.be_output(f"Failed to send email: {error_msg}")
+
+        except smtplib.SMTPConnectError:
+            error_msg = f"Failed to connect to SMTP server {self._param.smtp_server}:{self._param.smtp_port}"
+            logging.error(error_msg)
+            return Email.be_output(f"Failed to send email: {error_msg}")
+
+        except smtplib.SMTPException as e:
+            error_msg = f"SMTP error occurred: {str(e)}"
+            logging.error(error_msg)
+            return Email.be_output(f"Failed to send email: {error_msg}")
+
+        except Exception as e:
+            error_msg = f"Unexpected error: {str(e)}"
+            logging.error(error_msg)
+            return Email.be_output(f"Failed to send email: {error_msg}")
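The Email component above expects its upstream input to be a JSON string. Judging from the fields the code reads, a valid payload looks like the following (all addresses are placeholders):

    import json

    payload = json.dumps({
        "to_email": "recipient@example.com",
        "cc_email": "cc1@example.com,cc2@example.com",  # optional, comma-separated
        "subject": "Weekly report",
        "content": "<h1>Hello</h1><p>The body is sent as HTML.</p>",
    })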
@ -15,13 +15,17 @@
|
|||||||
#
|
#
|
||||||
from abc import ABC
|
from abc import ABC
|
||||||
import re
|
import re
|
||||||
|
from copy import deepcopy
|
||||||
|
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
import pymysql
|
import pymysql
|
||||||
import psycopg2
|
import psycopg2
|
||||||
from agent.component.base import ComponentBase, ComponentParamBase
|
from agent.component import GenerateParam, Generate
|
||||||
|
import pyodbc
|
||||||
|
import logging
|
||||||
|
|
||||||
|
|
||||||
class ExeSQLParam(ComponentParamBase):
|
class ExeSQLParam(GenerateParam):
|
||||||
"""
|
"""
|
||||||
Define the ExeSQL component parameters.
|
Define the ExeSQL component parameters.
|
||||||
"""
|
"""
|
||||||
@ -38,7 +42,8 @@ class ExeSQLParam(ComponentParamBase):
|
|||||||
self.top_n = 30
|
self.top_n = 30
|
||||||
|
|
||||||
def check(self):
|
def check(self):
|
||||||
self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb'])
|
super().check()
|
||||||
|
self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
|
||||||
self.check_empty(self.database, "Database name")
|
self.check_empty(self.database, "Database name")
|
||||||
self.check_empty(self.username, "database username")
|
self.check_empty(self.username, "database username")
|
||||||
self.check_empty(self.host, "IP Address")
|
self.check_empty(self.host, "IP Address")
|
||||||
@@ -46,58 +51,104 @@ class ExeSQLParam(ComponentParamBase):
         self.check_empty(self.password, "Database password")
         self.check_positive_integer(self.top_n, "Number of records")
         if self.database == "rag_flow":
-            if self.host == "ragflow-mysql": raise ValueError("The host is not accessible.")
-            if self.password == "infini_rag_flow": raise ValueError("The host is not accessible.")
+            if self.host == "ragflow-mysql":
+                raise ValueError("For security reasons, a database named rag_flow is not supported.")
+            if self.password == "infini_rag_flow":
+                raise ValueError("For security reasons, a database named rag_flow is not supported.")


-class ExeSQL(ComponentBase, ABC):
+class ExeSQL(Generate, ABC):
     component_name = "ExeSQL"

-    def _run(self, history, **kwargs):
-        if not hasattr(self, "_loop"):
-            setattr(self, "_loop", 0)
-        if self._loop >= self._param.loop:
-            self._loop = 0
-            raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
-        self._loop += 1
-
-        ans = self.get_input()
-        ans = "".join(ans["content"]) if "content" in ans else ""
-        ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
+    def _refactor(self, ans):
+        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
+        match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
+        if match:
+            ans = match.group(1)  # Query content
+            return ans
+        else:
+            print("no markdown")
+        ans = re.sub(r'^.*?SELECT ', 'SELECT ', (ans), flags=re.IGNORECASE)
         ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
         ans = re.sub(r';[^;]*$', r';', ans)
         if not ans:
             raise Exception("SQL statement not found!")
+        return ans
+
+    def _run(self, history, **kwargs):
+        ans = self.get_input()
+        ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
+        ans = self._refactor(ans)
         if self._param.db_type in ["mysql", "mariadb"]:
             db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
         elif self._param.db_type == 'postgresql':
             db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
                                   port=self._param.port, password=self._param.password)
+        elif self._param.db_type == 'mssql':
+            conn_str = (
+                r'DRIVER={ODBC Driver 17 for SQL Server};'
+                r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
+                r'DATABASE=' + self._param.database + ';'
+                r'UID=' + self._param.username + ';'
+                r'PWD=' + self._param.password
+            )
+            db = pyodbc.connect(conn_str)
         try:
             cursor = db.cursor()
         except Exception as e:
             raise Exception("Database Connection Failed! \n" + str(e))
+        if not hasattr(self, "_loop"):
+            setattr(self, "_loop", 0)
+        self._loop += 1
+        input_list = re.split(r';', ans.replace(r"\n", " "))
         sql_res = []
-        for single_sql in re.split(r';', ans.replace(r"\n", " ")):
-            if not single_sql:
-                continue
-            try:
-                cursor.execute(single_sql)
-                if cursor.rowcount == 0:
-                    sql_res.append({"content": "\nTotal: 0\n No record in the database!"})
-                    continue
-                single_res = pd.DataFrame([i for i in cursor.fetchmany(size=self._param.top_n)])
-                single_res.columns = [i[0] for i in cursor.description]
-                sql_res.append({"content": "\nTotal: " + str(cursor.rowcount) + "\n" + single_res.to_markdown()})
-            except Exception as e:
-                sql_res.append({"content": "**Error**:" + str(e) + "\nError SQL Statement:" + single_sql})
-                pass
+        for i in range(len(input_list)):
+            single_sql = input_list[i]
+            while self._loop <= self._param.loop:
+                self._loop += 1
+                if not single_sql:
+                    break
+                try:
+                    cursor.execute(single_sql)
+                    if cursor.rowcount == 0:
+                        sql_res.append({"content": "No record in the database!"})
+                        break
+                    if self._param.db_type == 'mssql':
+                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
+                                                               columns=[desc[0] for desc in cursor.description])
+                    else:
+                        single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
+                        single_res.columns = [i[0] for i in cursor.description]
+                    sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
+                    break
+                except Exception as e:
+                    single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
+                    single_sql = self._refactor(single_sql)
+            if self._loop > self._param.loop:
+                sql_res.append({"content": "Can't query the correct data via SQL statement."})
         db.close()

         if not sql_res:
             return ExeSQL.be_output("")

         return pd.DataFrame(sql_res)
+
+    def _regenerate_sql(self, failed_sql, error_message, **kwargs):
+        prompt = f'''
+        ## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
+        ## The original SQL statement is as follows:{failed_sql}.
+        ## The contents of the SQL query error report is as follows:{error_message}.
+        ## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
+        '''
+        self._param.prompt = prompt
+        kwargs_ = deepcopy(kwargs)
+        kwargs_["stream"] = False
+        response = Generate._run(self, [], **kwargs_)
+        try:
+            regenerated_sql = response.loc[0, "content"]
+            return regenerated_sql
+        except Exception as e:
+            logging.error(f"Failed to regenerate SQL: {e}")
+            return None
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)
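The refactored component now retries failed statements instead of bailing out: each error is fed back through `_regenerate_sql` (an LLM call) and `_refactor`, and the statement is re-executed until the loop budget runs out. Below is a minimal, self-contained sketch of that execute/regenerate/retry pattern, using SQLite and a stubbed repair function in place of the LLM round-trip; the names are illustrative, not RAGFlow API.

```python
# Sketch of the execute -> regenerate -> retry pattern in the new ExeSQL._run.
# `repair` stands in for the LLM round-trip (_regenerate_sql + _refactor).
import sqlite3


def run_with_repair(conn, sql, repair, max_loops=3):
    loop = 0
    while loop < max_loops:
        loop += 1
        try:
            cur = conn.cursor()
            cur.execute(sql)
            return cur.fetchall()
        except Exception as e:
            # Ask the "LLM" for a corrected statement, then try again.
            sql = repair(sql, str(e))
    # Mirrors the "Can't query the correct data via SQL statement." fallback.
    return None


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t(x INT)")
conn.execute("INSERT INTO t VALUES (1)")
fix_table_name = lambda sql, err: sql.replace("tt", "t")
print(run_with_repair(conn, "SELECT x FROM tt", fix_table_name))  # [(1,)]
```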
agent/component/generate.py
@@ -13,14 +13,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import re
 from functools import partial
+from typing import Any

 import pandas as pd

 from api.db import LLMType
-from api.db.services.dialog_service import message_fit_in
+from api.db.services.conversation_service import structure_answer
 from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler
+from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from plugin import GlobalPluginManager
+from plugin.llm_tool_plugin import llm_tool_metadata_to_openai_tool
+from rag.llm.chat_model import ToolCallSession
+from rag.prompts import message_fit_in
+
+
+class LLMToolPluginCallSession(ToolCallSession):
+    def tool_call(self, name: str, arguments: dict[str, Any]) -> str:
+        tool = GlobalPluginManager.get_llm_tool_by_name(name)
+
+        if tool is None:
+            raise ValueError(f"LLM tool {name} does not exist")
+
+        return tool().invoke(**arguments)


 class GenerateParam(ComponentParamBase):
@@ -39,6 +55,7 @@ class GenerateParam(ComponentParamBase):
         self.frequency_penalty = 0
         self.cite = True
         self.parameters = []
+        self.llm_enabled_tools = []

     def check(self):
         self.check_decimal_float(self.temperature, "[Generate] Temperature")
@@ -51,11 +68,16 @@ class GenerateParam(ComponentParamBase):

     def gen_conf(self):
         conf = {}
-        if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
-        if self.temperature > 0: conf["temperature"] = self.temperature
-        if self.top_p > 0: conf["top_p"] = self.top_p
-        if self.presence_penalty > 0: conf["presence_penalty"] = self.presence_penalty
-        if self.frequency_penalty > 0: conf["frequency_penalty"] = self.frequency_penalty
+        if self.max_tokens > 0:
+            conf["max_tokens"] = self.max_tokens
+        if self.temperature > 0:
+            conf["temperature"] = self.temperature
+        if self.top_p > 0:
+            conf["top_p"] = self.top_p
+        if self.presence_penalty > 0:
+            conf["presence_penalty"] = self.presence_penalty
+        if self.frequency_penalty > 0:
+            conf["frequency_penalty"] = self.frequency_penalty
         return conf

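The reformatted `gen_conf` forwards a sampling parameter only when it is set to a positive value, so zeroed-out knobs never reach the LLM. A standalone sketch of the same filtering:

```python
# Standalone version of the gen_conf filtering: include a knob only if it is set.
def gen_conf(max_tokens=0, temperature=0, top_p=0, presence_penalty=0, frequency_penalty=0):
    conf = {}
    for key, value in [("max_tokens", max_tokens), ("temperature", temperature),
                       ("top_p", top_p), ("presence_penalty", presence_penalty),
                       ("frequency_penalty", frequency_penalty)]:
        if value > 0:
            conf[key] = value
    return conf


print(gen_conf(temperature=0.7, top_p=0.9))  # {'temperature': 0.7, 'top_p': 0.9}
```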
@@ -63,60 +85,127 @@ class Generate(ComponentBase):
     component_name = "Generate"

     def get_dependent_components(self):
-        cpnts = [para["component_id"] for para in self._param.parameters]
-        return cpnts
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
+        return list(cpnts)

     def set_cite(self, retrieval_res, answer):
-        retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
         if "empty_response" in retrieval_res.columns:
             retrieval_res["empty_response"].fillna("", inplace=True)
-        answer, idx = retrievaler.insert_citations(answer, [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
-                                                   [ck["vector"] for _, ck in retrieval_res.iterrows()],
-                                                   LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
-                                                             self._canvas.get_embedding_model()), tkweight=0.7,
-                                                   vtweight=0.3)
+        chunks = json.loads(retrieval_res["chunks"][0])
+        answer, idx = settings.retrievaler.insert_citations(answer,
+                                                            [ck["content_ltks"] for ck in chunks],
+                                                            [ck["vector"] for ck in chunks],
+                                                            LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
+                                                                      self._canvas.get_embedding_model()), tkweight=0.7,
+                                                            vtweight=0.3)
         doc_ids = set([])
         recall_docs = []
         for i in idx:
-            did = retrieval_res.loc[int(i), "doc_id"]
-            if did in doc_ids: continue
+            did = chunks[int(i)]["doc_id"]
+            if did in doc_ids:
+                continue
             doc_ids.add(did)
-            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})
+            recall_docs.append({"doc_id": did, "doc_name": chunks[int(i)]["docnm_kwd"]})

-        del retrieval_res["vector"]
-        del retrieval_res["content_ltks"]
+        for c in chunks:
+            del c["vector"]
+            del c["content_ltks"]

         reference = {
-            "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
+            "chunks": chunks,
             "doc_aggs": recall_docs
         }

         if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
-            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
+            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
         res = {"content": answer, "reference": reference}
+        res = structure_answer(None, res, "", "")

         return res

+    def get_input_elements(self):
+        key_set = set([])
+        res = [{"key": "user", "name": "Input your question here:"}]
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res
+
     def _run(self, history, **kwargs):
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+
+        if len(self._param.llm_enabled_tools) > 0:
+            tools = GlobalPluginManager.get_llm_tools_by_names(self._param.llm_enabled_tools)
+
+            chat_mdl.bind_tools(
+                LLMToolPluginCallSession(),
+                [llm_tool_metadata_to_openai_tool(t.get_metadata()) for t in tools]
+            )
+
         prompt = self._param.prompt

-        retrieval_res = self.get_input()
-        input = (" - "+"\n - ".join([c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
-        for para in self._param.parameters:
-            cpn = self._canvas.get_component(para["component_id"])["obj"]
+        retrieval_res = []
+        self._param.inputs = []
+        for para in self.get_input_elements()[1:]:
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] == key:
+                        kwargs[para["key"]] = p.get("value", "")
+                        self._param.inputs.append(
+                            {"component_id": para["key"], "content": kwargs[para["key"]]})
+                        break
+                else:
+                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
+                continue
+
+            component_id = para["key"]
+            cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
-                kwargs[para["key"]] = self._canvas.get_history(1)[0]["content"]
+                hist = self._canvas.get_history(1)
+                if hist:
+                    hist = hist[0]["content"]
+                else:
+                    hist = ""
+                kwargs[para["key"]] = hist
                 continue
             _, out = cpn.output(allow_partial=False)
             if "content" not in out.columns:
-                kwargs[para["key"]] = "Nothing"
+                kwargs[para["key"]] = ""
             else:
-                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
+                if cpn.component_name.lower() == "retrieval":
+                    retrieval_res.append(out)
+                kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
+            self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})
+
+        if retrieval_res:
+            retrieval_res = pd.concat(retrieval_res, ignore_index=True)
+        else:
+            retrieval_res = pd.DataFrame([])

-        kwargs["input"] = input
         for n, v in kwargs.items():
-            prompt = re.sub(r"\{%s\}" % re.escape(n), re.escape(str(v)), prompt)
+            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
+
+        if not self._param.inputs and prompt.find("{input}") >= 0:
+            retrieval_res = self.get_input()
+            input = (" - " + "\n - ".join(
+                [c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
+            prompt = re.sub(r"\{input\}", re.escape(input), prompt)

         downstreams = self._canvas.get_component(self._id)["downstream"]
         if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
@@ -124,15 +213,20 @@ class Generate(ComponentBase):
             return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             return pd.DataFrame([res])

         msg = self._canvas.get_history(self._param.message_history_window_size)
+        if len(msg) < 1:
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
+        if len(msg) < 2:
+            msg.append({"role": "user", "content": "Output: "})
         ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
+        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)

-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+        self._canvas.set_component_infor(self._id, {"prompt": msg[0]["content"], "messages": msg[1:], "conf": self._param.gen_conf()})
+        if self._param.cite and "chunks" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, ans)
             return pd.DataFrame([res])

@@ -141,22 +235,42 @@ class Generate(ComponentBase):
     def stream_output(self, chat_mdl, prompt, retrieval_res):
         res = None
         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             yield res
             self.set_output(res)
             return

         msg = self._canvas.get_history(self._param.message_history_window_size)
+        if msg and msg[0]['role'] == 'assistant':
+            msg.pop(0)
+        if len(msg) < 1:
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
+        if len(msg) < 2:
+            msg.append({"role": "user", "content": "Output: "})
         answer = ""
         for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
             res = {"content": ans, "reference": []}
             answer = ans
             yield res

-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+        if self._param.cite and "chunks" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, answer)
             yield res

-        self.set_output(res)
+        self._canvas.set_component_infor(self._id, {"prompt": msg[0]["content"], "messages": msg[1:], "conf": self._param.gen_conf()})
+        self.set_output(Generate.be_output(res))
+
+    def debug(self, **kwargs):
+        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
+        prompt = self._param.prompt
+
+        for para in self._param.debug_inputs:
+            kwargs[para["key"]] = para.get("value", "")
+
+        for n, v in kwargs.items():
+            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
+
+        u = kwargs.get("user")
+        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
+        return pd.DataFrame([ans])
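The new `get_input_elements` discovers prompt variables with the pattern `\{([a-z]+[:@][a-z0-9_-]+)\}`, i.e. keys such as `{Retrieval:SomeComponentId}` or `{begin@file}`, and `_run` then substitutes each collected value into the prompt with backslashes flattened to spaces. A minimal sketch of that two-step flow (the component ids below are made up for illustration):

```python
import re

PROMPT = "Answer using:\n{Retrieval:SmartOwls}\nUser uploaded: {begin@file}"

# Step 1: find the variables referenced in the prompt (same pattern as get_input_elements).
keys = [m.group(1) for m in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", PROMPT, flags=re.IGNORECASE)]
print(keys)  # ['Retrieval:SmartOwls', 'begin@file']

# Step 2: substitute each component's output into the prompt (same re.sub as _run);
# backslashes are flattened to spaces so they cannot corrupt the regex replacement.
values = {"Retrieval:SmartOwls": " - chunk one\n - chunk two", "begin@file": "report.pdf"}
prompt = PROMPT
for n, v in values.items():
    prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
print(prompt)
```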
agent/component/github.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import pandas as pd
 import requests
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -57,5 +57,5 @@ class GitHub(ComponentBase, ABC):
             return GitHub.be_output("")

         df = pd.DataFrame(github_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df
agent/component/google.py
@@ -13,10 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from serpapi import GoogleSearch
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -85,12 +85,12 @@ class Google(ComponentBase, ABC):
                                    "hl": self._param.language, "num": self._param.top_n})
             google_res = [{"content": '<a href="' + i["link"] + '">' + i["title"] + '</a> ' + i["snippet"]} for i in
                           client.get_dict()["organic_results"]]
-        except Exception as e:
+        except Exception:
             return Google.be_output("**ERROR**: Existing Unavailable Parameters!")

         if not google_res:
             return Google.be_output("")

         df = pd.DataFrame(google_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df
agent/component/googlescholar.py
@@ -13,9 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase
 from scholarly import scholarly

@@ -58,13 +58,13 @@ class GoogleScholar(ComponentBase, ABC):
                                         'pub_url'] + '"></a> ' + "\n author: " + ",".join(pub['bib']['author']) + '\n Abstract: ' + pub[
                                         'bib'].get('abstract', 'no abstract')})

-            except StopIteration or Exception as e:
-                print("**ERROR** " + str(e))
+            except StopIteration or Exception:
+                logging.exception("GoogleScholar")
                 break

         if not scholar_res:
             return GoogleScholar.be_output("")

         df = pd.DataFrame(scholar_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df
agent/component/invoke.py
@@ -35,12 +35,14 @@ class InvokeParam(ComponentParamBase):
         self.url = ""
         self.timeout = 60
         self.clean_html = False
+        self.datatype = "json"  # New parameter to determine data posting type

     def check(self):
         self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
         self.check_empty(self.url, "End point URL")
         self.check_positive_integer(self.timeout, "Timeout time in second")
         self.check_boolean(self.clean_html, "Clean HTML")
+        self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata'])  # Check for valid datapost value


 class Invoke(ComponentBase, ABC):
@@ -50,11 +52,24 @@ class Invoke(ComponentBase, ABC):
         args = {}
         for para in self._param.variables:
             if para.get("component_id"):
-                cpn = self._canvas.get_component(para["component_id"])["obj"]
-                _, out = cpn.output(allow_partial=False)
-                args[para["key"]] = "\n".join(out["content"])
+                if '@' in para["component_id"]:
+                    component = para["component_id"].split('@')[0]
+                    field = para["component_id"].split('@')[1]
+                    cpn = self._canvas.get_component(component)["obj"]
+                    for param in cpn._param.query:
+                        if param["key"] == field:
+                            if "value" in param:
+                                args[para["key"]] = param["value"]
+                else:
+                    cpn = self._canvas.get_component(para["component_id"])["obj"]
+                    if cpn.component_name.lower() == "answer":
+                        args[para["key"]] = self._canvas.get_history(1)[0]["content"]
+                        continue
+                    _, out = cpn.output(allow_partial=False)
+                    if not out.empty:
+                        args[para["key"]] = "\n".join(out["content"])
             else:
-                args[para["key"]] = "\n".join(para["value"])
+                args[para["key"]] = para["value"]

         url = self._param.url.strip()
         if url.find("http") != 0:
@@ -81,22 +96,36 @@ class Invoke(ComponentBase, ABC):
             return Invoke.be_output(response.text)

         if method == 'put':
-            response = requests.put(url=url,
-                                    data=args,
-                                    headers=headers,
-                                    proxies=proxies,
-                                    timeout=self._param.timeout)
+            if self._param.datatype.lower() == 'json':
+                response = requests.put(url=url,
+                                        json=args,
+                                        headers=headers,
+                                        proxies=proxies,
+                                        timeout=self._param.timeout)
+            else:
+                response = requests.put(url=url,
+                                        data=args,
+                                        headers=headers,
+                                        proxies=proxies,
+                                        timeout=self._param.timeout)
             if self._param.clean_html:
                 sections = HtmlParser()(None, response.content)
                 return Invoke.be_output("\n".join(sections))
             return Invoke.be_output(response.text)

         if method == 'post':
-            response = requests.post(url=url,
-                                     json=args,
-                                     headers=headers,
-                                     proxies=proxies,
-                                     timeout=self._param.timeout)
+            if self._param.datatype.lower() == 'json':
+                response = requests.post(url=url,
+                                         json=args,
+                                         headers=headers,
+                                         proxies=proxies,
+                                         timeout=self._param.timeout)
+            else:
+                response = requests.post(url=url,
+                                         data=args,
+                                         headers=headers,
+                                         proxies=proxies,
+                                         timeout=self._param.timeout)
             if self._param.clean_html:
                 sections = HtmlParser()(None, response.content)
                 return Invoke.be_output("\n".join(sections))
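The `datatype` switch added here only decides whether `requests` serializes `args` as a JSON body (`json=`) or as form fields (`data=`). A sketch of the dispatch, factored into a single helper rather than the duplicated branches above (the helper name and URL are illustrative):

```python
import requests


def send(method, url, args, datatype="json", headers=None, timeout=60):
    # json= serializes args as a JSON body; data= sends form-encoded fields.
    kwargs = {"json": args} if datatype.lower() == "json" else {"data": args}
    return requests.request(method, url, headers=headers, timeout=timeout, **kwargs)


# Example (network call, so left commented):
# resp = send("post", "https://httpbin.org/post", {"q": "hello"}, datatype="formdata")
```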
agent/component/iteration.py (new file, 45 lines)
@@ -0,0 +1,45 @@
+#
+#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+from abc import ABC
+from agent.component.base import ComponentBase, ComponentParamBase
+
+
+class IterationParam(ComponentParamBase):
+    """
+    Define the Iteration component parameters.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.delimiter = ","
+
+    def check(self):
+        self.check_empty(self.delimiter, "Delimiter")
+
+
+class Iteration(ComponentBase, ABC):
+    component_name = "Iteration"
+
+    def get_start(self):
+        for cid in self._canvas.components.keys():
+            if self._canvas.get_component(cid)["obj"].component_name.lower() != "iterationitem":
+                continue
+            if self._canvas.get_component(cid)["parent_id"] == self._id:
+                return self._canvas.get_component(cid)
+
+    def _run(self, history, **kwargs):
+        return self.output(allow_partial=False)[1]
agent/component/iterationitem.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+#
+#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+from abc import ABC
+import pandas as pd
+from agent.component.base import ComponentBase, ComponentParamBase
+
+
+class IterationItemParam(ComponentParamBase):
+    """
+    Define the IterationItem component parameters.
+    """
+    def check(self):
+        return True
+
+
+class IterationItem(ComponentBase, ABC):
+    component_name = "IterationItem"
+
+    def __init__(self, canvas, id, param: ComponentParamBase):
+        super().__init__(canvas, id, param)
+        self._idx = 0
+
+    def _run(self, history, **kwargs):
+        parent = self.get_parent()
+        ans = parent.get_input()
+        ans = parent._param.delimiter.join(ans["content"]) if "content" in ans else ""
+        ans = [a.strip() for a in ans.split(parent._param.delimiter)]
+        if not ans:
+            self._idx = -1
+            return pd.DataFrame()
+
+        df = pd.DataFrame([{"content": ans[self._idx]}])
+        self._idx += 1
+        if self._idx >= len(ans):
+            self._idx = -1
+        return df
+
+    def end(self):
+        return self._idx == -1
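Taken together, the two new files implement a foreach construct: `Iteration` finds its `IterationItem` child and supplies the delimiter, while `IterationItem` emits one element of the delimiter-split input per pass and signals exhaustion by setting `_idx` to -1. The same control flow in plain Python, without the canvas machinery (the class name is hypothetical):

```python
class IterationItemSketch:
    """Yields one delimiter-separated element per run() call, like IterationItem."""

    def __init__(self, text, delimiter=","):
        self.items = [a.strip() for a in text.split(delimiter)]
        self._idx = 0

    def run(self):
        item = self.items[self._idx]
        self._idx += 1
        if self._idx >= len(self.items):
            self._idx = -1  # makes end() return True
        return item

    def end(self):
        return self._idx == -1


it = IterationItemSketch("a, b, c")
while True:
    print(it.run())  # prints a, then b, then c
    if it.end():
        break
```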
agent/component/jin10.py
@@ -1,130 +1,130 @@
 #
 #  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
 #  You may obtain a copy of the License at
 #
 #      http://www.apache.org/licenses/LICENSE-2.0
 #
 #  Unless required by applicable law or agreed to in writing, software
 #  distributed under the License is distributed on an "AS IS" BASIS,
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
 import json
 from abc import ABC
 import pandas as pd
 import requests
 from agent.component.base import ComponentBase, ComponentParamBase


 class Jin10Param(ComponentParamBase):
     """
     Define the Jin10 component parameters.
     """

     def __init__(self):
         super().__init__()
         self.type = "flash"
         self.secret_key = "xxx"
         self.flash_type = '1'
         self.calendar_type = 'cj'
         self.calendar_datatype = 'data'
         self.symbols_type = 'GOODS'
         self.symbols_datatype = 'symbols'
         self.contain = ""
         self.filter = ""

     def check(self):
         self.check_valid_value(self.type, "Type", ['flash', 'calendar', 'symbols', 'news'])
         self.check_valid_value(self.flash_type, "Flash Type", ['1', '2', '3', '4', '5'])
         self.check_valid_value(self.calendar_type, "Calendar Type", ['cj', 'qh', 'hk', 'us'])
         self.check_valid_value(self.calendar_datatype, "Calendar DataType", ['data', 'event', 'holiday'])
         self.check_valid_value(self.symbols_type, "Symbols Type", ['GOODS', 'FOREX', 'FUTURE', 'CRYPTO'])
         self.check_valid_value(self.symbols_datatype, 'Symbols DataType', ['symbols', 'quotes'])


 class Jin10(ComponentBase, ABC):
     component_name = "Jin10"

     def _run(self, history, **kwargs):
         ans = self.get_input()
         ans = " - ".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return Jin10.be_output("")

         jin10_res = []
         headers = {'secret-key': self._param.secret_key}
         try:
             if self._param.type == "flash":
                 params = {
                     'category': self._param.flash_type,
                     'contain': self._param.contain,
                     'filter': self._param.filter
                 }
                 response = requests.get(
                     url='https://open-data-api.jin10.com/data-api/flash?category=' + self._param.flash_type,
                     headers=headers, data=json.dumps(params))
                 response = response.json()
                 for i in response['data']:
                     jin10_res.append({"content": i['data']['content']})
             if self._param.type == "calendar":
                 params = {
                     'category': self._param.calendar_type
                 }
                 response = requests.get(
                     url='https://open-data-api.jin10.com/data-api/calendar/' + self._param.calendar_datatype + '?category=' + self._param.calendar_type,
                     headers=headers, data=json.dumps(params))

                 response = response.json()
                 jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
             if self._param.type == "symbols":
                 params = {
                     'type': self._param.symbols_type
                 }
                 if self._param.symbols_datatype == "quotes":
                     params['codes'] = 'BTCUSD'
                 response = requests.get(
                     url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
                     headers=headers, data=json.dumps(params))
                 response = response.json()
                 if self._param.symbols_datatype == "symbols":
                     for i in response['data']:
                         i['Commodity Code'] = i['c']
                         i['Stock Exchange'] = i['e']
                         i['Commodity Name'] = i['n']
                         i['Commodity Type'] = i['t']
                         del i['c'], i['e'], i['n'], i['t']
                 if self._param.symbols_datatype == "quotes":
                     for i in response['data']:
                         i['Selling Price'] = i['a']
                         i['Buying Price'] = i['b']
                         i['Commodity Code'] = i['c']
                         i['Stock Exchange'] = i['e']
                         i['Highest Price'] = i['h']
                         i['Yesterday’s Closing Price'] = i['hc']
                         i['Lowest Price'] = i['l']
                         i['Opening Price'] = i['o']
                         i['Latest Price'] = i['p']
                         i['Market Quote Time'] = i['t']
                         del i['a'], i['b'], i['c'], i['e'], i['h'], i['hc'], i['l'], i['o'], i['p'], i['t']
                 jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
             if self._param.type == "news":
                 params = {
                     'contain': self._param.contain,
                     'filter': self._param.filter
                 }
                 response = requests.get(
                     url='https://open-data-api.jin10.com/data-api/news',
                     headers=headers, data=json.dumps(params))
                 response = response.json()
                 jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
         except Exception as e:
             return Jin10.be_output("**ERROR**: " + str(e))

         if not jin10_res:
             return Jin10.be_output("")

         return pd.DataFrame(jin10_res)
agent/component/keyword.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import re
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
-from agent.settings import DEBUG


 class KeywordExtractParam(GenerateParam):
@@ -50,16 +50,23 @@ class KeywordExtract(Generate, ABC):
     component_name = "KeywordExtract"

     def _run(self, history, **kwargs):
-        q = ""
-        for r, c in self._canvas.history[::-1]:
-            if r == "user":
-                q += c
-                break
+        query = self.get_input()
+        if hasattr(query, "to_dict") and "content" in query:
+            query = ", ".join(map(str, query["content"].dropna()))
+        else:
+            query = str(query)

         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
+        self._canvas.set_component_infor(self._id, {"prompt": self._param.get_prompt(), "messages": [{"role": "user", "content": query}], "conf": self._param.gen_conf()})
+
+        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
                             self._param.gen_conf())
+
+        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
         ans = re.sub(r".*keyword:", "", ans).strip()
-        if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"ans: {ans}")
         return KeywordExtract.be_output(ans)
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)
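Note the recurring post-processing in this and the ExeSQL change: a leading `<think>...</think>` block (emitted by reasoning models) is stripped before the answer is parsed, and KeywordExtract additionally drops everything up to the `keyword:` label. A quick demonstration of the two regexes:

```python
import re

ans = "<think>reasoning...</think>\nkeyword: ragflow, agents, retrieval"
ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)  # drop the reasoning block
ans = re.sub(r".*keyword:", "", ans).strip()            # drop everything up to the label
print(ans)  # "ragflow, agents, retrieval"
```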
agent/component/pubmed.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from Bio import Entrez
 import re
 import pandas as pd
 import xml.etree.ElementTree as ET
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase


@@ -65,5 +65,5 @@ class PubMed(ComponentBase, ABC):
             return PubMed.be_output("")

         df = pd.DataFrame(pubmed_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df
agent/component/relevant.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from abc import ABC
 from api.db import LLMType
 from api.db.services.llm_service import LLMBundle
@@ -70,11 +71,13 @@ class Relevant(Generate, ABC):
         ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
                             self._param.gen_conf())

-        print(ans, ":::::::::::::::::::::::::::::::::")
+        logging.debug(ans)
         if ans.lower().find("yes") >= 0:
             return Relevant.be_output(self._param.yes)
         if ans.lower().find("no") >= 0:
             return Relevant.be_output(self._param.no)
         assert False, f"Relevant component got: {ans}"
+
+    def debug(self, **kwargs):
+        return self._run([], **kwargs)
agent/component/retrieval.py
@@ -13,6 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
+import logging
+import re
 from abc import ABC

 import pandas as pd
@@ -20,15 +23,18 @@ import pandas as pd
 from api.db import LLMType
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
-from api.settings import retrievaler
+from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from rag.app.tag import label_question
+from rag.prompts import kb_prompt
+from rag.utils.tavily_conn import Tavily


 class RetrievalParam(ComponentParamBase):

     """
     Define the Retrieval component parameters.
     """

     def __init__(self):
         super().__init__()
         self.similarity_threshold = 0.2
@@ -36,12 +42,15 @@ class RetrievalParam(ComponentParamBase):
         self.top_n = 8
         self.top_k = 1024
         self.kb_ids = []
+        self.kb_vars = []
         self.rerank_id = ""
         self.empty_response = ""
+        self.tavily_api_key = ""
+        self.use_kg = False

     def check(self):
         self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
-        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
+        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
         self.check_positive_number(self.top_n, "[Retrieval] Top N")


@@ -51,25 +60,68 @@ class Retrieval(ComponentBase, ABC):
     def _run(self, history, **kwargs):
         query = self.get_input()
         query = str(query["content"][0]) if "content" in query else ""
+        query = re.split(r"(USER:|ASSISTANT:)", query)[-1]

-        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
+        kb_ids: list[str] = self._param.kb_ids or []
+
+        kb_vars = self._fetch_outputs_from(self._param.kb_vars)
+
+        if len(kb_vars) > 0:
+            for kb_var in kb_vars:
+                if len(kb_var) == 1:
+                    kb_var_value = str(kb_var["content"][0])
+
+                    for v in kb_var_value.split(","):
+                        kb_ids.append(v)
+                else:
+                    for v in kb_var.to_dict("records"):
+                        kb_ids.append(v["content"])
+
+        filtered_kb_ids: list[str] = [kb_id for kb_id in kb_ids if kb_id]
+
+        kbs = KnowledgebaseService.get_by_ids(filtered_kb_ids)
         if not kbs:
             return Retrieval.be_output("")

         embd_nms = list(set([kb.embd_id for kb in kbs]))
         assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

-        embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
-        self._canvas.set_embedding_model(embd_nms[0])
+        embd_mdl = None
+        if embd_nms:
+            embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
+            self._canvas.set_embedding_model(embd_nms[0])

         rerank_mdl = None
         if self._param.rerank_id:
             rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

-        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
-                                        1, self._param.top_n,
-                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
-                                        aggs=False, rerank_mdl=rerank_mdl)
+        if kbs:
+            kbinfos = settings.retrievaler.retrieval(
+                query,
+                embd_mdl,
+                [kb.tenant_id for kb in kbs],
+                filtered_kb_ids,
+                1,
+                self._param.top_n,
+                self._param.similarity_threshold,
+                1 - self._param.keywords_similarity_weight,
+                aggs=False,
+                rerank_mdl=rerank_mdl,
+                rank_feature=label_question(query, kbs),
+            )
+        else:
+            kbinfos = {"chunks": [], "doc_aggs": []}
+
+        if self._param.use_kg and kbs:
+            ck = settings.kg_retrievaler.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl, LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
+            if ck["content_with_weight"]:
+                kbinfos["chunks"].insert(0, ck)
+
+        if self._param.tavily_api_key:
+            tav = Tavily(self._param.tavily_api_key)
+            tav_res = tav.retrieve_chunks(query)
+            kbinfos["chunks"].extend(tav_res["chunks"])
+            kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])

         if not kbinfos["chunks"]:
             df = Retrieval.be_output("")
@@ -77,10 +129,6 @@ class Retrieval(ComponentBase, ABC):
             df["empty_response"] = self._param.empty_response
             return df

-        df = pd.DataFrame(kbinfos["chunks"])
-        df["content"] = df["content_with_weight"]
-        del df["content_with_weight"]
-        print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", query, df)
-        return df
+        df = pd.DataFrame({"content": kb_prompt(kbinfos, 200000), "chunks": json.dumps(kbinfos["chunks"])})
+        logging.debug("{} {}".format(query, df))
+        return df.dropna()
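After this change the component can aggregate chunks from up to three sources: vector search over the knowledge bases, an optional knowledge-graph retriever (`use_kg`), and an optional Tavily web search, all merged into one `kbinfos` dict before being rendered through `kb_prompt`. A structural sketch of that merge with stubbed retrievers (the stub signatures are assumptions for illustration, not RAGFlow APIs):

```python
def merge_sources(query, kb_search, kg_search=None, web_search=None):
    # Base: vector retrieval over the knowledge bases.
    kbinfos = kb_search(query)  # -> {"chunks": [...], "doc_aggs": [...]}

    # A knowledge-graph hit, if any, goes to the front of the chunk list.
    if kg_search:
        ck = kg_search(query)
        if ck:
            kbinfos["chunks"].insert(0, ck)

    # Web results are appended after the local chunks.
    if web_search:
        web = web_search(query)  # -> {"chunks": [...], "doc_aggs": [...]}
        kbinfos["chunks"].extend(web["chunks"])
        kbinfos["doc_aggs"].extend(web["doc_aggs"])
    return kbinfos


demo = merge_sources(
    "what is ragflow?",
    kb_search=lambda q: {"chunks": [{"content_with_weight": "local chunk"}], "doc_aggs": []},
    web_search=lambda q: {"chunks": [{"content_with_weight": "web chunk"}], "doc_aggs": [{"doc_name": "example.com"}]},
)
print([c["content_with_weight"] for c in demo["chunks"]])  # ['local chunk', 'web chunk']
```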
|||||||
@@ -14,97 +14,81 @@
 # limitations under the License.
 #
 from abc import ABC
-from api.db import LLMType
-from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
+from rag.prompts import full_question
 
 
 class RewriteQuestionParam(GenerateParam):
     """
     Define the QuestionRewrite component parameters.
     """
 
     def __init__(self):
         super().__init__()
         self.temperature = 0.9
         self.prompt = ""
-        self.loop = 1
+        self.language = ""
 
     def check(self):
         super().check()
 
-    def get_prompt(self, conv):
-        self.prompt = """
-        You are an expert at query expansion to generate a paraphrasing of a question.
-        I can't retrieval relevant information from the knowledge base by using user's question directly.
-        You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
-        writing the abbreviation in its entirety, adding some extra descriptions or explanations,
-        changing the way of expression, translating the original question into another language (English/Chinese), etc.
-        And return 5 versions of question and one is from translation.
-        Just list the question. No other words are needed.
-        """
-        return f"""
-        Role: A helpful assistant
-        Task: Generate a full user question that would follow the conversation.
-        Requirements & Restrictions:
-          - Text generated MUST be in the same language of the original user's question.
-          - If the user's latest question is completely, don't do anything, just return the original question.
-          - DON'T generate anything except a refined question.
-
-        ######################
-        -Examples-
-        ######################
-        # Example 1
-        ## Conversation
-        USER: What is the name of Donald Trump's father?
-        ASSISTANT: Fred Trump.
-        USER: And his mother?
-        ###############
-        Output: What's the name of Donald Trump's mother?
-        ------------
-        # Example 2
-        ## Conversation
-        USER: What is the name of Donald Trump's father?
-        ASSISTANT: Fred Trump.
-        USER: And his mother?
-        ASSISTANT: Mary Trump.
-        User: What's her full name?
-        ###############
-        Output: What's the full name of Donald Trump's mother Mary Trump?
-        ######################
-        # Real Data
-        ## Conversation
-        {conv}
-        ###############
-        """
-        return self.prompt
-
 
 class RewriteQuestion(Generate, ABC):
     component_name = "RewriteQuestion"
 
     def _run(self, history, **kwargs):
-        if not hasattr(self, "_loop"):
-            setattr(self, "_loop", 0)
-        if self._loop >= self._param.loop:
-            self._loop = 0
-            raise Exception("Sorry! Nothing relevant found.")
-        self._loop += 1
-
-        hist = self._canvas.get_history(4)
-        conv = []
-        for m in hist:
-            if m["role"] not in ["user", "assistant"]: continue
-            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
-        conv = "\n".join(conv)
-
-        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
-                            self._param.gen_conf())
+        hist = self._canvas.get_history(self._param.message_history_window_size)
+        query = self.get_input()
+        query = str(query["content"][0]) if "content" in query else ""
+        messages = [h for h in hist if h["role"]!="system"]
+        if messages[-1]["role"] != "user":
+            messages.append({"role": "user", "content": query})
+        ans = full_question(self._canvas.get_tenant_id(), self._param.llm_id, messages, self.gen_lang(self._param.language))
         self._canvas.history.pop()
         self._canvas.history.append(("user", ans))
-
-        print(ans, ":::::::::::::::::::::::::::::::::")
         return RewriteQuestion.be_output(ans)
+
+    @staticmethod
+    def gen_lang(language):
+        # convert code lang to language word for the prompt
+        language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
+                         'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
+                         'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
+                         'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
+                         'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
+                         'zh-tw': 'Chinese', 'co': 'Corsican',
+                         'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
+                         'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
+                         'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
+                         'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
+                         'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
+                         'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
+                         'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
+                         'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
+                         'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
+                         'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
+                         'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
+                         'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
+                         'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
+                         'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
+                         'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
+                         'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
+                         'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
+                         'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
+                         'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
+                         'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
+                         'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
+                         'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
+                         'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
+                         'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
+                         'su': 'Sundanese',
+                         'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
+                         'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
+                         'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
+                         'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
+                         'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
+                         'yo': 'Yoruba', 'zu': 'Zulu'}
+        if language in language_dict:
+            return language_dict[language]
+        else:
+            return ""
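The gen_lang lookup above is a plain dict with an empty-string fallback, so an unknown code simply drops the language constraint from the downstream prompt. A trimmed, runnable sketch (the dict here is a stand-in for the full table in the component):

    LANG = {"en": "English", "zh-cn": "Chinese", "de": "German"}

    def gen_lang(code: str) -> str:
        # unmapped codes degrade to "" so no language constraint is added
        return LANG.get(code, "")

    assert gen_lang("zh-cn") == "Chinese"
    assert gen_lang("xx") == ""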
@@ -41,19 +41,44 @@ class SwitchParam(ComponentParamBase):
     def check(self):
         self.check_empty(self.conditions, "[Switch] conditions")
         for cond in self.conditions:
-            if not cond["to"]: raise ValueError(f"[Switch] 'To' can not be empty!")
+            if not cond["to"]:
+                raise ValueError("[Switch] 'To' can not be empty!")
 
 
 class Switch(ComponentBase, ABC):
     component_name = "Switch"
 
+    def get_dependent_components(self):
+        res = []
+        for cond in self._param.conditions:
+            for item in cond["items"]:
+                if not item["cpn_id"]:
+                    continue
+                if item["cpn_id"].lower().find("begin") >= 0 or item["cpn_id"].lower().find("answer") >= 0:
+                    continue
+                cid = item["cpn_id"].split("@")[0]
+                res.append(cid)
+
+        return list(set(res))
+
     def _run(self, history, **kwargs):
         for cond in self._param.conditions:
             res = []
             for item in cond["items"]:
-                out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
-                cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
-                res.append(self.process_operator(cpn_input, item["operator"], item["value"]))
+                if not item["cpn_id"]:
+                    continue
+                cid = item["cpn_id"].split("@")[0]
+                if item["cpn_id"].find("@") > 0:
+                    cpn_id, key = item["cpn_id"].split("@")
+                    for p in self._canvas.get_component(cid)["obj"]._param.query:
+                        if p["key"] == key:
+                            res.append(self.process_operator(p.get("value",""), item["operator"], item.get("value", "")))
+                            break
+                else:
+                    out = self._canvas.get_component(cid)["obj"].output(allow_partial=False)[1]
+                    cpn_input = "" if "content" not in out.columns else " ".join([str(s) for s in out["content"]])
+                    res.append(self.process_operator(cpn_input, item["operator"], item.get("value", "")))
 
             if cond["logical_operator"] != "and" and any(res):
                 return Switch.be_output(cond["to"])
 
@@ -85,22 +110,22 @@ class Switch(ComponentBase, ABC):
         elif operator == ">":
             try:
                 return True if float(input) > float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input > value else False
         elif operator == "<":
             try:
                 return True if float(input) < float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input < value else False
         elif operator == "≥":
             try:
                 return True if float(input) >= float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input >= value else False
         elif operator == "≤":
             try:
                 return True if float(input) <= float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input <= value else False
 
         raise ValueError('Not supported operator' + operator)
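The new Switch logic introduces a "cpn_id@key" addressing convention: a plain id reads a component's output, while an id like "begin@lang" reads one named parameter of that component. A small runnable sketch of that dispatch (the `resolve` helper and its data layout are illustrative, not the real canvas API):

    def resolve(ref: str, components: dict) -> str:
        cid = ref.split("@")[0]
        if "@" in ref:
            # parameter reference: look the key up in the component's query list
            key = ref.split("@")[1]
            for p in components[cid]["query"]:
                if p["key"] == key:
                    return p.get("value", "")
            return ""
        # plain reference: read the component's output
        return components[cid]["output"]

    cpns = {"begin": {"query": [{"key": "lang", "value": "English"}], "output": ""}}
    assert resolve("begin@lang", cpns) == "English"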
agent/component/template.py (new file, 134 lines)
@@ -0,0 +1,134 @@
+#
+#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+import json
+import re
+from agent.component.base import ComponentBase, ComponentParamBase
+from jinja2 import Template as Jinja2Template
+
+
+class TemplateParam(ComponentParamBase):
+    """
+    Define the Generate component parameters.
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.content = ""
+        self.parameters = []
+
+    def check(self):
+        self.check_empty(self.content, "[Template] Content")
+        return True
+
+
+class Template(ComponentBase):
+    component_name = "Template"
+
+    def get_dependent_components(self):
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
+        return list(cpnts)
+
+    def get_input_elements(self):
+        key_set = set([])
+        res = []
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res
+
+    def _run(self, history, **kwargs):
+        content = self._param.content
+
+        self._param.inputs = []
+        for para in self.get_input_elements():
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] == key:
+                        value = p.get("value", "")
+                        self.make_kwargs(para, kwargs, value)
+                        break
+                else:
+                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
+                continue
+
+            component_id = para["key"]
+            cpn = self._canvas.get_component(component_id)["obj"]
+            if cpn.component_name.lower() == "answer":
+                hist = self._canvas.get_history(1)
+                if hist:
+                    hist = hist[0]["content"]
+                else:
+                    hist = ""
+                self.make_kwargs(para, kwargs, hist)
+                continue
+
+            _, out = cpn.output(allow_partial=False)
+
+            result = ""
+            if "content" in out.columns:
+                result = "\n".join(
+                    [o if isinstance(o, str) else str(o) for o in out["content"]]
+                )
+
+            self.make_kwargs(para, kwargs, result)
+
+        template = Jinja2Template(content)
+
+        try:
+            content = template.render(kwargs)
+        except Exception:
+            pass
+
+        for n, v in kwargs.items():
+            if not isinstance(v, str):
+                try:
+                    v = json.dumps(v, ensure_ascii=False)
+                except Exception:
+                    pass
+            content = re.sub(
+                r"\{%s\}" % re.escape(n), v, content
+            )
+        content = re.sub(
+            r"(#+)", r" \1 ", content
+        )
+
+        return Template.be_output(content)
+
+    def make_kwargs(self, para, kwargs, value):
+        self._param.inputs.append(
+            {"component_id": para["key"], "content": value}
+        )
+        try:
+            value = json.loads(value)
+        except Exception:
+            pass
+        kwargs[para["key"]] = value
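The Template component renders in two passes: first through Jinja2, then a literal "{key}" substitution sweep for placeholders Jinja2 leaves untouched (single-brace references are not Jinja syntax). A standalone sketch of that ordering; jinja2 is a real dependency of the new file, the rest is illustrative:

    import json
    import re
    from jinja2 import Template as Jinja2Template

    def render(content: str, kwargs: dict) -> str:
        try:
            # pass 1: full Jinja2 rendering ({{ ... }} expressions)
            content = Jinja2Template(content).render(kwargs)
        except Exception:
            pass  # fall through to the plain substitution pass
        for n, v in kwargs.items():
            if not isinstance(v, str):
                v = json.dumps(v, ensure_ascii=False)
            # pass 2: literal {key} placeholders that Jinja2 ignored
            content = re.sub(r"\{%s\}" % re.escape(n), v, content)
        return content

    print(render("{{ greeting }} / {name}", {"greeting": "hi", "name": "bob"}))  # hi / bob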
@@ -1,72 +1,72 @@
 #
 #  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
 #  You may obtain a copy of the License at
 #
 #      http://www.apache.org/licenses/LICENSE-2.0
 #
 #  Unless required by applicable law or agreed to in writing, software
 #  distributed under the License is distributed on an "AS IS" BASIS,
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
 import json
 from abc import ABC
 import pandas as pd
 import time
 import requests
 from agent.component.base import ComponentBase, ComponentParamBase
 
 
 class TuShareParam(ComponentParamBase):
     """
     Define the TuShare component parameters.
     """
 
     def __init__(self):
         super().__init__()
         self.token = "xxx"
         self.src = "eastmoney"
         self.start_date = "2024-01-01 09:00:00"
         self.end_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
         self.keyword = ""
 
     def check(self):
         self.check_valid_value(self.src, "Quick News Source",
                                ["sina", "wallstreetcn", "10jqka", "eastmoney", "yuncaijing", "fenghuang", "jinrongjie"])
 
 
 class TuShare(ComponentBase, ABC):
     component_name = "TuShare"
 
     def _run(self, history, **kwargs):
         ans = self.get_input()
         ans = ",".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return TuShare.be_output("")
 
         try:
             tus_res = []
             params = {
                 "api_name": "news",
                 "token": self._param.token,
                 "params": {"src": self._param.src, "start_date": self._param.start_date,
                            "end_date": self._param.end_date}
             }
             response = requests.post(url="http://api.tushare.pro", data=json.dumps(params).encode('utf-8'))
             response = response.json()
             if response['code'] != 0:
                 return TuShare.be_output(response['msg'])
             df = pd.DataFrame(response['data']['items'])
             df.columns = response['data']['fields']
             tus_res.append({"content": (df[df['content'].str.contains(self._param.keyword, case=False)]).to_markdown()})
         except Exception as e:
             return TuShare.be_output("**ERROR**: " + str(e))
 
         if not tus_res:
             return TuShare.be_output("")
 
         return pd.DataFrame(tus_res)
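The TuShare hunk above shows no visible line changes; its keyword filter is plain pandas. A tiny runnable sketch of the same case-insensitive substring filter (the sample rows are made up for illustration):

    import pandas as pd

    df = pd.DataFrame({"content": ["AI chips rally", "Oil slips", "ai startup IPO"]})
    hits = df[df["content"].str.contains("ai", case=False)]  # keeps rows 0 and 2
    print(hits.to_markdown())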
@@ -1,80 +1,80 @@
 #
 #  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
 #  You may obtain a copy of the License at
 #
 #      http://www.apache.org/licenses/LICENSE-2.0
 #
 #  Unless required by applicable law or agreed to in writing, software
 #  distributed under the License is distributed on an "AS IS" BASIS,
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
 from abc import ABC
 import pandas as pd
 import pywencai
 from agent.component.base import ComponentBase, ComponentParamBase
 
 
 class WenCaiParam(ComponentParamBase):
     """
     Define the WenCai component parameters.
     """
 
     def __init__(self):
         super().__init__()
         self.top_n = 10
         self.query_type = "stock"
 
     def check(self):
         self.check_positive_integer(self.top_n, "Top N")
         self.check_valid_value(self.query_type, "Query type",
                                ['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
                                 'futures', 'lccp',
                                 'foreign_exchange'])
 
 
 class WenCai(ComponentBase, ABC):
     component_name = "WenCai"
 
     def _run(self, history, **kwargs):
         ans = self.get_input()
         ans = ",".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return WenCai.be_output("")
 
         try:
             wencai_res = []
             res = pywencai.get(query=ans, query_type=self._param.query_type, perpage=self._param.top_n)
             if isinstance(res, pd.DataFrame):
                 wencai_res.append({"content": res.to_markdown()})
             if isinstance(res, dict):
                 for item in res.items():
                     if isinstance(item[1], list):
                         wencai_res.append({"content": item[0] + "\n" + pd.DataFrame(item[1]).to_markdown()})
                         continue
                     if isinstance(item[1], str):
                         wencai_res.append({"content": item[0] + "\n" + item[1]})
                         continue
                     if isinstance(item[1], dict):
                         if "meta" in item[1].keys():
                             continue
                         wencai_res.append({"content": pd.DataFrame.from_dict(item[1], orient='index').to_markdown()})
                         continue
                     if isinstance(item[1], pd.DataFrame):
                         if "image_url" in item[1].columns:
                             continue
                         wencai_res.append({"content": item[1].to_markdown()})
                         continue
 
                     wencai_res.append({"content": item[0] + "\n" + str(item[1])})
         except Exception as e:
             return WenCai.be_output("**ERROR**: " + str(e))
 
         if not wencai_res:
             return WenCai.be_output("")
 
         return pd.DataFrame(wencai_res)
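The WenCai component flattens pywencai's heterogeneous return types (DataFrame, dict of lists/strings, plain values) into markdown "content" rows. A compact sketch of that dispatch, with the pywencai result mocked rather than fetched:

    import pandas as pd

    def normalize(res) -> list:
        # reduce mixed result shapes to a uniform list of {"content": str} rows
        rows = []
        if isinstance(res, pd.DataFrame):
            rows.append({"content": res.to_markdown()})
        elif isinstance(res, dict):
            for k, v in res.items():
                if isinstance(v, list):
                    rows.append({"content": k + "\n" + pd.DataFrame(v).to_markdown()})
                elif isinstance(v, str):
                    rows.append({"content": k + "\n" + v})
        return rows

    print(normalize({"news": [{"title": "x"}], "note": "hello"}))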
@@ -13,12 +13,10 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
-import random
+import logging
 from abc import ABC
-from functools import partial
 import wikipedia
 import pandas as pd
-from agent.settings import DEBUG
 from agent.component.base import ComponentBase, ComponentParamBase
 
 
@@ -65,5 +63,5 @@ class Wikipedia(ComponentBase, ABC):
             return Wikipedia.be_output("")
 
         df = pd.DataFrame(wiki_res)
-        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
+        logging.debug(f"df: {df}")
         return df
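The change above swaps a module-level DEBUG flag plus print for stdlib logging, so verbosity is controlled by logging configuration instead of a constant. A minimal sketch of the replacement pattern:

    import logging

    logging.basicConfig(level=logging.DEBUG)  # enable debug output for this sketch
    logging.debug("df: %s", {"title": "Alan Turing"})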
@@ -1,83 +1,84 @@
 #
 #  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
 #  You may obtain a copy of the License at
 #
 #      http://www.apache.org/licenses/LICENSE-2.0
 #
 #  Unless required by applicable law or agreed to in writing, software
 #  distributed under the License is distributed on an "AS IS" BASIS,
 #  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
+import logging
 from abc import ABC
 import pandas as pd
 from agent.component.base import ComponentBase, ComponentParamBase
 import yfinance as yf
 
 
 class YahooFinanceParam(ComponentParamBase):
     """
     Define the YahooFinance component parameters.
     """
 
     def __init__(self):
         super().__init__()
         self.info = True
         self.history = False
         self.count = False
         self.financials = False
         self.income_stmt = False
         self.balance_sheet = False
         self.cash_flow_statement = False
         self.news = True
 
     def check(self):
         self.check_boolean(self.info, "get all stock info")
         self.check_boolean(self.history, "get historical market data")
         self.check_boolean(self.count, "show share count")
         self.check_boolean(self.financials, "show financials")
         self.check_boolean(self.income_stmt, "income statement")
         self.check_boolean(self.balance_sheet, "balance sheet")
         self.check_boolean(self.cash_flow_statement, "cash flow statement")
         self.check_boolean(self.news, "show news")
 
 
 class YahooFinance(ComponentBase, ABC):
     component_name = "YahooFinance"
 
     def _run(self, history, **kwargs):
         ans = self.get_input()
         ans = "".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return YahooFinance.be_output("")
 
         yohoo_res = []
         try:
             msft = yf.Ticker(ans)
             if self._param.info:
                 yohoo_res.append({"content": "info:\n" + pd.Series(msft.info).to_markdown() + "\n"})
             if self._param.history:
                 yohoo_res.append({"content": "history:\n" + msft.history().to_markdown() + "\n"})
             if self._param.financials:
                 yohoo_res.append({"content": "calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n"})
             if self._param.balance_sheet:
                 yohoo_res.append({"content": "balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n"})
                 yohoo_res.append(
                     {"content": "quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n"})
             if self._param.cash_flow_statement:
                 yohoo_res.append({"content": "cash flow statement:\n" + msft.cashflow.to_markdown() + "\n"})
                 yohoo_res.append(
                     {"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
             if self._param.news:
                 yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
-        except Exception as e:
-            print("**ERROR** " + str(e))
+        except Exception:
+            logging.exception("YahooFinance got exception")
 
         if not yohoo_res:
             return YahooFinance.be_output("")
 
         return pd.DataFrame(yohoo_res)

@@ -1,5 +1,5 @@
 #
-#  Copyright 2019 The FATE Authors. All Rights Reserved.
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
 #
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
@@ -13,22 +13,6 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 #
-# Logger
-import os
-
-from api.utils.file_utils import get_project_base_directory
-from api.utils.log_utils import LoggerFactory, getLogger
-
-DEBUG = 0
-LoggerFactory.set_directory(
-    os.path.join(
-        get_project_base_directory(),
-        "logs",
-        "flow"))
-# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
-LoggerFactory.LEVEL = 30
-
-flow_logger = getLogger("flow")
-database_logger = getLogger("database")
 FLOAT_ZERO = 1e-8
 PARAM_MAXDEPTH = 5
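A short usage sketch of the yfinance calls the hunk above keeps, wrapped in the same logging pattern the new code adopts (network access and the "MSFT" symbol are assumptions for illustration):

    import logging
    import pandas as pd
    import yfinance as yf

    try:
        ticker = yf.Ticker("MSFT")
        info_md = pd.Series(ticker.info).to_markdown()   # static company facts
        history_md = ticker.history().to_markdown()      # default 1-month OHLCV
        print(info_md[:200], history_md[:200], sep="\n")
    except Exception:
        logging.exception("YahooFinance got exception")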
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,253 +1,468 @@
 {
   "id": 4,
   "title": "Interpreter",
-  "description": "A simple interpreter that translates user input into a target language. Try 'Hi there => Spanish' to see the translation!",
+  "description": "A translation agent based on a reflection agentic workflow, inspired by Andrew Ng's project: https://github.com/andrewyng/translation-agent\n\n1. Prompt an LLM to translate a text into the target language.\n2. Have the LLM reflect on the translation and provide constructive suggestions for improvement.\n3. Use these suggestions to improve the translation.",
   "canvas_type": "chatbot",
   "dsl": {
     "answer": [],
     "components": {
-      "begin": {
-        "obj": {
-          "component_name": "Begin",
-          "params": {
-            "prologue": "Hi there! Please enter the text you want to translate in format like: 'text you want to translate' => target language. For an example: 您好! => English"
-          }
-        },
-        "downstream": [
-          "Answer:ShortPapersShake"
-        ],
-        "upstream": []
-      },
-      "Answer:ShortPapersShake": {
+      "Answer:TinyGamesGuess": {
+        "downstream": [],
         "obj": {
           "component_name": "Answer",
-          "params": {}
+          "inputs": [],
+          "output": null,
+          "params": {
+            "debug_inputs": [],
+            "inputs": [],
+            "message_history_window_size": 22,
+            "output": null,
+            "output_var_name": "output",
+            "post_answers": [],
+            "query": []
+          }
         },
-        "downstream": [
-          "Generate:HeavyForksTell"
-        ],
         "upstream": [
-          "begin",
-          "Generate:HeavyForksTell"
+          "Generate:FuzzyEmusWork"
         ]
       },
-      "Generate:HeavyForksTell": {
+      "Generate:FuzzyEmusWork": {
+        "downstream": [
+          "Answer:TinyGamesGuess"
+        ],
         "obj": {
           "component_name": "Generate",
+          "inputs": [],
+          "output": null,
           "params": {
-            "cite": true,
+            "cite": false,
+            "debug_inputs": [],
             "frequency_penalty": 0.7,
+            "inputs": [],
             "llm_id": "deepseek-chat@DeepSeek",
-            "max_tokens": 256,
-            "message_history_window_size": 12,
+            "max_tokens": 0,
+            "message_history_window_size": 1,
+            "output": null,
+            "output_var_name": "output",
             "parameters": [],
             "presence_penalty": 0.4,
-            "prompt": "You are an professional interpreter.\n- Role: an professional interpreter.\n- Input format: content need to be translated => target language. \n- Answer format: => translated content in target language. \n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね 。\n",
+            "prompt": "Your task is to carefully read, then edit, a translation to {begin@lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS>\nas follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{Generate:ShinySquidsSneeze}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
+            "query": [],
             "temperature": 0.1,
             "top_p": 0.3
           }
         },
-        "downstream": [
-          "Answer:ShortPapersShake"
-        ],
         "upstream": [
-          "Answer:ShortPapersShake"
+          "Generate:ShinySquidsSneeze"
         ]
-      }
+      },
+      "Generate:ShinySquidsSneeze": {
+        "downstream": [
+          "Generate:FuzzyEmusWork"
+        ],
+        "obj": {
+          "component_name": "Generate",
+          "inputs": [],
+          "output": null,
+          "params": {
+            "cite": false,
+            "debug_inputs": [],
+            "frequency_penalty": 0.7,
+            "inputs": [],
+            "llm_id": "deepseek-chat@DeepSeek",
+            "max_tokens": 0,
+            "message_history_window_size": 1,
+            "output": null,
+            "output_var_name": "output",
+            "parameters": [],
+            "presence_penalty": 0.4,
+            "prompt": "Your task is to carefully read a source text and a translation to {begin@lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {begin@lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
+            "query": [],
+            "temperature": 0.1,
+            "top_p": 0.3
+          }
+        },
+        "upstream": [
+          "Generate:VastKeysKick"
+        ]
+      },
+      "Generate:VastKeysKick": {
+        "downstream": [
+          "Generate:ShinySquidsSneeze"
+        ],
+        "obj": {
+          "component_name": "Generate",
+          "inputs": [],
+          "output": null,
+          "params": {
+            "cite": false,
+            "debug_inputs": [],
+            "frequency_penalty": 0.7,
+            "inputs": [],
+            "llm_id": "deepseek-chat@DeepSeek",
+            "max_tokens": 0,
+            "message_history_window_size": 1,
+            "output": null,
+            "output_var_name": "output",
+            "parameters": [],
+            "presence_penalty": 0.4,
+            "prompt": "Role: You are a professional translator proficient in {begin@lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {begin@lang}, ensuring that its style resembles that of popular science articles in {begin@lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{begin@file}\n\n<TRANSLATED_TEXT>",
+            "query": [],
+            "temperature": 0.1,
+            "top_p": 0.3
+          }
+        },
+        "upstream": [
+          "begin"
+        ]
+      },
+      "begin": {
+        "downstream": [
+          "Generate:VastKeysKick"
+        ],
+        "obj": {
+          "component_name": "Begin",
+          "inputs": [],
+          "output": null,
+          "params": {
+            "debug_inputs": [],
+            "inputs": [],
+            "message_history_window_size": 22,
+            "output": null,
+            "output_var_name": "output",
+            "prologue": "",
+            "query": [
+              {
+                "key": "lang",
+                "name": "Target Language",
+                "optional": false,
+                "type": "line"
+              },
+              {
+                "key": "file",
+                "name": "Files",
+                "optional": false,
+                "type": "file"
+              }
+            ]
+          }
+        },
+        "upstream": []
+      }
     },
     "embed_id": "",
     "graph": {
+      "edges": [
+        {
+          "id": "xy-edge__begin-Generate:VastKeysKickc",
+          "markerEnd": "logo",
+          "source": "begin",
+          "style": {
+            "stroke": "rgb(202 197 245)",
+            "strokeWidth": 2
+          },
+          "target": "Generate:VastKeysKick",
+          "targetHandle": "c",
+          "type": "buttonEdge",
+          "zIndex": 1001
+        },
+        {
+          "id": "xy-edge__Generate:VastKeysKickb-Generate:ShinySquidsSneezec",
+          "markerEnd": "logo",
+          "source": "Generate:VastKeysKick",
+          "sourceHandle": "b",
+          "style": {
+            "stroke": "rgb(202 197 245)",
+            "strokeWidth": 2
+          },
+          "target": "Generate:ShinySquidsSneeze",
+          "targetHandle": "c",
+          "type": "buttonEdge",
+          "zIndex": 1001
+        },
+        {
+          "id": "xy-edge__Generate:FuzzyEmusWorkb-Answer:TinyGamesGuessc",
+          "markerEnd": "logo",
+          "source": "Generate:FuzzyEmusWork",
+          "sourceHandle": "b",
+          "style": {
+            "stroke": "rgb(202 197 245)",
+            "strokeWidth": 2
+          },
+          "target": "Answer:TinyGamesGuess",
+          "targetHandle": "c",
+          "type": "buttonEdge",
+          "zIndex": 1001
+        },
+        {
+          "id": "xy-edge__Generate:ShinySquidsSneezeb-Generate:FuzzyEmusWorkc",
+          "markerEnd": "logo",
+          "source": "Generate:ShinySquidsSneeze",
+          "sourceHandle": "b",
+          "style": {
+            "stroke": "rgb(202 197 245)",
+            "strokeWidth": 2
+          },
+          "target": "Generate:FuzzyEmusWork",
+          "targetHandle": "c",
+          "type": "buttonEdge",
+          "zIndex": 1001
+        }
+      ],
       "nodes": [
         {
           "data": {
             "form": {
-              "prologue": "Hi there! Please enter the text you want to translate in format like: 'text you want to translate' => target language. For an example: 您好! => English"
+              "prologue": "",
+              "query": [
+                {
+                  "key": "lang",
+                  "name": "Target Language",
+                  "optional": false,
+                  "type": "line"
+                },
+                {
+                  "key": "file",
+                  "name": "Files",
+                  "optional": false,
+                  "type": "file"
+                }
+              ]
             },
             "label": "Begin",
-            "name": "Instruction"
+            "name": "begin"
           },
           "dragging": false,
-          "height": 44,
+          "height": 128,
           "id": "begin",
+          "measured": {
+            "height": 128,
+            "width": 200
+          },
           "position": {
-            "x": -227.62119327532662,
-            "y": 204.18864081386155
+            "x": -383.5,
+            "y": 142.62256327439624
          },
           "positionAbsolute": {
-            "x": -227.62119327532662,
-            "y": 204.18864081386155
+            "x": -383.5,
+            "y": 143.5
           },
-          "selected": false,
+          "selected": true,
           "sourcePosition": "left",
           "targetPosition": "right",
           "type": "beginNode",
-          "width": 100
+          "width": 200
         },
         {
           "data": {
             "form": {},
             "label": "Answer",
-            "name": "Interface"
+            "name": "Interact_0"
           },
           "dragging": false,
           "height": 44,
-          "id": "Answer:ShortPapersShake",
+          "id": "Answer:TinyGamesGuess",
+          "measured": {
+            "height": 44,
+            "width": 200
+          },
           "position": {
-            "x": -2.51245296887717,
-            "y": 206.25402277426554
+            "x": 645.5056004454161,
+            "y": 182.98193827439627
           },
           "positionAbsolute": {
-            "x": -2.51245296887717,
-            "y": 206.25402277426554
+            "x": 688.5,
+            "y": 183.859375
           },
           "selected": false,
-          "sourcePosition": "left",
-          "targetPosition": "right",
+          "sourcePosition": "right",
+          "targetPosition": "left",
           "type": "logicNode",
           "width": 200
         },
         {
           "data": {
             "form": {
-              "cite": true,
+              "text": "Translation Agent: Agentic translation using reflection workflow\n\nThis is inspired by Andrew NG's project: https://github.com/andrewyng/translation-agent\n\n1. Prompt an LLM to translate a text into the target language;\n2. Have the LLM reflect on the translation and provide constructive suggestions for improvement;\n3. Use these suggestions to improve the translation."
+            },
+            "label": "Note",
+            "name": "Brief"
+          },
+          "dragHandle": ".note-drag-handle",
+          "dragging": false,
+          "height": 227,
+          "id": "Note:MoodyKnivesCheat",
+          "measured": {
+            "height": 227,
+            "width": 703
+          },
+          "position": {
+            "x": 46.02198421645994,
+            "y": -267.69527832581736
+          },
+          "positionAbsolute": {
+            "x": 46.02198421645994,
+            "y": -267.69527832581736
+          },
+          "resizing": false,
+          "selected": false,
+          "sourcePosition": "right",
+          "style": {
+            "height": 227,
+            "width": 703
+          },
+          "targetPosition": "left",
+          "type": "noteNode",
+          "width": 703
+        },
+        {
+          "data": {
+            "form": {
+              "text": "Many businesses use specialized terms that are not widely used on the internet and that LLMs thus don’t know about, and there are also many terms that can be translated in multiple ways. For example, ”open source” in Spanish can be “Código abierto” or “Fuente abierta”; both are fine, but it’d better to pick one and stick with it for a single document.\n\nYou can add those glossary translation into prompt to any of `Translate directly` or 'Reflect'."
+            },
+            "label": "Note",
+            "name": "Tip: Add glossary "
+          },
+          "dragHandle": ".note-drag-handle",
+          "dragging": false,
+          "height": 181,
+          "id": "Note:SourCarrotsAct",
+          "measured": {
+            "height": 181,
+            "width": 832
+          },
+          "position": {
+            "x": 65.0676250238289,
+            "y": 397.6323270065299
+          },
+          "positionAbsolute": {
+            "x": 65.0676250238289,
+            "y": 397.6323270065299
+          },
+          "resizing": false,
+          "selected": false,
+          "sourcePosition": "right",
+          "style": {
+            "height": 181,
+            "width": 832
+          },
+          "targetPosition": "left",
+          "type": "noteNode",
+          "width": 832
+        },
+        {
+          "data": {
+            "form": {
+              "cite": false,
               "frequencyPenaltyEnabled": true,
               "frequency_penalty": 0.7,
               "llm_id": "deepseek-chat@DeepSeek",
-              "maxTokensEnabled": true,
+              "maxTokensEnabled": false,
               "max_tokens": 256,
-              "message_history_window_size": 12,
+              "message_history_window_size": 1,
               "parameter": "Precise",
               "parameters": [],
               "presencePenaltyEnabled": true,
               "presence_penalty": 0.4,
-              "prompt": "You are an professional interpreter.\n- Role: an professional interpreter.\n- Input format: content need to be translated => target language. \n- Answer format: => translated content in target language. \n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね 。\n",
+              "prompt": "Role: You are a professional translator proficient in {begin@lang}, with an exceptional ability to convert specialized academic papers into accessible popular science articles. Please assist me in translating the following paragraph into {begin@lang}, ensuring that its style resembles that of popular science articles in {begin@lang}.\n\nRequirements & Restrictions:\n - Use Markdown format to output.\n - DO NOT overlook any details.\n\n\n<ORIGINAL_TEXT>\n{begin@file}\n\n<TRANSLATED_TEXT>",
               "temperature": 0.1,
               "temperatureEnabled": true,
               "topPEnabled": true,
               "top_p": 0.3
             },
             "label": "Generate",
-            "name": "Translate"
+            "name": "Translate directly"
           },
           "dragging": false,
-          "height": 86,
-          "id": "Generate:HeavyForksTell",
+          "id": "Generate:VastKeysKick",
+          "measured": {
+            "height": 106,
+            "width": 200
+          },
           "position": {
-            "x": -1.8557846635797546,
-            "y": 70.16420357406685
+            "x": -132.6338674989604,
+            "y": 153.70663786774483
           },
-          "positionAbsolute": {
-            "x": -1.8557846635797546,
-            "y": 70.16420357406685
-          },
           "selected": false,
           "sourcePosition": "right",
           "targetPosition": "left",
-          "type": "generateNode",
-          "width": 200
+          "type": "generateNode"
         },
         {
           "data": {
             "form": {
-              "text": "The large model translates the user's desired content into the target language, returns the translated language."
+              "cite": false,
+              "frequencyPenaltyEnabled": true,
+              "frequency_penalty": 0.7,
+              "llm_id": "deepseek-chat@DeepSeek",
+              "maxTokensEnabled": false,
+              "max_tokens": 256,
+              "message_history_window_size": 1,
+              "parameter": "Precise",
+              "parameters": [],
+              "presencePenaltyEnabled": true,
+              "presence_penalty": 0.4,
+              "prompt": "Your task is to carefully read a source text and a translation to {begin@lang}, and then give constructive criticisms and helpful suggestions to improve the translation. \n\nThe source text and initial translation, delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT> and <TRANSLATION></TRANSLATION>, are as follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\nWhen writing suggestions, pay attention to whether there are ways to improve the translation's \n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules, and ensuring there are no unnecessary repetitions),\n(iii) style (by ensuring the translations reflect the style of the source text and take into account any cultural context),\n(iv) terminology (by ensuring terminology use is consistent and reflects the source text domain; and by only ensuring you use equivalent idioms {begin@lang}).\n\nWrite a list of specific, helpful and constructive suggestions for improving the translation.\nEach suggestion should address one specific part of the translation.\nOutput only the suggestions and nothing else.",
+              "temperature": 0.1,
+              "temperatureEnabled": true,
+              "topPEnabled": true,
+              "top_p": 0.3
             },
-            "label": "Note",
-            "name": "N: Translate"
+            "label": "Generate",
+            "name": "Reflect"
           },
           "dragging": false,
-          "height": 180,
-          "id": "Note:VioletNumbersStrive",
+          "id": "Generate:ShinySquidsSneeze",
+          "measured": {
+            "height": 106,
+            "width": 200
+          },
           "position": {
-            "x": 0.8506882512325546,
-            "y": -119.10519445109118
+            "x": 121.1675336631696,
+            "y": 152.92865408917177
           },
-          "positionAbsolute": {
-            "x": 0.8506882512325546,
-            "y": -119.10519445109118
-          },
-          "resizing": false,
           "selected": false,
           "sourcePosition": "right",
-          "style": {
-            "height": 180,
-            "width": 209
-          },
           "targetPosition": "left",
-          "type": "noteNode",
-          "width": 209,
-          "dragHandle": ".note-drag-handle"
+          "type": "generateNode"
         },
         {
           "data": {
             "form": {
-              "text": "Receives the content the user wants to translate and the target language, displays the translation result from the large model."
+              "cite": false,
+              "frequencyPenaltyEnabled": true,
+              "frequency_penalty": 0.7,
+              "llm_id": "deepseek-chat@DeepSeek",
+              "maxTokensEnabled": false,
+              "max_tokens": 256,
+              "message_history_window_size": 1,
+              "parameter": "Precise",
+              "parameters": [],
+              "presencePenaltyEnabled": true,
+              "presence_penalty": 0.4,
+              "prompt": "Your task is to carefully read, then edit, a translation to {begin@lang}, taking into\naccount a list of expert suggestions and constructive criticisms.\n\nThe source text, the initial translation, and the expert linguist suggestions are delimited by XML tags <SOURCE_TEXT></SOURCE_TEXT>, <TRANSLATION></TRANSLATION> and <EXPERT_SUGGESTIONS></EXPERT_SUGGESTIONS>\nas follows:\n\n<SOURCE_TEXT>\n{begin@file}\n</SOURCE_TEXT>\n\n<TRANSLATION>\n{Generate:VastKeysKick}\n</TRANSLATION>\n\n<EXPERT_SUGGESTIONS>\n{Generate:ShinySquidsSneeze}\n</EXPERT_SUGGESTIONS>\n\nPlease take into account the expert suggestions when editing the translation. Edit the translation by ensuring:\n\n(i) accuracy (by correcting errors of addition, mistranslation, omission, or untranslated text),\n(ii) fluency (by applying {begin@lang} grammar, spelling and punctuation rules and ensuring there are no unnecessary repetitions), \n(iii) style (by ensuring the translations reflect the style of the source text)\n(iv) terminology (inappropriate for context, inconsistent use), or\n(v) other errors.\n\nOutput only the new translation and nothing else.",
+              "temperature": 0.1,
+              "temperatureEnabled": true,
+              "topPEnabled": true,
+              "top_p": 0.3
             },
-            "label": "Note",
-            "name": "N: Interface"
+            "label": "Generate",
+            "name": "Improve"
           },
           "dragging": false,
-          "height": 157,
-          "id": "Note:WarmDoodlesSwim",
+          "id": "Generate:FuzzyEmusWork",
+          "measured": {
+            "height": 106,
+            "width": 200
+          },
           "position": {
-            "x": 22.5293807600396,
|
"x": 383.1474420163898,
|
||||||
"y": 267.8448268086032
|
"y": 152.0472805236579
|
||||||
},
|
},
|
||||||
"positionAbsolute": {
|
|
||||||
"x": 22.5293807600396,
|
|
||||||
"y": 267.8448268086032
|
|
||||||
},
|
|
||||||
"resizing": false,
|
|
||||||
"selected": false,
|
"selected": false,
|
||||||
"sourcePosition": "right",
|
"sourcePosition": "right",
|
||||||
"style": {
|
|
||||||
"height": 157,
|
|
||||||
"width": 252
|
|
||||||
},
|
|
||||||
"targetPosition": "left",
|
"targetPosition": "left",
|
||||||
"type": "noteNode",
|
"type": "generateNode"
|
||||||
"width": 252,
|
|
||||||
"dragHandle": ".note-drag-handle"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"edges": [
|
|
||||||
{
|
|
||||||
"id": "reactflow__edge-begin-Answer:ShortPapersShakec",
|
|
||||||
"markerEnd": "logo",
|
|
||||||
"source": "begin",
|
|
||||||
"sourceHandle": null,
|
|
||||||
"style": {
|
|
||||||
"stroke": "rgb(202 197 245)",
|
|
||||||
"strokeWidth": 2
|
|
||||||
},
|
|
||||||
"target": "Answer:ShortPapersShake",
|
|
||||||
"targetHandle": "c",
|
|
||||||
"type": "buttonEdge"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "reactflow__edge-Answer:ShortPapersShakeb-Generate:HeavyForksTellb",
|
|
||||||
"markerEnd": "logo",
|
|
||||||
"source": "Answer:ShortPapersShake",
|
|
||||||
"sourceHandle": "b",
|
|
||||||
"style": {
|
|
||||||
"stroke": "rgb(202 197 245)",
|
|
||||||
"strokeWidth": 2
|
|
||||||
},
|
|
||||||
"target": "Generate:HeavyForksTell",
|
|
||||||
"targetHandle": "b",
|
|
||||||
"type": "buttonEdge"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "reactflow__edge-Generate:HeavyForksTellc-Answer:ShortPapersShakec",
|
|
||||||
"markerEnd": "logo",
|
|
||||||
"source": "Generate:HeavyForksTell",
|
|
||||||
"sourceHandle": "c",
|
|
||||||
"style": {
|
|
||||||
"stroke": "rgb(202 197 245)",
|
|
||||||
"strokeWidth": 2
|
|
||||||
},
|
|
||||||
"target": "Answer:ShortPapersShake",
|
|
||||||
"targetHandle": "c",
|
|
||||||
"type": "buttonEdge"
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|||||||
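This template wires the translate, reflect, improve pattern for agentic translation: the "Translate directly" node drafts a translation, "Reflect" critiques the draft, and "Improve" edits the draft using the critique (note that both later prompts reference {Generate:VastKeysKick}, the draft node's output). A standalone sketch of the same three-step chain; the OpenAI-compatible client, the model name, and the simplified prompts below are illustrative assumptions, not part of the template:

# Minimal sketch of the translate -> reflect -> improve chain encoded by the
# template above. The OpenAI client and model name are assumptions for
# illustration; in RAGFlow each step runs as a Generate node on the canvas.
from openai import OpenAI

client = OpenAI()        # assumes OPENAI_API_KEY is set in the environment
MODEL = "gpt-4o-mini"    # placeholder model name

def ask(prompt: str) -> str:
    resp = client.chat.completions.create(
        model=MODEL, messages=[{"role": "user", "content": prompt}]
    )
    return resp.choices[0].message.content

def agentic_translate(text: str, lang: str) -> str:
    # Step 1: direct translation (the "Translate directly" node)
    draft = ask(f"Translate the following text into {lang}:\n\n{text}")
    # Step 2: critique (the "Reflect" node)
    critique = ask(
        f"Here is a source text and a draft {lang} translation.\n"
        f"<SOURCE_TEXT>\n{text}\n</SOURCE_TEXT>\n<TRANSLATION>\n{draft}\n</TRANSLATION>\n"
        "List specific suggestions to improve accuracy, fluency, style and terminology."
    )
    # Step 3: edit the draft using the critique (the "Improve" node)
    return ask(
        f"Rewrite the draft {lang} translation applying these suggestions.\n"
        f"<TRANSLATION>\n{draft}\n</TRANSLATION>\n"
        f"<EXPERT_SUGGESTIONS>\n{critique}\n</EXPERT_SUGGESTIONS>\n"
        "Output only the new translation."
    )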
Several file diffs in this compare view are suppressed because they are too long or too large:

agent/templates/research_report.json (new file, 1107 lines): diff suppressed
agent/templates/seo_blog.json (new file, 1209 lines): diff suppressed
@@ -43,6 +43,7 @@ if __name__ == '__main__':
         else:
             print(ans["content"])

-        if DEBUG: print(canvas.path)
+        if DEBUG:
+            print(canvas.path)
         question = input("\n==================== User =====================\n> ")
         canvas.add_user_input(question)
@@ -1,113 +1,113 @@
{
    "components": {
        "begin": {
            "obj":{
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?",
                            "to": "concentrator:0"
                        },
                        "others": {
                            "description": "The question is not about the product usage, appearance and how it works.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
                            "to": "concentrator:1"
                        }
                    }
                }
            },
            "downstream": ["concentrator:0","concentrator:1"],
            "upstream": ["answer:0"]
        },
        "concentrator:0": {
            "obj": {
                "component_name": "Concentrator",
                "params": {}
            },
            "downstream": ["message:0"],
            "upstream": ["categorize:0"]
        },
        "concentrator:1": {
            "obj": {
                "component_name": "Concentrator",
                "params": {}
            },
            "downstream": ["message:1_0","message:1_1","message:1_2"],
            "upstream": ["categorize:0"]
        },
        "message:0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 0_0!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:0"]
        },
        "message:1_0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 1_0!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:1"]
        },
        "message:1_1": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 1_1!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:1"]
        },
        "message:1_2": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 1_2!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:1"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
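This test DSL describes the agent canvas as an adjacency structure: each component carries its obj definition plus explicit downstream and upstream links, so executing the graph amounts to a walk from begin along downstream edges. A minimal sketch of such a walk; the helper below is illustrative only, not RAGFlow's Canvas implementation:

def walk(dsl: dict, start: str = "begin"):
    """Yield (component name, component type) in breadth-first downstream order."""
    components = dsl["components"]
    seen, queue = set(), [start]
    while queue:
        name = queue.pop(0)
        if name in seen:   # message nodes loop back to answer:0, so guard against cycles
            continue
        seen.add(name)
        yield name, components[name]["obj"]["component_name"]
        queue.extend(components[name]["downstream"])

# Usage: load the JSON above as `dsl`, then:
# for name, kind in walk(dsl):
#     print(name, kind)   # begin Begin, answer:0 Answer, categorize:0 Categorize, ...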
agentic_reasoning/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
from .deep_research import DeepResearcher as DeepResearcher
agentic_reasoning/deep_research.py (new file, 223 lines)

@@ -0,0 +1,223 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from functools import partial

from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT, MAX_SEARCH_LIMIT, \
    END_SEARCH_QUERY, REASON_PROMPT, RELEVANT_EXTRACTION_PROMPT
from api.db.services.llm_service import LLMBundle
from rag.nlp import extract_between
from rag.prompts import kb_prompt
from rag.utils.tavily_conn import Tavily


class DeepResearcher:
    def __init__(self,
                 chat_mdl: LLMBundle,
                 prompt_config: dict,
                 kb_retrieve: partial = None,
                 kg_retrieve: partial = None
                 ):
        self.chat_mdl = chat_mdl
        self.prompt_config = prompt_config
        self._kb_retrieve = kb_retrieve
        self._kg_retrieve = kg_retrieve

    @staticmethod
    def _remove_query_tags(text):
        """Remove query tags from text"""
        pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
        return re.sub(pattern, "", text)

    @staticmethod
    def _remove_result_tags(text):
        """Remove result tags from text"""
        pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
        return re.sub(pattern, "", text)

    def _generate_reasoning(self, msg_history):
        """Generate reasoning steps"""
        query_think = ""
        if msg_history[-1]["role"] != "user":
            msg_history.append({"role": "user", "content": "Continues reasoning with the new information.\n"})
        else:
            msg_history[-1]["content"] += "\n\nContinues reasoning with the new information.\n"

        for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_history, {"temperature": 0.7}):
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            if not ans:
                continue
            query_think = ans
            yield query_think
        return query_think

    def _extract_search_queries(self, query_think, question, step_index):
        """Extract search queries from thinking"""
        queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
        if not queries and step_index == 0:
            # If this is the first step and no queries are found, use the original question as the query
            queries = [question]
        return queries

    def _truncate_previous_reasoning(self, all_reasoning_steps):
        """Truncate previous reasoning steps to maintain a reasonable length"""
        truncated_prev_reasoning = ""
        for i, step in enumerate(all_reasoning_steps):
            truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"

        prev_steps = truncated_prev_reasoning.split('\n\n')
        if len(prev_steps) <= 5:
            truncated_prev_reasoning = '\n\n'.join(prev_steps)
        else:
            truncated_prev_reasoning = ''
            for i, step in enumerate(prev_steps):
                if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
                    truncated_prev_reasoning += step + '\n\n'
                else:
                    if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
                        truncated_prev_reasoning += '...\n\n'

        return truncated_prev_reasoning.strip('\n')

    def _retrieve_information(self, search_query):
        """Retrieve information from different sources"""
        # 1. Knowledge base retrieval
        kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}

        # 2. Web retrieval (if Tavily API is configured)
        if self.prompt_config.get("tavily_api_key"):
            tav = Tavily(self.prompt_config["tavily_api_key"])
            tav_res = tav.retrieve_chunks(search_query)
            kbinfos["chunks"].extend(tav_res["chunks"])
            kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])

        # 3. Knowledge graph retrieval (if configured)
        if self.prompt_config.get("use_kg") and self._kg_retrieve:
            ck = self._kg_retrieve(question=search_query)
            if ck["content_with_weight"]:
                kbinfos["chunks"].insert(0, ck)

        return kbinfos

    def _update_chunk_info(self, chunk_info, kbinfos):
        """Update chunk information for citations"""
        if not chunk_info["chunks"]:
            # If this is the first retrieval, use the retrieval results directly
            for k in chunk_info.keys():
                chunk_info[k] = kbinfos[k]
        else:
            # Merge newly retrieved information, avoiding duplicates
            cids = [c["chunk_id"] for c in chunk_info["chunks"]]
            for c in kbinfos["chunks"]:
                if c["chunk_id"] not in cids:
                    chunk_info["chunks"].append(c)

            dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
            for d in kbinfos["doc_aggs"]:
                if d["doc_id"] not in dids:
                    chunk_info["doc_aggs"].append(d)

    def _extract_relevant_info(self, truncated_prev_reasoning, search_query, kbinfos):
        """Extract and summarize relevant information"""
        summary_think = ""
        for ans in self.chat_mdl.chat_streamly(
                RELEVANT_EXTRACTION_PROMPT.format(
                    prev_reasoning=truncated_prev_reasoning,
                    search_query=search_query,
                    document="\n".join(kb_prompt(kbinfos, 4096))
                ),
                [{"role": "user",
                  "content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
                {"temperature": 0.7}):
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            if not ans:
                continue
            summary_think = ans
            yield summary_think

        return summary_think

    def thinking(self, chunk_info: dict, question: str):
        executed_search_queries = []
        msg_history = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
        all_reasoning_steps = []
        think = "<think>"

        for step_index in range(MAX_SEARCH_LIMIT + 1):
            # Check if the maximum search limit has been reached
            if step_index == MAX_SEARCH_LIMIT - 1:
                summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
                yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
                all_reasoning_steps.append(summary_think)
                msg_history.append({"role": "assistant", "content": summary_think})
                break

            # Step 1: Generate reasoning
            query_think = ""
            for ans in self._generate_reasoning(msg_history):
                query_think = ans
                yield {"answer": think + self._remove_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}

            think += self._remove_query_tags(query_think)
            all_reasoning_steps.append(query_think)

            # Step 2: Extract search queries
            queries = self._extract_search_queries(query_think, question, step_index)
            if not queries and step_index > 0:
                # If not the first step and no queries, end the search process
                break

            # Process each search query
            for search_query in queries:
                logging.info(f"[THINK]Query: {step_index}. {search_query}")
                msg_history.append({"role": "assistant", "content": search_query})
                think += f"\n\n> {step_index + 1}. {search_query}\n\n"
                yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}

                # Check if the query has already been executed
                if search_query in executed_search_queries:
                    summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
                    yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
                    all_reasoning_steps.append(summary_think)
                    msg_history.append({"role": "user", "content": summary_think})
                    think += summary_think
                    continue

                executed_search_queries.append(search_query)

                # Step 3: Truncate previous reasoning steps
                truncated_prev_reasoning = self._truncate_previous_reasoning(all_reasoning_steps)

                # Step 4: Retrieve information
                kbinfos = self._retrieve_information(search_query)

                # Step 5: Update chunk information
                self._update_chunk_info(chunk_info, kbinfos)

                # Step 6: Extract relevant information
                think += "\n\n"
                summary_think = ""
                for ans in self._extract_relevant_info(truncated_prev_reasoning, search_query, kbinfos):
                    summary_think = ans
                    yield {"answer": think + self._remove_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}

                all_reasoning_steps.append(summary_think)
                msg_history.append(
                    {"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
                think += self._remove_result_tags(summary_think)
                logging.info(f"[THINK]Summary: {step_index}. {summary_think}")

        yield think + "</think>"
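DeepResearcher.thinking is a generator: it streams incremental answers as dicts whose "answer" field is wrapped in <think> tags, ends with the full reasoning string, and fills chunk_info in place with the chunks it consulted for citation. A minimal driver sketch; the tenant id and the choice to pass no retrievers are illustrative assumptions, not RAGFlow's actual wiring:

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agentic_reasoning import DeepResearcher

chat_mdl = LLMBundle("tenant-id", LLMType.CHAT)   # placeholder tenant; uses its default chat model
researcher = DeepResearcher(
    chat_mdl,
    {"tavily_api_key": None, "use_kg": False},    # no web or knowledge-graph search in this sketch
    kb_retrieve=None,                             # normally a partial over the knowledge-base retriever
)

chunk_info = {"chunks": [], "doc_aggs": []}
for update in researcher.thinking(chunk_info, "Who directed Jaws?"):
    if isinstance(update, dict):
        print(update["answer"])                   # streaming partial, wrapped in <think>...</think>
    else:
        reasoning = update                        # the final yield is the full reasoning string
print(len(chunk_info["chunks"]), "chunks collected for citation")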
agentic_reasoning/prompts.py (new file, 113 lines)

@@ -0,0 +1,113 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
MAX_SEARCH_LIMIT = 6

REASON_PROMPT = (
    "You are a reasoning assistant with the ability to perform dataset searches to help "
    "you answer the user's question accurately. You have special tools:\n\n"
    f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
    f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
    f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
    "Once you have all the information you need, continue your reasoning.\n\n"
    "-- Example 1 --\n"  ########################################
    "Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"

    "-- Example 2 --\n"  #########################################
    "Question: \"When was the founder of craigslist born?\"\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY} When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"
    "**Remember**:\n"
    f"- You have a dataset to search, so you just provide a proper search query.\n"
    f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
    "- The language of query MUST be as the same as 'Question' or 'search result'.\n"
    "- If no helpful information can be found, rewrite the search query to be less and precise keywords.\n"
    "- When done searching, continue your reasoning.\n\n"
    'Please answer the following question. You should think step by step to solve it.\n\n'
)

RELEVANT_EXTRACTION_PROMPT = """**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
- The language of query **MUST BE** as the same as 'Search Query' or 'Web Pages'.\n"

**Final Information**

[Helpful information]

- **If the web pages do not provide any helpful information for current search query:** Output the following text.

**Final Information**

No helpful information found.

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

"""
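The reasoning loop depends on pulling every span between BEGIN_SEARCH_QUERY and END_SEARCH_QUERY out of the model's output. A minimal regex sketch of that extraction; RAGFlow's own implementation is rag.nlp.extract_between, so this standalone helper is illustrative:

import re

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"

def extract_between_sketch(text: str, start: str, end: str) -> list[str]:
    # Non-greedy match so multiple tagged spans in one response are all captured.
    pattern = re.escape(start) + r"(.*?)" + re.escape(end)
    return [m.strip() for m in re.findall(pattern, text, flags=re.DOTALL)]

out = f"I should check.{BEGIN_SEARCH_QUERY}Who directed Jaws?{END_SEARCH_QUERY}"
assert extract_between_sketch(out, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY) == ["Who directed Jaws?"]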
(new file, 18 lines; the filename is not shown in this view)

@@ -0,0 +1,18 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from beartype.claw import beartype_this_package
beartype_this_package()
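beartype_this_package() from beartype.claw installs an import hook so that every annotated callable defined in the package is type-checked at call time. A small illustration of the effect using beartype's plain decorator; the top_k function here is made up for demonstration:

from beartype import beartype

@beartype
def top_k(chunks: list[dict], k: int) -> list[dict]:
    # Annotations are validated when the function is called.
    return chunks[:k]

top_k([{"chunk_id": "a"}], 1)    # passes
# top_k("oops", 1)               # would raise a BeartypeCallHintParamViolation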
@@ -13,14 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import logging
 import os
 import sys
+import logging
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 from flask import Blueprint, Flask
 from werkzeug.wrappers.request import Request
 from flask_cors import CORS
+from flasgger import Swagger
+from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
+
 from api.db import StatusEnum
 from api.db.db_models import close_connection
@@ -29,32 +31,60 @@ from api.utils import CustomJSONEncoder, commands
 
 from flask_session import Session
 from flask_login import LoginManager
-from api.settings import SECRET_KEY, stat_logger
-from api.settings import API_VERSION, access_logger
+from api import settings
 from api.utils.api_utils import server_error_response
-from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
+from api.constants import API_VERSION
 
-__all__ = ['app']
+__all__ = ["app"]
 
-logger = logging.getLogger('flask.app')
-for h in access_logger.handlers:
-    logger.addHandler(h)
 
 Request.json = property(lambda self: self.get_json(force=True, silent=True))
 
 app = Flask(__name__)
-CORS(app, supports_credentials=True,max_age=2592000)
+
+# Add this at the beginning of your file to configure Swagger UI
+swagger_config = {
+    "headers": [],
+    "specs": [
+        {
+            "endpoint": "apispec",
+            "route": "/apispec.json",
+            "rule_filter": lambda rule: True,  # Include all endpoints
+            "model_filter": lambda tag: True,  # Include all models
+        }
+    ],
+    "static_url_path": "/flasgger_static",
+    "swagger_ui": True,
+    "specs_route": "/apidocs/",
+}
+
+swagger = Swagger(
+    app,
+    config=swagger_config,
+    template={
+        "swagger": "2.0",
+        "info": {
+            "title": "RAGFlow API",
+            "description": "",
+            "version": "1.0.0",
+        },
+        "securityDefinitions": {
+            "ApiKeyAuth": {"type": "apiKey", "name": "Authorization", "in": "header"}
+        },
+    },
+)
+
+CORS(app, supports_credentials=True, max_age=2592000)
 app.url_map.strict_slashes = False
 app.json_encoder = CustomJSONEncoder
 app.errorhandler(Exception)(server_error_response)
 
 ## convince for dev and debug
-#app.config["LOGIN_DISABLED"] = True
+# app.config["LOGIN_DISABLED"] = True
 app.config["SESSION_PERMANENT"] = False
 app.config["SESSION_TYPE"] = "filesystem"
-app.config['MAX_CONTENT_LENGTH'] = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))
+app.config["MAX_CONTENT_LENGTH"] = int(
+    os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
+)
 
 Session(app)
 login_manager = LoginManager()
@@ -64,17 +94,23 @@ commands.register_commands(app)
 
 def search_pages_path(pages_dir):
-    app_path_list = [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
-    api_path_list = [path for path in pages_dir.glob('*sdk/*.py') if not path.name.startswith('.')]
+    app_path_list = [
+        path for path in pages_dir.glob("*_app.py") if not path.name.startswith(".")
+    ]
+    api_path_list = [
+        path for path in pages_dir.glob("*sdk/*.py") if not path.name.startswith(".")
+    ]
     app_path_list.extend(api_path_list)
     return app_path_list
 
 
 def register_page(page_path):
-    path = f'{page_path}'
+    path = f"{page_path}"
+
-    page_name = page_path.stem.rstrip('_app')
-    module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name,))
+    page_name = page_path.stem.removesuffix("_app")
+    module_name = ".".join(
+        page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
+    )
 
     spec = spec_from_file_location(module_name, page_path)
     page = module_from_spec(spec)
@@ -82,8 +118,11 @@ def register_page(page_path):
     page.manager = Blueprint(page_name, module_name)
     sys.modules[module_name] = page
     spec.loader.exec_module(page)
-    page_name = getattr(page, 'page_name', page_name)
-    url_prefix = f'/api/{API_VERSION}' if "/sdk/" in path else f'/{API_VERSION}/{page_name}'
+    page_name = getattr(page, "page_name", page_name)
+    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
+    url_prefix = (
+        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
+    )
 
     app.register_blueprint(page.manager, url_prefix=url_prefix)
     return url_prefix
@@ -91,31 +130,31 @@ def register_page(page_path):
 
 pages_dir = [
     Path(__file__).parent,
-    Path(__file__).parent.parent / 'api' / 'apps',
-    Path(__file__).parent.parent / 'api' / 'apps' / 'sdk',
+    Path(__file__).parent.parent / "api" / "apps",
+    Path(__file__).parent.parent / "api" / "apps" / "sdk",
 ]
 
 client_urls_prefix = [
-    register_page(path)
-    for dir in pages_dir
-    for path in search_pages_path(dir)
+    register_page(path) for dir in pages_dir for path in search_pages_path(dir)
 ]
 
 
 @login_manager.request_loader
 def load_user(web_request):
-    jwt = Serializer(secret_key=SECRET_KEY)
+    jwt = Serializer(secret_key=settings.SECRET_KEY)
     authorization = web_request.headers.get("Authorization")
     if authorization:
         try:
             access_token = str(jwt.loads(authorization))
-            user = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
+            user = UserService.query(
+                access_token=access_token, status=StatusEnum.VALID.value
+            )
             if user:
                 return user[0]
             else:
                 return None
         except Exception as e:
-            stat_logger.exception(e)
+            logging.warning(f"load_user got exception {e}")
            return None
     else:
         return None
@@ -123,4 +162,4 @@ def load_user(web_request):
 
 @app.teardown_request
 def _db_close(exc):
     close_connection()
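The sdk_path change above detects SDK pages by substring-matching a platform-specific separator. A separator-agnostic alternative is to compare path components instead; this sketch illustrates that alternative and is not what the commit implements:

from pathlib import Path

def is_sdk_page(page_path: Path) -> bool:
    # Path.parts splits on the platform's native separator, so no
    # hard-coded "/sdk/" versus "\\sdk\\" branch is needed.
    return "sdk" in page_path.parts

assert is_sdk_page(Path("api") / "apps" / "sdk" / "doc.py")
assert not is_sdk_page(Path("api") / "apps" / "user_app.py")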
@@ -21,23 +21,25 @@ from flask import request, Response
 from api.db.services.llm_service import TenantLLMService
 from flask_login import login_required, current_user
 
-from api.db import FileType, LLMType, ParserType, FileSource
+from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileType, LLMType, ParserType, FileSource
 from api.db.db_models import APIToken, Task, File
 from api.db.services import duplicate_name
 from api.db.services.api_service import APITokenService, API4ConversationService
-from api.db.services.dialog_service import DialogService, chat, keyword_extraction
+from api.db.services.dialog_service import DialogService, chat
 from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.task_service import queue_tasks, TaskService
 from api.db.services.user_service import UserTenantService
-from api.settings import RetCode, retrievaler
+from api import settings
 from api.utils import get_uuid, current_timestamp, datetime_format
 from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request, \
     generate_confirmation_token
 
 from api.utils.file_utils import filename_type, thumbnail
+from rag.app.tag import label_question
+from rag.prompts import keyword_extraction
 from rag.utils.storage_factory import STORAGE_IMPL
 
 from api.db.services.canvas_service import UserCanvasService
@@ -45,14 +47,14 @@ from agent.canvas import Canvas
 from functools import partial
 
 
-@manager.route('/new_token', methods=['POST'])
+@manager.route('/new_token', methods=['POST'])  # noqa: F821
 @login_required
 def new_token():
     req = request.json
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
 
         tenant_id = tenants[0].tenant_id
         obj = {"tenant_id": tenant_id, "token": generate_confirmation_token(tenant_id),
@@ -68,20 +70,20 @@ def new_token():
         obj["dialog_id"] = req["dialog_id"]
 
         if not APITokenService.save(**obj):
-            return get_data_error_result(retmsg="Fail to new a dialog!")
+            return get_data_error_result(message="Fail to new a dialog!")
 
         return get_json_result(data=obj)
     except Exception as e:
         return server_error_response(e)
 
 
-@manager.route('/token_list', methods=['GET'])
+@manager.route('/token_list', methods=['GET'])  # noqa: F821
 @login_required
 def token_list():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
 
         id = request.args["dialog_id"] if "dialog_id" in request.args else request.args["canvas_id"]
         objs = APITokenService.query(tenant_id=tenants[0].tenant_id, dialog_id=id)
@@ -90,7 +92,7 @@ def token_list():
         return server_error_response(e)
 
 
-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @validate_request("tokens", "tenant_id")
 @login_required
 def rm():
@@ -104,13 +106,13 @@ def rm():
         return server_error_response(e)
 
 
-@manager.route('/stats', methods=['GET'])
+@manager.route('/stats', methods=['GET'])  # noqa: F821
 @login_required
 def stats():
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         objs = API4ConversationService.stats(
             tenants[0].tenant_id,
             request.args.get(
@@ -135,14 +137,13 @@ def stats():
         return server_error_response(e)
 
 
-@manager.route('/new_conversation', methods=['GET'])
+@manager.route('/new_conversation', methods=['GET'])  # noqa: F821
 def set_conversation():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
-    req = request.json
     try:
         if objs[0].source == "agent":
             e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
@@ -163,7 +164,7 @@ def set_conversation():
         else:
             e, dia = DialogService.get_by_id(objs[0].dialog_id)
             if not e:
-                return get_data_error_result(retmsg="Dialog not found")
+                return get_data_error_result(message="Dialog not found")
             conv = {
                 "id": get_uuid(),
                 "dialog_id": dia.id,
@@ -176,19 +177,20 @@ def set_conversation():
         return server_error_response(e)
 
 
-@manager.route('/completion', methods=['POST'])
+@manager.route('/completion', methods=['POST'])  # noqa: F821
 @validate_request("conversation_id", "messages")
 def completion():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")
-    if "quote" not in req: req["quote"] = False
+    if "quote" not in req:
+        req["quote"] = False
 
     msg = []
     for m in req["messages"]:
@@ -197,7 +199,8 @@ def completion():
         if m["role"] == "assistant" and not msg:
             continue
         msg.append(m)
-    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
     message_id = msg[-1]["id"]
 
     def fillin_conv(ans):
@@ -257,19 +260,20 @@ def completion():
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                     fillin_conv(ans)
                     rename_field(ans)
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans},
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                                ensure_ascii=False) + "\n\n"
 
                 canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+                canvas.history.append(("assistant", final_ans["content"]))
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
                 API4ConversationService.append_message(conv.id, conv.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
         resp = Response(sse(), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")
@@ -289,12 +293,12 @@ def completion():
             API4ConversationService.append_message(conv.id, conv.to_dict())
             rename_field(result)
             return get_json_result(data=result)
 
-    #******************For dialog******************
+    # ******************For dialog******************
     conv.message.append(msg[-1])
     e, dia = DialogService.get_by_id(conv.dialog_id)
     if not e:
-        return get_data_error_result(retmsg="Dialog not found!")
+        return get_data_error_result(message="Dialog not found!")
     del req["conversation_id"]
     del req["messages"]
 
@@ -309,14 +313,14 @@ def completion():
             for ans in chat(dia, msg, True, **req):
                 fillin_conv(ans)
                 rename_field(ans)
-                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans},
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                            ensure_ascii=False) + "\n\n"
             API4ConversationService.append_message(conv.id, conv.to_dict())
         except Exception as e:
-            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                        ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
     if req.get("stream", True):
         resp = Response(stream(), mimetype="text/event-stream")
@@ -325,7 +329,7 @@ def completion():
         resp.headers.add_header("X-Accel-Buffering", "no")
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp
 
     answer = None
     for ans in chat(dia, msg, **req):
         answer = ans
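After this change the completion endpoint streams Server-Sent Events whose JSON payloads are keyed code/message/data instead of retcode/retmsg. A minimal client sketch for consuming the stream; the host, route prefix, conversation id, and token below are placeholders for illustration, not values taken from this diff:

import json
import requests

resp = requests.post(
    "http://localhost:9380/v1/api/completion",            # placeholder host and prefix
    headers={"Authorization": "Bearer YOUR_API_TOKEN"},   # placeholder token
    json={"conversation_id": "CONV_ID",
          "messages": [{"role": "user", "content": "Hi"}],
          "stream": True},
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data:"):
        continue
    payload = json.loads(line[len("data:"):])
    if payload["code"] != 0:
        raise RuntimeError(payload["message"])
    if payload["data"] is True:                           # final sentinel event
        break
    print(payload["data"]["answer"])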
@ -339,25 +343,25 @@ def completion():
|
|||||||
return server_error_response(e)
|
return server_error_response(e)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/conversation/<conversation_id>', methods=['GET'])
|
@manager.route('/conversation/<conversation_id>', methods=['GET']) # noqa: F821
|
||||||
# @login_required
|
# @login_required
|
||||||
def get(conversation_id):
|
def get_conversation(conversation_id):
|
||||||
token = request.headers.get('Authorization').split()[1]
|
token = request.headers.get('Authorization').split()[1]
|
||||||
objs = APIToken.query(token=token)
|
objs = APIToken.query(token=token)
|
||||||
if not objs:
|
if not objs:
|
||||||
return get_json_result(
|
return get_json_result(
|
||||||
data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
|
data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
e, conv = API4ConversationService.get_by_id(conversation_id)
|
e, conv = API4ConversationService.get_by_id(conversation_id)
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="Conversation not found!")
|
return get_data_error_result(message="Conversation not found!")
|
||||||
|
|
||||||
conv = conv.to_dict()
|
conv = conv.to_dict()
|
||||||
if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
|
if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
|
||||||
return get_json_result(data=False, retmsg='Token is not valid for this conversation_id!"',
|
return get_json_result(data=False, message='Authentication error: API key is invalid for this conversation_id!"',
|
||||||
retcode=RetCode.AUTHENTICATION_ERROR)
|
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||||
|
|
||||||
for referenct_i in conv['reference']:
|
for referenct_i in conv['reference']:
|
||||||
if referenct_i is None or len(referenct_i) == 0:
|
if referenct_i is None or len(referenct_i) == 0:
|
||||||
continue
|
continue
|
||||||
@@ -370,14 +374,14 @@ def get(conversation_id):
         return server_error_response(e)


-@manager.route('/document/upload', methods=['POST'])
+@manager.route('/document/upload', methods=['POST'])  # noqa: F821
 @validate_request("kb_name")
 def upload():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     kb_name = request.form.get("kb_name").strip()
     tenant_id = objs[0].tenant_id
@@ -386,19 +390,19 @@ def upload():
         e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         kb_id = kb.id
     except Exception as e:
         return server_error_response(e)

     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

     file = request.files['file']
     if file.filename == '':
         return get_json_result(
-            data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

     root_folder = FileService.get_root_folder(tenant_id)
     pf_id = root_folder["id"]
@@ -409,7 +413,7 @@ def upload():
     try:
         if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
             return get_data_error_result(
-                retmsg="Exceed the maximum file number of a free user!")
+                message="Exceed the maximum file number of a free user!")

         filename = duplicate_name(
             DocumentService.query,
@@ -418,7 +422,7 @@ def upload():
         filetype = filename_type(filename)
         if not filetype:
             return get_data_error_result(
-                retmsg="This type of file has not been supported yet!")
+                message="This type of file has not been supported yet!")

         location = filename
         while STORAGE_IMPL.obj_exist(kb_id, location):
@@ -467,7 +471,7 @@ def upload():
             # if str(req["run"]) == TaskStatus.CANCEL.value:
             tenant_id = DocumentService.get_tenant_id(doc["id"])
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")

             # e, doc = DocumentService.get_by_id(doc["id"])
             TaskService.filter_delete([Task.doc_id == doc["id"]])
@@ -475,44 +479,44 @@ def upload():
             doc = doc.to_dict()
             doc["tenant_id"] = tenant_id
             bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-            queue_tasks(doc, bucket, name)
+            queue_tasks(doc, bucket, name, 0)
     except Exception as e:
         return server_error_response(e)

     return get_json_result(data=doc_result.to_json())


-@manager.route('/document/upload_and_parse', methods=['POST'])
+@manager.route('/document/upload_and_parse', methods=['POST'])  # noqa: F821
 @validate_request("conversation_id")
 def upload_parse():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

     file_objs = request.files.getlist('file')
     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

     doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, objs[0].tenant_id)
     return get_json_result(data=doc_ids)


-@manager.route('/list_chunks', methods=['POST'])
+@manager.route('/list_chunks', methods=['POST'])  # noqa: F821
 # @login_required
 def list_chunks():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json

@@ -526,15 +530,16 @@ def list_chunks():
         doc_id = req['doc_id']
     else:
         return get_json_result(
-            data=False, retmsg="Can't find doc_name or doc_id"
+            data=False, message="Can't find doc_name or doc_id"
         )
+    kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)

-    res = retrievaler.chunk_list(doc_id=doc_id, tenant_id=tenant_id)
+    res = settings.retrievaler.chunk_list(doc_id, tenant_id, kb_ids)
     res = [
         {
             "content": res_item["content_with_weight"],
             "doc_name": res_item["docnm_kwd"],
-            "img_id": res_item["img_id"]
+            "image_id": res_item["img_id"]
         } for res_item in res
     ]

@@ -543,15 +548,40 @@ def list_chunks():

     return get_json_result(data=res)

+@manager.route('/get_chunk/<chunk_id>', methods=['GET'])  # noqa: F821
+# @login_required
+def get_chunk(chunk_id):
+    from rag.nlp import search
+    token = request.headers.get('Authorization').split()[1]
+    objs = APIToken.query(token=token)
+    if not objs:
+        return get_json_result(
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+    try:
+        tenant_id = objs[0].tenant_id
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
+        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
+        if chunk is None:
+            return server_error_response(Exception("Chunk not found"))
+        k = []
+        for n in chunk.keys():
+            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
+                k.append(n)
+        for n in k:
+            del chunk[n]
+
+        return get_json_result(data=chunk)
+    except Exception as e:
+        return server_error_response(e)
+
-@manager.route('/list_kb_docs', methods=['POST'])
+@manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
 # @login_required
 def list_kb_docs():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json
     tenant_id = objs[0].tenant_id
@@ -561,7 +591,7 @@ def list_kb_docs():
         e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
         kb_id = kb.id

     except Exception as e:
@@ -572,10 +602,23 @@ def list_kb_docs():
     orderby = req.get("orderby", "create_time")
     desc = req.get("desc", True)
     keywords = req.get("keywords", "")
+    status = req.get("status", [])
+    if status:
+        invalid_status = {s for s in status if s not in VALID_TASK_STATUS}
+        if invalid_status:
+            return get_data_error_result(
+                message=f"Invalid filter status conditions: {', '.join(invalid_status)}"
+            )
+    types = req.get("types", [])
+    if types:
+        invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
+        if invalid_types:
+            return get_data_error_result(
+                message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}"
+            )
     try:
         docs, tol = DocumentService.get_by_kb_id(
-            kb_id, page_number, items_per_page, orderby, desc, keywords)
+            kb_id, page_number, items_per_page, orderby, desc, keywords, status, types)
         docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]

         return get_json_result(data={"total": tol, "docs": docs})
@@ -583,40 +626,41 @@ def list_kb_docs():
     except Exception as e:
         return server_error_response(e)

-@manager.route('/document/infos', methods=['POST'])
+
+@manager.route('/document/infos', methods=['POST'])  # noqa: F821
 @validate_request("doc_ids")
 def docinfos():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     doc_ids = req["doc_ids"]
     docs = DocumentService.get_by_ids(doc_ids)
     return get_json_result(data=list(docs.dicts()))


-@manager.route('/document', methods=['DELETE'])
+@manager.route('/document', methods=['DELETE'])  # noqa: F821
 # @login_required
 def document_rm():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     tenant_id = objs[0].tenant_id
     req = request.json
     try:
-        doc_ids = [DocumentService.get_doc_id_by_doc_name(doc_name) for doc_name in req.get("doc_names", [])]
+        doc_ids = DocumentService.get_doc_ids_by_doc_names(req.get("doc_names", []))
         for doc_id in req.get("doc_ids", []):
             if doc_id not in doc_ids:
                 doc_ids.append(doc_id)

         if not doc_ids:
             return get_json_result(
-                data=False, retmsg="Can't find doc_names or doc_ids"
+                data=False, message="Can't find doc_names or doc_ids"
             )

     except Exception as e:
@@ -627,20 +671,25 @@ def document_rm():
     FileService.init_knowledgebase_docs(pf_id, tenant_id)

     errors = ""
+    docs = DocumentService.get_by_ids(doc_ids)
+    doc_dic = {}
+    for doc in docs:
+        doc_dic[doc.id] = doc
+
     for doc_id in doc_ids:
         try:
-            e, doc = DocumentService.get_by_id(doc_id)
-            if not e:
-                return get_data_error_result(retmsg="Document not found!")
+            if doc_id not in doc_dic:
+                return get_data_error_result(message="Document not found!")
+            doc = doc_dic[doc_id]
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")

             b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

             if not DocumentService.remove_document(doc, tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")

             f2d = File2DocumentService.get_by_document_id(doc_id)
             FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
@@ -651,12 +700,12 @@ def document_rm():
             errors += str(e)

     if errors:
-        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
+        return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)

     return get_json_result(data=True)


-@manager.route('/completion_aibotk', methods=['POST'])
+@manager.route('/completion_aibotk', methods=['POST'])  # noqa: F821
 @validate_request("Authorization", "conversation_id", "word")
 def completion_faq():
     import base64
@@ -666,16 +715,18 @@ def completion_faq():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
-        return get_data_error_result(retmsg="Conversation not found!")
+        return get_data_error_result(message="Conversation not found!")
-    if "quote" not in req: req["quote"] = True
+    if "quote" not in req:
+        req["quote"] = True

     msg = []
     msg.append({"role": "user", "content": req["word"]})
-    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
     message_id = msg[-1]["id"]

     def fillin_conv(ans):
@@ -751,7 +802,7 @@ def completion_faq():
         conv.message.append(msg[-1])
         e, dia = DialogService.get_by_id(conv.dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         del req["conversation_id"]

         if not conv.reference:
@@ -796,50 +847,52 @@ def completion_faq():
         return server_error_response(e)


-@manager.route('/retrieval', methods=['POST'])
+@manager.route('/retrieval', methods=['POST'])  # noqa: F821
 @validate_request("kb_id", "question")
 def retrieval():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, retmsg='Token is not valid!"', retcode=RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json
-    kb_ids = req.get("kb_id",[])
+    kb_ids = req.get("kb_id", [])
     doc_ids = req.get("doc_ids", [])
     question = req.get("question")
     page = int(req.get("page", 1))
-    size = int(req.get("size", 30))
+    size = int(req.get("page_size", 30))
     similarity_threshold = float(req.get("similarity_threshold", 0.2))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
+    highlight = bool(req.get("highlight", False))

     try:
         kbs = KnowledgebaseService.get_by_ids(kb_ids)
         embd_nms = list(set([kb.embd_id for kb in kbs]))
         if len(embd_nms) != 1:
             return get_json_result(
-                data=False, retmsg='Knowledge bases use different embedding models or does not exist."', retcode=RetCode.AUTHENTICATION_ERROR)
+                data=False, message='Knowledge bases use different embedding models or does not exist."',
+                code=settings.RetCode.AUTHENTICATION_ERROR)

         embd_mdl = TenantLLMService.model_instance(
             kbs[0].tenant_id, LLMType.EMBEDDING.value, llm_name=kbs[0].embd_id)
         rerank_mdl = None
         if req.get("rerank_id"):
             rerank_mdl = TenantLLMService.model_instance(
                 kbs[0].tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
         if req.get("keyword", False):
             chat_mdl = TenantLLMService.model_instance(kbs[0].tenant_id, LLMType.CHAT)
             question += keyword_extraction(chat_mdl, question)
-        ranks = retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
-                                      similarity_threshold, vector_similarity_weight, top,
-                                      doc_ids, rerank_mdl=rerank_mdl)
+        ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
+                                               similarity_threshold, vector_similarity_weight, top,
+                                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight,
+                                               rank_feature=label_question(question, kbs))
         for c in ranks["chunks"]:
-            if "vector" in c:
-                del c["vector"]
+            c.pop("vector", None)
         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg='No chunk found! Check the chunk status please!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)
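> Note: the `/retrieval` handler above now reads `page_size` (formerly `size`) and an optional `highlight` flag from the request body, and strips `vector` fields from each returned chunk. A minimal client sketch, assuming an illustrative local base URL and API key (both hypothetical, adjust to your deployment):

```python
# Sketch of calling the updated /retrieval endpoint; URL and key are placeholders.
import requests

BASE_URL = "http://localhost:9380/v1/api"   # assumed deployment prefix
API_KEY = "ragflow-xxxxxx"                  # assumed API token

resp = requests.post(
    f"{BASE_URL}/retrieval",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "kb_id": ["kb-id-1"],            # all knowledge bases must share one embedding model
        "question": "What is RAGFlow?",
        "page": 1,
        "page_size": 30,                 # renamed from "size" in this change
        "similarity_threshold": 0.2,
        "vector_similarity_weight": 0.3,
        "top_k": 1024,
        "highlight": True,               # new optional flag
    },
    timeout=30,
)
payload = resp.json()
# On success, "data" carries the ranks dict; "vector" fields are removed server-side.
for chunk in payload.get("data", {}).get("chunks", []):
    print(chunk.get("doc_name"))
```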
api/apps/auth/README.md  (new file, 76 lines)
@@ -0,0 +1,76 @@
+# Auth
+
+The Auth module provides implementations of OAuth2 and OpenID Connect (OIDC) authentication for integration with third-party identity providers.
+
+**Features**
+
+- Supports both OAuth2 and OIDC authentication protocols
+- Automatic OIDC configuration discovery (via `/.well-known/openid-configuration`)
+- JWT token validation
+- Unified user information handling
+
+## Usage
+
+```python
+# OAuth2 configuration
+oauth_config = {
+    "type": "oauth2",
+    "client_id": "your_client_id",
+    "client_secret": "your_client_secret",
+    "authorization_url": "https://your-oauth-provider.com/oauth/authorize",
+    "token_url": "https://your-oauth-provider.com/oauth/token",
+    "userinfo_url": "https://your-oauth-provider.com/oauth/userinfo",
+    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
+}
+
+# OIDC configuration
+oidc_config = {
+    "type": "oidc",
+    "issuer": "https://your-oauth-provider.com/oidc",
+    "client_id": "your_client_id",
+    "client_secret": "your_client_secret",
+    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
+}
+
+# Github OAuth configuration
+github_config = {
+    "type": "github",
+    "client_id": "your_client_id",
+    "client_secret": "your_client_secret",
+    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
+}
+
+# Get client instance
+client = get_auth_client(oauth_config)
+```
+
+### Authentication Flow
+
+1. Get authorization URL:
+```python
+auth_url = client.get_authorization_url()
+```
+
+2. After user authorization, exchange authorization code for token:
+```python
+token_response = client.exchange_code_for_token(authorization_code)
+access_token = token_response["access_token"]
+```
+
+3. Fetch user information:
+```python
+user_info = client.fetch_user_info(access_token)
+```
+
+## User Information Structure
+
+All authentication methods return user information following this structure:
+
+```python
+{
+    "email": "user@example.com",
+    "username": "username",
+    "nickname": "User Name",
+    "avatar_url": "https://example.com/avatar.jpg"
+}
+```
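> Note: tying the README's three steps together, a minimal sketch of a Flask callback handler that exchanges the code and normalizes the user. The route, handler name, and config are illustrative; only `get_auth_client` and the client methods come from this module:

```python
# Hypothetical callback handler combining the documented steps.
from flask import Flask, request, redirect

from api.apps.auth import get_auth_client  # factory defined in this module

app = Flask(__name__)
github_config = {"type": "github", "client_id": "cid", "client_secret": "secret",
                 "redirect_uri": "https://app.example.com/v1/user/oauth/callback/github"}

@app.route("/v1/user/oauth/callback/<channel>")
def oauth_callback(channel):
    client = get_auth_client(github_config)
    code = request.args.get("code")
    if not code:
        # First leg: send the user to the provider's consent page.
        return redirect(client.get_authorization_url(state="random-state"))
    token_response = client.exchange_code_for_token(code)
    user_info = client.fetch_user_info(token_response["access_token"])
    # user_info is a UserInfo instance; to_dict() yields the documented structure.
    return user_info.to_dict()
```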
api/apps/auth/__init__.py  (new file, 40 lines)
@@ -0,0 +1,40 @@
+#
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+from .oauth import OAuthClient
+from .oidc import OIDCClient
+from .github import GithubOAuthClient
+
+
+CLIENT_TYPES = {
+    "oauth2": OAuthClient,
+    "oidc": OIDCClient,
+    "github": GithubOAuthClient
+}
+
+
+def get_auth_client(config) -> OAuthClient:
+    channel_type = str(config.get("type", "")).lower()
+    if channel_type == "":
+        if config.get("issuer"):
+            channel_type = "oidc"
+        else:
+            channel_type = "oauth2"
+    client_class = CLIENT_TYPES.get(channel_type)
+    if not client_class:
+        raise ValueError(f"Unsupported type: {channel_type}")
+
+    return client_class(config)
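> Note: in the factory above, an explicit `type` wins; with no `type`, the presence of `issuer` selects OIDC, otherwise plain OAuth2. A sketch of the dispatch (values are placeholders, and the `OIDCClient` constructor immediately fetches the discovery document, so running this requires a reachable issuer):

```python
# Illustrative dispatch behavior of get_auth_client.
cfg = {
    "issuer": "https://id.example.com/oidc",   # no "type" key: inferred as OIDC
    "client_id": "cid",
    "client_secret": "secret",
    "redirect_uri": "https://app.example.com/cb",
}
client = get_auth_client(cfg)                  # -> OIDCClient (network discovery happens here)

# Without "issuer" the fallback is plain OAuth2, which then requires
# authorization_url, token_url, and userinfo_url to be present in the config,
# since OAuthClient.__init__ reads them unconditionally.
```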
api/apps/auth/github.py  (new file, 63 lines)
@@ -0,0 +1,63 @@
+#
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+import requests
+from .oauth import OAuthClient, UserInfo
+
+
+class GithubOAuthClient(OAuthClient):
+    def __init__(self, config):
+        """
+        Initialize the GithubOAuthClient with the provider's configuration.
+        """
+        config.update({
+            "authorization_url": "https://github.com/login/oauth/authorize",
+            "token_url": "https://github.com/login/oauth/access_token",
+            "userinfo_url": "https://api.github.com/user",
+            "scope": "user:email"
+        })
+        super().__init__(config)
+
+    def fetch_user_info(self, access_token, **kwargs):
+        """
+        Fetch github user info.
+        """
+        user_info = {}
+        try:
+            headers = {"Authorization": f"Bearer {access_token}"}
+            # user info
+            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
+            response.raise_for_status()
+            user_info.update(response.json())
+            # email info
+            response = requests.get(self.userinfo_url + "/emails", headers=headers, timeout=self.http_request_timeout)
+            response.raise_for_status()
+            email_info = response.json()
+            user_info["email"] = next(
+                (email for email in email_info if email["primary"]), None
+            )["email"]
+            return self.normalize_user_info(user_info)
+        except requests.exceptions.RequestException as e:
+            raise ValueError(f"Failed to fetch github user info: {e}")
+
+    def normalize_user_info(self, user_info):
+        email = user_info.get("email")
+        username = user_info.get("login", str(email).split("@")[0])
+        nickname = user_info.get("name", username)
+        avatar_url = user_info.get("avatar_url", "")
+        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
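> Note: GitHub's `/user` payload may omit `email`, which is why the client above makes a second call to `/user/emails` and picks the entry flagged `primary`. A standalone sketch of that selection (the sample payload is illustrative):

```python
# Selecting the primary address from a GitHub /user/emails-style payload.
email_info = [
    {"email": "dev@example.com", "primary": False, "verified": True},
    {"email": "me@example.com", "primary": True, "verified": True},
]
primary = next((e for e in email_info if e["primary"]), None)
# Caveat: the code above indexes the result directly, so a payload with no
# primary entry would raise TypeError (None is not subscriptable); a guard
# like the one here may be warranted.
print(primary["email"] if primary else None)  # -> me@example.com
```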
api/apps/auth/oauth.py  (new file, 110 lines)
@@ -0,0 +1,110 @@
+#
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+import requests
+import urllib.parse
+
+
+class UserInfo:
+    def __init__(self, email, username, nickname, avatar_url):
+        self.email = email
+        self.username = username
+        self.nickname = nickname
+        self.avatar_url = avatar_url
+
+    def to_dict(self):
+        return {key: value for key, value in self.__dict__.items()}
+
+
+class OAuthClient:
+    def __init__(self, config):
+        """
+        Initialize the OAuthClient with the provider's configuration.
+        """
+        self.client_id = config["client_id"]
+        self.client_secret = config["client_secret"]
+        self.authorization_url = config["authorization_url"]
+        self.token_url = config["token_url"]
+        self.userinfo_url = config["userinfo_url"]
+        self.redirect_uri = config["redirect_uri"]
+        self.scope = config.get("scope", None)
+
+        self.http_request_timeout = 7
+
+    def get_authorization_url(self, state=None):
+        """
+        Generate the authorization URL for user login.
+        """
+        params = {
+            "client_id": self.client_id,
+            "redirect_uri": self.redirect_uri,
+            "response_type": "code",
+        }
+        if self.scope:
+            params["scope"] = self.scope
+        if state:
+            params["state"] = state
+        authorization_url = f"{self.authorization_url}?{urllib.parse.urlencode(params)}"
+        return authorization_url
+
+    def exchange_code_for_token(self, code):
+        """
+        Exchange authorization code for access token.
+        """
+        try:
+            payload = {
+                "client_id": self.client_id,
+                "client_secret": self.client_secret,
+                "code": code,
+                "redirect_uri": self.redirect_uri,
+                "grant_type": "authorization_code"
+            }
+            response = requests.post(
+                self.token_url,
+                data=payload,
+                headers={"Accept": "application/json"},
+                timeout=self.http_request_timeout
+            )
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            raise ValueError(f"Failed to exchange authorization code for token: {e}")
+
+    def fetch_user_info(self, access_token, **kwargs):
+        """
+        Fetch user information using access token.
+        """
+        try:
+            headers = {"Authorization": f"Bearer {access_token}"}
+            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
+            response.raise_for_status()
+            user_info = response.json()
+            return self.normalize_user_info(user_info)
+        except requests.exceptions.RequestException as e:
+            raise ValueError(f"Failed to fetch user info: {e}")
+
+    def normalize_user_info(self, user_info):
+        email = user_info.get("email")
+        username = user_info.get("username", str(email).split("@")[0])
+        nickname = user_info.get("nickname", username)
+        avatar_url = user_info.get("avatar_url", None)
+        if avatar_url is None:
+            avatar_url = user_info.get("picture", "")
+        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
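> Note: `get_authorization_url` above simply URL-encodes the query parameters onto the configured endpoint. A quick sketch of the output for a toy configuration (all values illustrative):

```python
# What the authorization URL looks like for sample parameters.
import urllib.parse

params = {
    "client_id": "cid",
    "redirect_uri": "https://app.example.com/cb",
    "response_type": "code",
    "scope": "openid profile email",
    "state": "xyz",
}
url = "https://id.example.com/oauth/authorize?" + urllib.parse.urlencode(params)
print(url)
# https://id.example.com/oauth/authorize?client_id=cid&redirect_uri=https%3A%2F%2Fapp.example.com%2Fcb&response_type=code&scope=openid+profile+email&state=xyz
```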
api/apps/auth/oidc.py  (new file, 100 lines)
@@ -0,0 +1,100 @@
+#
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+import jwt
+import requests
+from .oauth import OAuthClient
+
+
+class OIDCClient(OAuthClient):
+    def __init__(self, config):
+        """
+        Initialize the OIDCClient with the provider's configuration.
+        Use `issuer` as the single source of truth for configuration discovery.
+        """
+        self.issuer = config.get("issuer")
+        if not self.issuer:
+            raise ValueError("Missing issuer in configuration.")
+
+        oidc_metadata = self._load_oidc_metadata(self.issuer)
+        config.update({
+            'issuer': oidc_metadata['issuer'],
+            'jwks_uri': oidc_metadata['jwks_uri'],
+            'authorization_url': oidc_metadata['authorization_endpoint'],
+            'token_url': oidc_metadata['token_endpoint'],
+            'userinfo_url': oidc_metadata['userinfo_endpoint']
+        })
+
+        super().__init__(config)
+        self.issuer = config['issuer']
+        self.jwks_uri = config['jwks_uri']
+
+    def _load_oidc_metadata(self, issuer):
+        """
+        Load OIDC metadata from `/.well-known/openid-configuration`.
+        """
+        try:
+            metadata_url = f"{issuer}/.well-known/openid-configuration"
+            response = requests.get(metadata_url, timeout=7)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            raise ValueError(f"Failed to fetch OIDC metadata: {e}")
+
+    def parse_id_token(self, id_token):
+        """
+        Parse and validate OIDC ID Token (JWT format) with signature verification.
+        """
+        try:
+            # Decode JWT header without verifying signature
+            headers = jwt.get_unverified_header(id_token)
+
+            # OIDC usually uses `RS256` for signing
+            alg = headers.get("alg", "RS256")
+
+            # Use PyJWT's PyJWKClient to fetch JWKS and find signing key
+            jwks_url = f"{self.issuer}/.well-known/jwks.json"
+            jwks_cli = jwt.PyJWKClient(jwks_url)
+            signing_key = jwks_cli.get_signing_key_from_jwt(id_token).key
+
+            # Decode and verify signature
+            decoded_token = jwt.decode(
+                id_token,
+                key=signing_key,
+                algorithms=[alg],
+                audience=str(self.client_id),
+                issuer=self.issuer,
+            )
+            return decoded_token
+        except Exception as e:
+            raise ValueError(f"Error parsing ID Token: {e}")
+
+    def fetch_user_info(self, access_token, id_token=None, **kwargs):
+        """
+        Fetch user info.
+        """
+        user_info = {}
+        if id_token:
+            user_info = self.parse_id_token(id_token)
+        user_info.update(super().fetch_user_info(access_token).to_dict())
+        return self.normalize_user_info(user_info)
+
+    def normalize_user_info(self, user_info):
+        return super().normalize_user_info(user_info)
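> Note: usage mirrors the base class, except that the constructor performs discovery and `fetch_user_info` can merge ID-token claims. Also observe that `parse_id_token` derives the JWKS URL from the issuer rather than using the discovered `jwks_uri`, so signature verification assumes the provider serves keys at `{issuer}/.well-known/jwks.json`. A hedged sketch (issuer and tokens are placeholders; the constructor makes a network call):

```python
# Illustrative OIDC flow; requires a reachable issuer.
config = {
    "type": "oidc",
    "issuer": "https://id.example.com/oidc",
    "client_id": "cid",
    "client_secret": "secret",
    "redirect_uri": "https://app.example.com/cb",
}
client = OIDCClient(config)                      # fetches /.well-known/openid-configuration

token_response = client.exchange_code_for_token("auth-code-from-callback")
user = client.fetch_user_info(
    token_response["access_token"],
    id_token=token_response.get("id_token"),     # ID-token claims merged when present
)
print(user.to_dict())
```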
@@ -14,24 +14,27 @@
 # limitations under the License.
 #
 import json
-from functools import partial
+import traceback
 from flask import request, Response
 from flask_login import login_required, current_user
 from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
+from api.db.services.user_service import TenantService
+from api.db.services.user_canvas_version import UserCanvasVersionService
 from api.settings import RetCode
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
 from agent.canvas import Canvas
 from peewee import MySQLDatabase, PostgresqlDatabase
+from api.db.db_models import APIToken
+import time
+

-@manager.route('/templates', methods=['GET'])
+@manager.route('/templates', methods=['GET'])  # noqa: F821
 @login_required
 def templates():
     return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.get_all()])


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def canvas_list():
     return get_json_result(data=sorted([c.to_dict() for c in \
@@ -39,65 +42,85 @@ def canvas_list():
     )


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @validate_request("canvas_ids")
 @login_required
 def rm():
     for i in request.json["canvas_ids"]:
         if not UserCanvasService.query(user_id=current_user.id,id=i):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
         UserCanvasService.delete_by_id(i)
     return get_json_result(data=True)


-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @validate_request("dsl", "title")
 @login_required
 def save():
     req = request.json
     req["user_id"] = current_user.id
-    if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
+    if not isinstance(req["dsl"], str):
+        req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
     req["dsl"] = json.loads(req["dsl"])
     if "id" not in req:
         if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
-            return server_error_response(ValueError("Duplicated title."))
+            return get_data_error_result(message=f"{req['title'].strip()} already exists.")
         req["id"] = get_uuid()
         if not UserCanvasService.save(**req):
-            return get_data_error_result(retmsg="Fail to save canvas.")
+            return get_data_error_result(message="Fail to save canvas.")
     else:
         if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
         UserCanvasService.update_by_id(req["id"], req)
+    # save version
+    UserCanvasVersionService.insert(user_canvas_id=req["id"], dsl=req["dsl"], title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")))
+    UserCanvasVersionService.delete_all_versions(req["id"])
     return get_json_result(data=req)


-@manager.route('/get/<canvas_id>', methods=['GET'])
+
+@manager.route('/get/<canvas_id>', methods=['GET'])  # noqa: F821
 @login_required
 def get(canvas_id):
+    e, c = UserCanvasService.get_by_tenant_id(canvas_id)
+    if not e:
+        return get_data_error_result(message="canvas not found.")
+    return get_json_result(data=c)
+
+
+@manager.route('/getsse/<canvas_id>', methods=['GET'])  # type: ignore # noqa: F821
+def getsse(canvas_id):
+    token = request.headers.get('Authorization').split()
+    if len(token) != 2:
+        return get_data_error_result(message='Authorization is not valid!"')
+    token = token[1]
+    objs = APIToken.query(beta=token)
+    if not objs:
+        return get_data_error_result(message='Authentication error: API key is invalid!"')
     e, c = UserCanvasService.get_by_id(canvas_id)
     if not e:
-        return get_data_error_result(retmsg="canvas not found.")
+        return get_data_error_result(message="canvas not found.")
     return get_json_result(data=c.to_dict())


-@manager.route('/completion', methods=['POST'])
+@manager.route('/completion', methods=['POST'])  # noqa: F821
 @validate_request("id")
 @login_required
 def run():
     req = request.json
     stream = req.get("stream", True)
+    running_hint_text = req.get("running_hint_text", "")
     e, cvs = UserCanvasService.get_by_id(req["id"])
     if not e:
-        return get_data_error_result(retmsg="canvas not found.")
+        return get_data_error_result(message="canvas not found.")
     if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
         return get_json_result(
-            data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-            retcode=RetCode.OPERATING_ERROR)
+            data=False, message='Only owner of canvas authorized for this operation.',
+            code=RetCode.OPERATING_ERROR)

     if not isinstance(cvs.dsl, str):
         cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
@@ -108,40 +131,44 @@ def run():
         canvas = Canvas(cvs.dsl, current_user.id)
         if "message" in req:
             canvas.messages.append({"role": "user", "content": req["message"], "id": message_id})
-            if len([m for m in canvas.messages if m["role"] == "user"]) > 1:
-                # ten = TenantService.get_info_by(current_user.id)[0]
-                # req["message"] = full_question(ten["tenant_id"], ten["llm_id"], canvas.messages)
-                pass
             canvas.add_user_input(req["message"])
-        answer = canvas.run(stream=stream)
-        print(canvas)
     except Exception as e:
         return server_error_response(e)

-    assert answer is not None, "Nothing. Is it over?"

     if stream:
-        assert isinstance(answer, partial), "Nothing. Is it over?"

         def sse():
             nonlocal answer, cvs
             try:
-                for ans in answer():
+                for ans in canvas.run(running_hint_text=running_hint_text, stream=True):
+                    if ans.get("running_status"):
+                        yield "data:" + json.dumps({"code": 0, "message": "",
+                                                    "data": {"answer": ans["content"],
+                                                             "running_status": True}},
+                                                   ensure_ascii=False) + "\n\n"
+                        continue
                     for k in ans.keys():
                         final_ans[k] = ans[k]
                     ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"

                 canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+                canvas.history.append(("assistant", final_ans["content"]))
+                if not canvas.path[-1]:
+                    canvas.path.pop(-1)
                 if final_ans.get("reference"):
                     canvas.reference.append(final_ans["reference"])
                 cvs.dsl = json.loads(str(canvas))
                 UserCanvasService.update_by_id(req["id"], cvs.to_dict())
             except Exception as e:
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
+                cvs.dsl = json.loads(str(canvas))
+                if not canvas.path[-1]:
+                    canvas.path.pop(-1)
+                UserCanvasService.update_by_id(req["id"], cvs.to_dict())
+                traceback.print_exc()
+                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                             "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                            ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

         resp = Response(sse(), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")
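> Note: each event yielded by `sse()` above is a `data:` line carrying a JSON envelope with `code`, `message`, and `data`; `running_status` frames interleave with answer frames, and a final frame with `"data": true` closes the stream. A minimal consumer sketch (URL and auth header are illustrative, this route normally sits behind a login session):

```python
# Reading the /completion SSE stream; endpoint and token are placeholders.
import json
import requests

with requests.post(
    "http://localhost:9380/v1/canvas/completion",   # hypothetical URL
    headers={"Authorization": "Bearer <token>"},
    json={"id": "<canvas_id>", "message": "hello", "stream": True},
    stream=True,
    timeout=300,
) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        if not raw or not raw.startswith("data:"):
            continue
        frame = json.loads(raw[len("data:"):])
        if frame["data"] is True:              # terminal frame
            break
        if frame["data"].get("running_status"):
            continue                           # progress hint, not an answer delta
        print(frame["data"]["answer"])
```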
@@ -150,16 +177,19 @@ def run():
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp

-    final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
-    canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
-    if final_ans.get("reference"):
-        canvas.reference.append(final_ans["reference"])
-    cvs.dsl = json.loads(str(canvas))
-    UserCanvasService.update_by_id(req["id"], cvs.to_dict())
-    return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})
+    for answer in canvas.run(running_hint_text=running_hint_text, stream=False):
+        if answer.get("running_status"):
+            continue
+        final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+        if final_ans.get("reference"):
+            canvas.reference.append(final_ans["reference"])
+    cvs.dsl = json.loads(str(canvas))
+    UserCanvasService.update_by_id(req["id"], cvs.to_dict())
+    return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})


-@manager.route('/reset', methods=['POST'])
+@manager.route('/reset', methods=['POST'])  # noqa: F821
 @validate_request("id")
 @login_required
 def reset():
@@ -167,11 +197,11 @@ def reset():
     try:
         e, user_canvas = UserCanvasService.get_by_id(req["id"])
         if not e:
-            return get_data_error_result(retmsg="canvas not found.")
+            return get_data_error_result(message="canvas not found.")
         if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)

         canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
         canvas.reset()
@@ -182,7 +212,51 @@ def reset():
         return server_error_response(e)


-@manager.route('/test_db_connect', methods=['POST'])
+@manager.route('/input_elements', methods=['GET'])  # noqa: F821
+@login_required
+def input_elements():
+    cvs_id = request.args.get("id")
+    cpn_id = request.args.get("component_id")
+    try:
+        e, user_canvas = UserCanvasService.get_by_id(cvs_id)
+        if not e:
+            return get_data_error_result(message="canvas not found.")
+        if not UserCanvasService.query(user_id=current_user.id, id=cvs_id):
+            return get_json_result(
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
+
+        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
+        return get_json_result(data=canvas.get_component_input_elements(cpn_id))
+    except Exception as e:
+        return server_error_response(e)
+
+
+@manager.route('/debug', methods=['POST'])  # noqa: F821
+@validate_request("id", "component_id", "params")
+@login_required
+def debug():
+    req = request.json
+    for p in req["params"]:
+        assert p.get("key")
+    try:
+        e, user_canvas = UserCanvasService.get_by_id(req["id"])
+        if not e:
+            return get_data_error_result(message="canvas not found.")
+        if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+            return get_json_result(
+                data=False, message='Only owner of canvas authorized for this operation.',
+                code=RetCode.OPERATING_ERROR)
+
+        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
+        canvas.get_component(req["component_id"])["obj"]._param.debug_inputs = req["params"]
+        df = canvas.get_component(req["component_id"])["obj"].debug()
+        return get_json_result(data=df.to_dict(orient="records"))
+    except Exception as e:
+        return server_error_response(e)
+
+
+@manager.route('/test_db_connect', methods=['POST'])  # noqa: F821
 @validate_request("db_type", "database", "username", "host", "port", "password")
 @login_required
 def test_db_connect():
@ -194,8 +268,84 @@ def test_db_connect():
|
|||||||
elif req["db_type"] == 'postgresql':
|
elif req["db_type"] == 'postgresql':
|
||||||
db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
|
db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
|
||||||
password=req["password"])
|
password=req["password"])
|
||||||
db.connect()
|
elif req["db_type"] == 'mssql':
|
||||||
|
import pyodbc
|
||||||
|
connection_string = (
|
||||||
|
f"DRIVER={{ODBC Driver 17 for SQL Server}};"
|
||||||
|
f"SERVER={req['host']},{req['port']};"
|
||||||
|
f"DATABASE={req['database']};"
|
||||||
|
f"UID={req['username']};"
|
||||||
|
f"PWD={req['password']};"
|
||||||
|
)
|
||||||
|
db = pyodbc.connect(connection_string)
|
||||||
|
cursor = db.cursor()
|
||||||
|
cursor.execute("SELECT 1")
|
||||||
|
cursor.close()
|
||||||
|
else:
|
||||||
|
return server_error_response("Unsupported database type.")
|
||||||
|
if req["db_type"] != 'mssql':
|
||||||
|
db.connect()
|
||||||
db.close()
|
db.close()
|
||||||
|
|
||||||
return get_json_result(data="Database Connection Successful!")
|
return get_json_result(data="Database Connection Successful!")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return server_error_response(e)
|
return server_error_response(e)
|
||||||
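
A quick way to exercise the new mssql branch is to POST the connection parameters to this endpoint. A minimal client sketch, assuming the blueprint is mounted under /v1/canvas on the default port; host, port, and credentials below are placeholders, not values from this diff:

    import requests

    payload = {
        "db_type": "mssql",
        "database": "master",
        "username": "sa",
        "host": "127.0.0.1",
        "port": 1433,
        "password": "<password>",
    }
    # A logged-in session cookie is required because of @login_required.
    r = requests.post("http://localhost:9380/v1/canvas/test_db_connect", json=payload)
    print(r.json())  # "Database Connection Successful!" in the data field on success
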

+# api: get the list of DSL versions of a canvas
+@manager.route('/getlistversion/<canvas_id>', methods=['GET'])  # noqa: F821
+@login_required
+def getlistversion(canvas_id):
+    try:
+        versions = sorted([c.to_dict() for c in UserCanvasVersionService.list_by_canvas_id(canvas_id)],
+                          key=lambda x: x["update_time"] * -1)
+        return get_json_result(data=versions)
+    except Exception as e:
+        return get_data_error_result(message=f"Error getting history files: {e}")
+
+
+# api: get one DSL version of a canvas
+@manager.route('/getversion/<version_id>', methods=['GET'])  # noqa: F821
+@login_required
+def getversion(version_id):
+    try:
+        e, version = UserCanvasVersionService.get_by_id(version_id)
+        if version:
+            return get_json_result(data=version.to_dict())
+    except Exception as e:
+        return get_json_result(data=f"Error getting history file: {e}")
+
+
+@manager.route('/listteam', methods=['GET'])  # noqa: F821
+@login_required
+def list_kbs():
+    keywords = request.args.get("keywords", "")
+    page_number = int(request.args.get("page", 1))
+    items_per_page = int(request.args.get("page_size", 150))
+    orderby = request.args.get("orderby", "create_time")
+    desc = request.args.get("desc", True)
+    try:
+        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
+        kbs, total = UserCanvasService.get_by_tenant_ids(
+            [m["tenant_id"] for m in tenants], current_user.id, page_number,
+            items_per_page, orderby, desc, keywords)
+        return get_json_result(data={"kbs": kbs, "total": total})
+    except Exception as e:
+        return server_error_response(e)
+
+
+@manager.route('/setting', methods=['POST'])  # noqa: F821
+@validate_request("id", "title", "permission")
+@login_required
+def setting():
+    req = request.json
+    req["user_id"] = current_user.id
+    e, flow = UserCanvasService.get_by_id(req["id"])
+    if not e:
+        return get_data_error_result(message="canvas not found.")
+    flow = flow.to_dict()
+    flow["title"] = req["title"]
+    if req.get("description"):
+        flow["description"] = req["description"]
+    if req.get("permission"):
+        flow["permission"] = req["permission"]
+    if req.get("avatar"):
+        flow["avatar"] = req["avatar"]
+    if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+        return get_json_result(
+            data=False, message='Only owner of canvas authorized for this operation.',
+            code=RetCode.OPERATING_ERROR)
+    num = UserCanvasService.update_by_id(req["id"], flow)
+    return get_json_result(data=num)
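
The two version endpoints take their IDs from the URL path, and getlistversion returns newest-first because it sorts on update_time * -1. A minimal read-only sketch, assuming the same /v1/canvas mount point and an already-authenticated requests.Session:

    import requests

    BASE = "http://localhost:9380/v1/canvas"  # assumed mount point
    session = requests.Session()              # assumed to carry a login cookie

    versions = session.get(f"{BASE}/getlistversion/<canvas_id>").json()["data"]
    if versions:
        latest = versions[0]                  # newest first
        dsl = session.get(f"{BASE}/getversion/{latest['id']}").json()["data"]
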
@@ -15,16 +15,15 @@
 #
 import datetime
 import json
-import traceback

 from flask import request
 from flask_login import login_required, current_user
-from elasticsearch_dsl import Q

-from api.db.services.dialog_service import keyword_extraction
 from rag.app.qa import rmPrefix, beAdoc
+from rag.app.tag import label_question
 from rag.nlp import search, rag_tokenizer
-from rag.utils.es_conn import ELASTICSEARCH
+from rag.prompts import keyword_extraction, cross_languages
+from rag.settings import PAGERANK_FLD
 from rag.utils import rmSpace
 from api.db import LLMType, ParserType
 from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -32,13 +31,14 @@ from api.db.services.llm_service import LLMBundle
 from api.db.services.user_service import UserTenantService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.db.services.document_service import DocumentService
-from api.settings import RetCode, retrievaler, kg_retrievaler
+from api import settings
 from api.utils.api_utils import get_json_result
-import hashlib
+import xxhash
 import re


-@manager.route('/list', methods=['POST'])
+@manager.route('/list', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_id")
 def list_chunk():
@@ -50,16 +50,17 @@ def list_chunk():
     try:
         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         e, doc = DocumentService.get_by_id(doc_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
+        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
         query = {
             "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
         }
         if "available_int" in req:
             query["available_int"] = int(req["available_int"])
-        sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
+        sres = settings.retrievaler.search(query, search.index_name(tenant_id), kb_ids, highlight=True)
         res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
         for id in sres.ids:
             d = {
@@ -70,60 +71,56 @@ def list_chunk():
                 "doc_id": sres.field[id]["doc_id"],
                 "docnm_kwd": sres.field[id]["docnm_kwd"],
                 "important_kwd": sres.field[id].get("important_kwd", []),
-                "img_id": sres.field[id].get("img_id", ""),
-                "available_int": sres.field[id].get("available_int", 1),
-                "positions": sres.field[id].get("position_int", "").split("\t")
+                "question_kwd": sres.field[id].get("question_kwd", []),
+                "image_id": sres.field[id].get("img_id", ""),
+                "available_int": int(sres.field[id].get("available_int", 1)),
+                "positions": sres.field[id].get("position_int", []),
             }
-            if len(d["positions"]) % 5 == 0:
-                poss = []
-                for i in range(0, len(d["positions"]), 5):
-                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
-                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
-                d["positions"] = poss
+            assert isinstance(d["positions"], list)
+            assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
             res["chunks"].append(d)
         return get_json_result(data=res)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)
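
The storage layer now returns position_int as a list of five-element lists instead of a tab-separated string, which is exactly what the two assertions above check. A small sketch of converting the legacy flat form into the new shape; the sample numbers are invented, and by convention each group of five is a page number plus box coordinates:

    # Legacy form: one flat tab-separated string, five numbers per box.
    legacy = "1\t10\t120\t15\t40"
    flat = [float(x) for x in legacy.split("\t")]
    assert len(flat) % 5 == 0

    # New form: one 5-element list per box, as the endpoint now asserts.
    positions = [flat[i:i + 5] for i in range(0, len(flat), 5)]
    assert all(len(p) == 5 for p in positions)
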


-@manager.route('/get', methods=['GET'])
+@manager.route('/get', methods=['GET'])  # noqa: F821
 @login_required
 def get():
     chunk_id = request.args["chunk_id"]
     try:
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
-            return get_data_error_result(retmsg="Tenant not found!")
-        res = ELASTICSEARCH.get(
-            chunk_id, search.index_name(
-                tenants[0].tenant_id))
-        if not res.get("found"):
-            return server_error_response("Chunk not found")
-        id = res["_id"]
-        res = res["_source"]
-        res["chunk_id"] = id
+            return get_data_error_result(message="Tenant not found!")
+        for tenant in tenants:
+            kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
+            chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
+            if chunk:
+                break
+        if chunk is None:
+            return server_error_response(Exception("Chunk not found"))
         k = []
-        for n in res.keys():
+        for n in chunk.keys():
             if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                 k.append(n)
         for n in k:
-            del res[n]
+            del chunk[n]

-        return get_json_result(data=res)
+        return get_json_result(data=chunk)
     except Exception as e:
         if str(e).find("NotFoundError") >= 0:
-            return get_json_result(data=False, retmsg=f'Chunk not found!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='Chunk not found!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)


-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @login_required
-@validate_request("doc_id", "chunk_id", "content_with_weight",
-                  "important_kwd")
+@validate_request("doc_id", "chunk_id", "content_with_weight")
 def set():
     req = request.json
     d = {
@@ -131,116 +128,134 @@ def set():
         "content_with_weight": req["content_with_weight"]}
     d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
-    d["important_kwd"] = req["important_kwd"]
-    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
+    if "important_kwd" in req:
+        d["important_kwd"] = req["important_kwd"]
+        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
+    if "question_kwd" in req:
+        d["question_kwd"] = req["question_kwd"]
+        d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
+    if "tag_kwd" in req:
+        d["tag_kwd"] = req["tag_kwd"]
+    if "tag_feas" in req:
+        d["tag_feas"] = req["tag_feas"]
     if "available_int" in req:
         d["available_int"] = req["available_int"]

     try:
         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

         embd_id = DocumentService.get_embd_id(req["doc_id"])
         embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_id)

         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")

         if doc.parser_id == ParserType.QA:
             arr = [
                 t for t in re.split(
                     r"[\n\t]",
                     req["content_with_weight"]) if len(t) > 1]
-            if len(arr) != 2:
-                return get_data_error_result(
-                    retmsg="Q&A must be separated by TAB/ENTER key.")
-            q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
-            d = beAdoc(d, arr[0], arr[1], not any(
+            q, a = rmPrefix(arr[0]), rmPrefix("\n".join(arr[1:]))
+            d = beAdoc(d, q, a, not any(
                 [rag_tokenizer.is_chinese(t) for t in q + a]))

-        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
+        v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
         v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
-        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
+        settings.docStoreConn.update({"id": req["chunk_id"]}, d, search.index_name(tenant_id), doc.kb_id)
        return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/switch', methods=['POST'])
+@manager.route('/switch', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("chunk_ids", "available_int", "doc_id")
 def switch():
     req = request.json
     try:
-        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
-        if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
-        if not ELASTICSEARCH.upsert([{"id": i, "available_int": int(req["available_int"])} for i in req["chunk_ids"]],
-                                    search.index_name(tenant_id)):
-            return get_data_error_result(retmsg="Index updating failure")
+        e, doc = DocumentService.get_by_id(req["doc_id"])
+        if not e:
+            return get_data_error_result(message="Document not found!")
+        for cid in req["chunk_ids"]:
+            if not settings.docStoreConn.update({"id": cid},
+                                                {"available_int": int(req["available_int"])},
+                                                search.index_name(DocumentService.get_tenant_id(req["doc_id"])),
+                                                doc.kb_id):
+                return get_data_error_result(message="Index updating failure")
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("chunk_ids", "doc_id")
 def rm():
+    from rag.utils.storage_factory import STORAGE_IMPL
     req = request.json
     try:
-        if not ELASTICSEARCH.deleteByQuery(
-                Q("ids", values=req["chunk_ids"]), search.index_name(current_user.id)):
-            return get_data_error_result(retmsg="Index updating failure")
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
+        if not settings.docStoreConn.delete({"id": req["chunk_ids"]}, search.index_name(current_user.id), doc.kb_id):
+            return get_data_error_result(message="Index updating failure")
         deleted_chunk_ids = req["chunk_ids"]
         chunk_number = len(deleted_chunk_ids)
         DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
+        for cid in deleted_chunk_ids:
+            if STORAGE_IMPL.obj_exist(doc.kb_id, cid):
+                STORAGE_IMPL.rm(doc.kb_id, cid)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/create', methods=['POST'])
+@manager.route('/create', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_id", "content_with_weight")
 def create():
     req = request.json
-    md5 = hashlib.md5()
-    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
-    chunck_id = md5.hexdigest()
+    chunck_id = xxhash.xxh64((req["content_with_weight"] + req["doc_id"]).encode("utf-8")).hexdigest()
     d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
          "content_with_weight": req["content_with_weight"]}
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
     d["important_kwd"] = req.get("important_kwd", [])
     d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
+    d["question_kwd"] = req.get("question_kwd", [])
+    d["question_tks"] = rag_tokenizer.tokenize("\n".join(req.get("question_kwd", [])))
     d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
     d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         d["kb_id"] = [doc.kb_id]
         d["docnm_kwd"] = doc.name
+        d["title_tks"] = rag_tokenizer.tokenize(doc.name)
         d["doc_id"] = doc.id

         tenant_id = DocumentService.get_tenant_id(req["doc_id"])
         if not tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")

+        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
+        if not e:
+            return get_data_error_result(message="Knowledgebase not found!")
+        if kb.pagerank:
+            d[PAGERANK_FLD] = kb.pagerank

         embd_id = DocumentService.get_embd_id(req["doc_id"])
         embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)

-        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
+        v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
         v = 0.1 * v[0] + 0.9 * v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
-        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
+        settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)

         DocumentService.increment_chunk_num(
             doc.id, doc.kb_id, c, 1, 0)
@@ -249,7 +264,7 @@ def create():
         return server_error_response(e)


-@manager.route('/retrieval_test', methods=['POST'])
+@manager.route('/retrieval_test', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("kb_id", "question")
 def retrieval_test():
@@ -257,28 +272,36 @@ def retrieval_test():
     page = int(req.get("page", 1))
     size = int(req.get("size", 30))
     question = req["question"]
-    kb_id = req["kb_id"]
-    if isinstance(kb_id, str): kb_id = [kb_id]
+    kb_ids = req["kb_id"]
+    if isinstance(kb_ids, str):
+        kb_ids = [kb_ids]
     doc_ids = req.get("doc_ids", [])
     similarity_threshold = float(req.get("similarity_threshold", 0.0))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
+    use_kg = req.get("use_kg", False)
     top = int(req.get("top_k", 1024))
+    langs = req.get("cross_languages", [])
+    tenant_ids = []

     try:
         tenants = UserTenantService.query(user_id=current_user.id)
-        for kid in kb_id:
+        for kb_id in kb_ids:
             for tenant in tenants:
                 if KnowledgebaseService.query(
-                        tenant_id=tenant.tenant_id, id=kid):
+                        tenant_id=tenant.tenant_id, id=kb_id):
+                    tenant_ids.append(tenant.tenant_id)
                     break
             else:
                 return get_json_result(
-                    data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                    data=False, message='Only owner of knowledgebase authorized for this operation.',
+                    code=settings.RetCode.OPERATING_ERROR)

-        e, kb = KnowledgebaseService.get_by_id(kb_id[0])
+        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
         if not e:
-            return get_data_error_result(retmsg="Knowledgebase not found!")
+            return get_data_error_result(message="Knowledgebase not found!")

+        if langs:
+            question = cross_languages(kb.tenant_id, None, question, langs)

         embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

@@ -290,38 +313,50 @@ def retrieval_test():
             chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
             question += keyword_extraction(chat_mdl, question)

-        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
-        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
+        labels = label_question(question, [kb])
+        ranks = settings.retrievaler.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
                                similarity_threshold, vector_similarity_weight, top,
-                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
+                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"),
+                               rank_feature=labels
+                               )
+        if use_kg:
+            ck = settings.kg_retrievaler.retrieval(question,
+                                                   tenant_ids,
+                                                   kb_ids,
+                                                   embd_mdl,
+                                                   LLMBundle(kb.tenant_id, LLMType.CHAT))
+            if ck["content_with_weight"]:
+                ranks["chunks"].insert(0, ck)

         for c in ranks["chunks"]:
-            if "vector" in c:
-                del c["vector"]
+            c.pop("vector", None)
+        ranks["labels"] = labels

         return get_json_result(data=ranks)
     except Exception as e:
         if str(e).find("not_found") > 0:
-            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
-                                   retcode=RetCode.DATA_ERROR)
+            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
+                                   code=settings.RetCode.DATA_ERROR)
         return server_error_response(e)


-@manager.route('/knowledge_graph', methods=['GET'])
+@manager.route('/knowledge_graph', methods=['GET'])  # noqa: F821
 @login_required
 def knowledge_graph():
     doc_id = request.args["doc_id"]
+    tenant_id = DocumentService.get_tenant_id(doc_id)
+    kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
     req = {
-        "doc_ids":[doc_id],
+        "doc_ids": [doc_id],
         "knowledge_graph_kwd": ["graph", "mind_map"]
     }
-    tenant_id = DocumentService.get_tenant_id(doc_id)
-    sres = retrievaler.search(req, search.index_name(tenant_id))
+    sres = settings.retrievaler.search(req, search.index_name(tenant_id), kb_ids)
     obj = {"graph": {}, "mind_map": {}}
     for id in sres.ids[:2]:
         ty = sres.field[id]["knowledge_graph_kwd"]
         try:
             content_json = json.loads(sres.field[id]["content_with_weight"])
-        except Exception as e:
+        except Exception:
             continue

         if ty == 'mind_map':
@@ -344,4 +379,3 @@ def knowledge_graph():
             obj[ty] = content_json

     return get_json_result(data=obj)
-
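
Both /set and /create above embed two strings per chunk, the document title and the chunk body (or its question keywords when present), then blend them 1:9 before storing the vector. A standalone sketch of that weighting; the vectors are made up, and real embedding models return hundreds of dimensions:

    import numpy as np

    v_title = np.array([0.2, 0.4, 0.1])  # stand-in for the embedding of doc.name
    v_body = np.array([0.6, 0.0, 0.3])   # stand-in for the embedding of the chunk text

    # Non-Q&A chunks keep a 10% title contribution; Q&A chunks use the body vector alone.
    v = 0.1 * v_title + 0.9 * v_body
    field_name = "q_%d_vec" % len(v)     # e.g. "q_3_vec"; the dimension is part of the field name
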
@@ -17,36 +17,44 @@ import json
 import re
 import traceback
 from copy import deepcopy
-from api.db.services.user_service import UserTenantService
-from flask import request, Response
-from flask_login import login_required, current_user

+import trio
+from flask import Response, request
+from flask_login import current_user, login_required
+
+from api import settings
 from api.db import LLMType
-from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
+from api.db.db_models import APIToken
+from api.db.services.conversation_service import ConversationService, structure_answer
+from api.db.services.dialog_service import DialogService, ask, chat
 from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
-from api.settings import RetCode, retrievaler
-from api.utils.api_utils import get_json_result
-from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
-from graphrag.mind_map_extractor import MindMapExtractor
+from api.db.services.llm_service import LLMBundle, TenantService
+from api.db.services.user_service import UserTenantService
+from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request
+from graphrag.general.mind_map_extractor import MindMapExtractor
+from rag.app.tag import label_question


-@manager.route('/set', methods=['POST'])
+@manager.route("/set", methods=["POST"])  # noqa: F821
 @login_required
 def set_conversation():
     req = request.json
     conv_id = req.get("conversation_id")
     is_new = req.get("is_new")
+    name = req.get("name", "New conversation")
+
+    if len(name) > 255:
+        name = name[0:255]
+
     del req["is_new"]
     if not is_new:
         del req["conversation_id"]
     try:
         if not ConversationService.update_by_id(conv_id, req):
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
-            return get_data_error_result(
-                retmsg="Fail to update a conversation!")
+            return get_data_error_result(message="Fail to update a conversation!")
         conv = conv.to_dict()
         return get_json_result(data=conv)
     except Exception as e:
@@ -55,46 +63,81 @@ def set_conversation():
     try:
         e, dia = DialogService.get_by_id(req["dialog_id"])
         if not e:
-            return get_data_error_result(retmsg="Dialog not found")
-        conv = {
-            "id": conv_id,
-            "dialog_id": req["dialog_id"],
-            "name": req.get("name", "New conversation"),
-            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
-        }
+            return get_data_error_result(message="Dialog not found")
+        conv = {"id": conv_id, "dialog_id": req["dialog_id"], "name": name, "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]}
         ConversationService.save(**conv)
-        e, conv = ConversationService.get_by_id(conv["id"])
-        if not e:
-            return get_data_error_result(retmsg="Fail to new a conversation!")
-        conv = conv.to_dict()
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/get', methods=['GET'])
+@manager.route("/get", methods=["GET"])  # noqa: F821
 @login_required
 def get():
     conv_id = request.args["conversation_id"]
     try:
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         tenants = UserTenantService.query(user_id=current_user.id)
+        avatar = None
         for tenant in tenants:
-            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
+            dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
+            if dialog and len(dialog) > 0:
+                avatar = dialog[0].icon
                 break
         else:
-            return get_json_result(
-                data=False, retmsg=f'Only owner of conversation authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+            return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
+
+        def get_value(d, k1, k2):
+            return d.get(k1, d.get(k2))
+
+        for ref in conv.reference:
+            if isinstance(ref, list):
+                continue
+            ref["chunks"] = [
+                {
+                    "id": get_value(ck, "chunk_id", "id"),
+                    "content": get_value(ck, "content", "content_with_weight"),
+                    "document_id": get_value(ck, "doc_id", "document_id"),
+                    "document_name": get_value(ck, "docnm_kwd", "document_name"),
+                    "dataset_id": get_value(ck, "kb_id", "dataset_id"),
+                    "image_id": get_value(ck, "image_id", "img_id"),
+                    "positions": get_value(ck, "positions", "position_int"),
+                    "doc_type": get_value(ck, "doc_type", "doc_type_kwd"),
+                }
+                for ck in ref.get("chunks", [])
+            ]

         conv = conv.to_dict()
+        conv["avatar"] = avatar
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
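
The get_value helper above lets one response schema serve references saved under either the legacy storage keys or the new API names. A tiny illustration with invented values:

    def get_value(d, k1, k2):
        return d.get(k1, d.get(k2))

    legacy_chunk = {"chunk_id": "abc", "content_with_weight": "some text", "img_id": "kb/img.png"}
    new_chunk = {"id": "abc", "content": "some text", "image_id": "kb/img.png"}

    # Both shapes normalize to the same value.
    assert get_value(legacy_chunk, "chunk_id", "id") == get_value(new_chunk, "chunk_id", "id")
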


-@manager.route('/rm', methods=['POST'])
+@manager.route("/getsse/<dialog_id>", methods=["GET"])  # type: ignore # noqa: F821
+def getsse(dialog_id):
+    token = request.headers.get("Authorization").split()
+    if len(token) != 2:
+        return get_data_error_result(message="Authorization is not valid!")
+    token = token[1]
+    objs = APIToken.query(beta=token)
+    if not objs:
+        return get_data_error_result(message="Authentication error: API key is invalid!")
+    try:
+        e, conv = DialogService.get_by_id(dialog_id)
+        if not e:
+            return get_data_error_result(message="Dialog not found!")
+        conv = conv.to_dict()
+        conv["avatar"] = conv["icon"]
+        del conv["icon"]
+        return get_json_result(data=conv)
+    except Exception as e:
+        return server_error_response(e)
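
Unlike the session-guarded routes, getsse authenticates with the dialog's beta API token, so an embedded page can call it without a login cookie. A minimal sketch; the base URL and token are placeholders, and the two-part Authorization header matches the split() check above:

    import requests

    headers = {"Authorization": "Bearer <beta-token>"}
    r = requests.get("http://localhost:9380/v1/conversation/getsse/<dialog_id>", headers=headers)
    print(r.json()["data"]["avatar"])  # the dialog icon, renamed to "avatar" in the response
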


+@manager.route("/rm", methods=["POST"])  # noqa: F821
 @login_required
 def rm():
     conv_ids = request.json["conversation_ids"]
@@ -102,41 +145,35 @@ def rm():
         for cid in conv_ids:
             exist, conv = ConversationService.get_by_id(cid)
             if not exist:
-                return get_data_error_result(retmsg="Conversation not found!")
+                return get_data_error_result(message="Conversation not found!")
             tenants = UserTenantService.query(user_id=current_user.id)
             for tenant in tenants:
                 if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                     break
             else:
-                return get_json_result(
-                    data=False, retmsg=f'Only owner of conversation authorized for this operation.',
-                    retcode=RetCode.OPERATING_ERROR)
+                return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
             ConversationService.delete_by_id(cid)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route("/list", methods=["GET"])  # noqa: F821
 @login_required
 def list_convsersation():
     dialog_id = request.args["dialog_id"]
     try:
         if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
-            return get_json_result(
-                data=False, retmsg=f'Only owner of dialog authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
-        convs = ConversationService.query(
-            dialog_id=dialog_id,
-            order_by=ConversationService.model.create_time,
-            reverse=True)
+            return get_json_result(data=False, message="Only owner of dialog authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
+        convs = ConversationService.query(dialog_id=dialog_id, order_by=ConversationService.model.create_time, reverse=True)
         convs = [d.to_dict() for d in convs]
         return get_json_result(data=convs)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/completion', methods=['POST'])
+@manager.route("/completion", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "messages")
 def completion():
@@ -152,42 +189,53 @@ def completion():
     try:
         e, conv = ConversationService.get_by_id(req["conversation_id"])
         if not e:
-            return get_data_error_result(retmsg="Conversation not found!")
+            return get_data_error_result(message="Conversation not found!")
         conv.message = deepcopy(req["messages"])
         e, dia = DialogService.get_by_id(conv.dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         del req["conversation_id"]
         del req["messages"]

         if not conv.reference:
             conv.reference = []
-        conv.message.append({"role": "assistant", "content": "", "id": message_id})
-        conv.reference.append({"chunks": [], "doc_aggs": []})
-
-        def fillin_conv(ans):
-            nonlocal conv, message_id
-            if not conv.reference:
-                conv.reference.append(ans["reference"])
-            else:
-                conv.reference[-1] = ans["reference"]
-            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
-                                "id": message_id, "prompt": ans.get("prompt", "")}
-            ans["id"] = message_id
+        else:
+            def get_value(d, k1, k2):
+                return d.get(k1, d.get(k2))
+
+            for ref in conv.reference:
+                if isinstance(ref, list):
+                    continue
+                ref["chunks"] = [
+                    {
+                        "id": get_value(ck, "chunk_id", "id"),
+                        "content": get_value(ck, "content", "content_with_weight"),
+                        "document_id": get_value(ck, "doc_id", "document_id"),
+                        "document_name": get_value(ck, "docnm_kwd", "document_name"),
+                        "dataset_id": get_value(ck, "kb_id", "dataset_id"),
+                        "image_id": get_value(ck, "image_id", "img_id"),
+                        "positions": get_value(ck, "positions", "position_int"),
+                        "doc_type": get_value(ck, "doc_type_kwd", "doc_type_kwd"),
+                    }
+                    for ck in ref.get("chunks", [])
+                ]
+
+        if not conv.reference:
+            conv.reference = []
+        conv.reference.append({"chunks": [], "doc_aggs": []})

         def stream():
             nonlocal dia, msg, req, conv
             try:
                 for ans in chat(dia, msg, True, **req):
-                    fillin_conv(ans)
-                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
+                    ans = structure_answer(conv, ans, message_id, conv.id)
+                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
                 ConversationService.update_by_id(conv.id, conv.to_dict())
             except Exception as e:
                 traceback.print_exc()
-                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
-                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                           ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
+                yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

         if req.get("stream", True):
             resp = Response(stream(), mimetype="text/event-stream")
@@ -200,8 +248,7 @@ def completion():
         else:
             answer = None
             for ans in chat(dia, msg, **req):
-                answer = ans
-                fillin_conv(ans)
+                answer = structure_answer(conv, ans, message_id, req["conversation_id"])
                 ConversationService.update_by_id(conv.id, conv.to_dict())
                 break
             return get_json_result(data=answer)
@@ -209,7 +256,7 @@ def completion():
         return server_error_response(e)
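
Each streamed event is a data: line carrying the JSON envelope, now keyed code/message/data instead of retcode/retmsg, with a final data: true sentinel. A minimal consumer sketch; the URL, conversation ID, and session handling are placeholders:

    import json
    import requests

    payload = {"conversation_id": "<conv-id>", "messages": [{"role": "user", "content": "hi"}]}
    with requests.post("http://localhost:9380/v1/conversation/completion",
                       json=payload, stream=True) as resp:  # assumes a logged-in session
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data:"):
                continue
            event = json.loads(line[len("data:"):])
            if event["code"] != 0:
                raise RuntimeError(event["message"])
            if event["data"] is True:  # end-of-stream sentinel
                break
            print(event["data"]["answer"])  # cumulative answer text so far
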
|
|
||||||
|
|
||||||
@manager.route('/tts', methods=['POST'])
|
@manager.route("/tts", methods=["POST"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
def tts():
|
def tts():
|
||||||
req = request.json
|
req = request.json
|
||||||
@ -217,11 +264,11 @@ def tts():
|
|||||||
|
|
||||||
tenants = TenantService.get_info_by(current_user.id)
|
tenants = TenantService.get_info_by(current_user.id)
|
||||||
if not tenants:
|
if not tenants:
|
||||||
return get_data_error_result(retmsg="Tenant not found!")
|
return get_data_error_result(message="Tenant not found!")
|
||||||
|
|
||||||
tts_id = tenants[0]["tts_id"]
|
tts_id = tenants[0]["tts_id"]
|
||||||
if not tts_id:
|
if not tts_id:
|
||||||
return get_data_error_result(retmsg="No default TTS model is set")
|
return get_data_error_result(message="No default TTS model is set")
|
||||||
|
|
||||||
tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)
|
tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)
|
||||||
|
|
||||||
@ -231,9 +278,7 @@ def tts():
|
|||||||
for chunk in tts_mdl.tts(txt):
|
for chunk in tts_mdl.tts(txt):
|
||||||
yield chunk
|
yield chunk
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
|
yield ("data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e)}}, ensure_ascii=False)).encode("utf-8")
|
||||||
"data": {"answer": "**ERROR**: " + str(e)}},
|
|
||||||
ensure_ascii=False)).encode('utf-8')
|
|
||||||
|
|
||||||
resp = Response(stream_audio(), mimetype="audio/mpeg")
|
resp = Response(stream_audio(), mimetype="audio/mpeg")
|
||||||
resp.headers.add_header("Cache-Control", "no-cache")
|
resp.headers.add_header("Cache-Control", "no-cache")
|
||||||
@ -243,14 +288,14 @@ def tts():
|
|||||||
return resp
|
return resp
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/delete_msg', methods=['POST'])
|
@manager.route("/delete_msg", methods=["POST"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("conversation_id", "message_id")
|
@validate_request("conversation_id", "message_id")
|
||||||
def delete_msg():
|
def delete_msg():
|
||||||
req = request.json
|
req = request.json
|
||||||
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="Conversation not found!")
|
return get_data_error_result(message="Conversation not found!")
|
||||||
|
|
||||||
conv = conv.to_dict()
|
conv = conv.to_dict()
|
||||||
for i, msg in enumerate(conv["message"]):
|
for i, msg in enumerate(conv["message"]):
|
||||||
@ -266,47 +311,48 @@ def delete_msg():
|
|||||||
return get_json_result(data=conv)
|
return get_json_result(data=conv)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/thumbup', methods=['POST'])
|
@manager.route("/thumbup", methods=["POST"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("conversation_id", "message_id")
|
@validate_request("conversation_id", "message_id")
|
||||||
def thumbup():
|
def thumbup():
|
||||||
req = request.json
|
req = request.json
|
||||||
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="Conversation not found!")
|
return get_data_error_result(message="Conversation not found!")
|
||||||
up_down = req.get("set")
|
up_down = req.get("thumbup")
|
||||||
feedback = req.get("feedback", "")
|
feedback = req.get("feedback", "")
|
||||||
conv = conv.to_dict()
|
conv = conv.to_dict()
|
||||||
for i, msg in enumerate(conv["message"]):
|
for i, msg in enumerate(conv["message"]):
|
||||||
if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
|
if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
|
||||||
if up_down:
|
if up_down:
|
||||||
msg["thumbup"] = True
|
msg["thumbup"] = True
|
||||||
if "feedback" in msg: del msg["feedback"]
|
if "feedback" in msg:
|
||||||
|
del msg["feedback"]
|
||||||
else:
|
else:
|
||||||
msg["thumbup"] = False
|
msg["thumbup"] = False
|
||||||
if feedback: msg["feedback"] = feedback
|
if feedback:
|
||||||
|
msg["feedback"] = feedback
|
||||||
break
|
break
|
||||||
|
|
||||||
ConversationService.update_by_id(conv["id"], conv)
|
ConversationService.update_by_id(conv["id"], conv)
|
||||||
return get_json_result(data=conv)
|
return get_json_result(data=conv)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/ask', methods=['POST'])
|
@manager.route("/ask", methods=["POST"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("question", "kb_ids")
|
@validate_request("question", "kb_ids")
|
||||||
def ask_about():
|
def ask_about():
|
||||||
req = request.json
|
req = request.json
|
||||||
uid = current_user.id
|
uid = current_user.id
|
||||||
|
|
||||||
def stream():
|
def stream():
|
||||||
nonlocal req, uid
|
nonlocal req, uid
|
||||||
try:
|
try:
|
||||||
for ans in ask(req["question"], req["kb_ids"], uid):
|
for ans in ask(req["question"], req["kb_ids"], uid):
|
||||||
yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
|
yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
|
||||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||||
ensure_ascii=False) + "\n\n"
|
|
||||||
yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"
|
|
||||||
|
|
||||||
resp = Response(stream(), mimetype="text/event-stream")
|
resp = Response(stream(), mimetype="text/event-stream")
|
||||||
resp.headers.add_header("Cache-control", "no-cache")
|
resp.headers.add_header("Cache-control", "no-cache")
|
||||||
@ -316,7 +362,7 @@ def ask_about():
|
|||||||
return resp
|
return resp
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/mindmap', methods=['POST'])
|
@manager.route("/mindmap", methods=["POST"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("question", "kb_ids")
|
@validate_request("question", "kb_ids")
|
||||||
def mindmap():
|
def mindmap():
|
||||||
@ -324,21 +370,21 @@ def mindmap():
|
|||||||
kb_ids = req["kb_ids"]
|
kb_ids = req["kb_ids"]
|
||||||
e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
|
e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="Knowledgebase not found!")
|
return get_data_error_result(message="Knowledgebase not found!")
|
||||||
|
|
||||||
embd_mdl = TenantLLMService.model_instance(
|
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
|
||||||
kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
|
|
||||||
chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
|
chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
|
||||||
ranks = retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
|
question = req["question"]
|
||||||
0.3, 0.3, aggs=False)
|
ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12, 0.3, 0.3, aggs=False, rank_feature=label_question(question, [kb]))
|
||||||
mindmap = MindMapExtractor(chat_mdl)
|
mindmap = MindMapExtractor(chat_mdl)
|
||||||
mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
|
mind_map = trio.run(mindmap, [c["content_with_weight"] for c in ranks["chunks"]])
|
||||||
|
mind_map = mind_map.output
|
||||||
if "error" in mind_map:
|
if "error" in mind_map:
|
||||||
return server_error_response(Exception(mind_map["error"]))
|
return server_error_response(Exception(mind_map["error"]))
|
||||||
return get_json_result(data=mind_map)
|
return get_json_result(data=mind_map)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/related_questions', methods=['POST'])
|
@manager.route("/related_questions", methods=["POST"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("question")
|
@validate_request("question")
|
||||||
def related_questions():
|
def related_questions():
|
||||||
@@ -346,31 +392,49 @@ def related_questions():
     question = req["question"]
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
     prompt = """
-Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
-Instructions:
- - Based on the keywords provided by the user, generate 5-10 related search terms.
- - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
- - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
- - Keep the term length between 2-4 words, concise and clear.
- - DO NOT translate, use the language of the original keywords.
-
-### Example:
-Keywords: Chinese football
-Related search terms:
-1. Current status of Chinese football
-2. Reform of Chinese football
-3. Youth training of Chinese football
-4. Chinese football in the Asian Cup
-5. Chinese football in the World Cup
+Role: You are an AI language model assistant tasked with generating 5-10 related questions based on a user’s original query. These questions should help expand the search query scope and improve search relevance.
+
+Instructions:
+Input: You are provided with a user’s question.
+Output: Generate 5-10 alternative questions that are related to the original user question. These alternatives should help retrieve a broader range of relevant documents from a vector database.
+Context: Focus on rephrasing the original question in different ways, making sure the alternative questions are diverse but still connected to the topic of the original query. Do not create overly obscure, irrelevant, or unrelated questions.
+Fallback: If you cannot generate any relevant alternatives, do not return any questions.
+Guidance:
+1. Each alternative should be unique but still relevant to the original query.
+2. Keep the phrasing clear, concise, and easy to understand.
+3. Avoid overly technical jargon or specialized terms unless directly relevant.
+4. Ensure that each question contributes towards improving search results by broadening the search angle, not narrowing it.
+
+Example:
+Original Question: What are the benefits of electric vehicles?
+
+Alternative Questions:
+1. How do electric vehicles impact the environment?
+2. What are the advantages of owning an electric car?
+3. What is the cost-effectiveness of electric vehicles?
+4. How do electric vehicles compare to traditional cars in terms of fuel efficiency?
+5. What are the environmental benefits of switching to electric cars?
+6. How do electric vehicles help reduce carbon emissions?
+7. Why are electric vehicles becoming more popular?
+8. What are the long-term savings of using electric vehicles?
+9. How do electric vehicles contribute to sustainability?
+10. What are the key benefits of electric vehicles for consumers?

 Reason:
-- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
-- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
-- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
+Rephrasing the original query into multiple alternative questions helps the user explore different aspects of their search topic, improving the quality of search results.
+These questions guide the search engine to provide a more comprehensive set of relevant documents.

 """
-    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
+    ans = chat_mdl.chat(
+        prompt,
+        [
+            {
+                "role": "user",
+                "content": f"""
 Keywords: {question}
 Related search terms:
-"""}], {"temperature": 0.9})
+""",
+            }
+        ],
+        {"temperature": 0.9},
+    )
     return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])

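The endpoint keeps only reply lines the model numbered with a single leading digit and strips the numbering. That filter, checked standalone:

    import re

    ans = """Here are some ideas:
    1. How do electric vehicles impact the environment?
    2. What are the advantages of owning an electric car?
    - an unnumbered stray line that should be dropped
    """

    # Keep only lines starting with "<digit>. " and remove the prefix.
    # Note: the pattern matches one digit, so a "10. " item would be dropped.
    questions = [re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)]
    print(questions)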
@@ -18,31 +18,32 @@ from flask import request
 from flask_login import login_required, current_user
 from api.db.services.dialog_service import DialogService
 from api.db import StatusEnum
+from api.db.services.llm_service import TenantLLMService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.user_service import TenantService, UserTenantService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result


-@manager.route('/set', methods=['POST'])
+@manager.route('/set', methods=['POST'])  # noqa: F821
 @login_required
 def set_dialog():
     req = request.json
     dialog_id = req.get("dialog_id")
     name = req.get("name", "New Dialog")
-    description = req.get("description", "A helpful Dialog")
+    description = req.get("description", "A helpful dialog")
     icon = req.get("icon", "")
     top_n = req.get("top_n", 6)
     top_k = req.get("top_k", 1024)
     rerank_id = req.get("rerank_id", "")
-    if not rerank_id: req["rerank_id"] = ""
+    if not rerank_id:
+        req["rerank_id"] = ""
     similarity_threshold = req.get("similarity_threshold", 0.1)
     vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
-    if vector_similarity_weight is None: vector_similarity_weight = 0.3
     llm_setting = req.get("llm_setting", {})
-    default_prompt = {
+    default_prompt_with_dataset = {
         "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
       以下是知识库:
       {knowledge}
@@ -53,37 +54,47 @@ def set_dialog():
         ],
         "empty_response": "Sorry! 知识库中未找到相关内容!"
     }
-    prompt_config = req.get("prompt_config", default_prompt)
+    default_prompt_no_dataset = {
+        "system": """You are a helpful assistant.""",
+        "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
+        "parameters": [
+        ],
+        "empty_response": ""
+    }
+    prompt_config = req.get("prompt_config", default_prompt_with_dataset)

     if not prompt_config["system"]:
-        prompt_config["system"] = default_prompt["system"]
-    # if len(prompt_config["parameters"]) < 1:
-    #    prompt_config["parameters"] = default_prompt["parameters"]
-    # for p in prompt_config["parameters"]:
-    #    if p["key"] == "knowledge":break
-    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])
+        prompt_config["system"] = default_prompt_with_dataset["system"]
+    if not req.get("kb_ids", []):
+        if prompt_config['system'] == default_prompt_with_dataset['system'] or "{knowledge}" in prompt_config['system']:
+            prompt_config = default_prompt_no_dataset

     for p in prompt_config["parameters"]:
         if p["optional"]:
             continue
         if prompt_config["system"].find("{%s}" % p["key"]) < 0:
             return get_data_error_result(
-                retmsg="Parameter '{}' is not used".format(p["key"]))
+                message="Parameter '{}' is not used".format(p["key"]))

     try:
         e, tenant = TenantService.get_by_id(current_user.id)
         if not e:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
+        kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
+        embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+        embd_count = len(set(embd_ids))
+        if embd_count > 1:
+            return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}"')

         llm_id = req.get("llm_id", tenant.llm_id)
         if not dialog_id:
-            if not req.get("kb_ids"):
-                return get_data_error_result(
-                    retmsg="Fail! Please select knowledgebase!")
             dia = {
                 "id": get_uuid(),
                 "tenant_id": current_user.id,
                 "name": name,
-                "kb_ids": req["kb_ids"],
+                "kb_ids": req.get("kb_ids", []),
                 "description": description,
                 "llm_id": llm_id,
                 "llm_setting": llm_setting,
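The new no-dataset fallback swaps in a plain-assistant prompt whenever a dialog has no knowledge bases attached but its system prompt still expects retrieved context. A standalone sketch of that decision (the two default dicts are abbreviated stand-ins for the ones in the diff):

    DEFAULT_WITH_DATASET = {"system": "Answer from the knowledge base:\n{knowledge}"}
    DEFAULT_NO_DATASET = {"system": "You are a helpful assistant."}

    def pick_prompt_config(prompt_config, kb_ids):
        """Fall back to the dataset-free prompt when no KBs are attached
        and the configured prompt still references {knowledge}."""
        if not kb_ids:
            if prompt_config["system"] == DEFAULT_WITH_DATASET["system"] or "{knowledge}" in prompt_config["system"]:
                return DEFAULT_NO_DATASET
        return prompt_config

    print(pick_prompt_config({"system": "Use {knowledge} only."}, kb_ids=[]))  # falls back
    print(pick_prompt_config({"system": "Be terse."}, kb_ids=[]))              # kept as-is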
@@ -96,35 +107,33 @@ def set_dialog():
                 "icon": icon
             }
             if not DialogService.save(**dia):
-                return get_data_error_result(retmsg="Fail to new a dialog!")
-            e, dia = DialogService.get_by_id(dia["id"])
-            if not e:
-                return get_data_error_result(retmsg="Fail to new a dialog!")
-            return get_json_result(data=dia.to_json())
+                return get_data_error_result(message="Fail to new a dialog!")
+            return get_json_result(data=dia)
         else:
             del req["dialog_id"]
             if "kb_names" in req:
                 del req["kb_names"]
             if not DialogService.update_by_id(dialog_id, req):
-                return get_data_error_result(retmsg="Dialog not found!")
+                return get_data_error_result(message="Dialog not found!")
             e, dia = DialogService.get_by_id(dialog_id)
             if not e:
-                return get_data_error_result(retmsg="Fail to update a dialog!")
+                return get_data_error_result(message="Fail to update a dialog!")
             dia = dia.to_dict()
+            dia.update(req)
             dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
             return get_json_result(data=dia)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/get', methods=['GET'])
+@manager.route('/get', methods=['GET'])  # noqa: F821
 @login_required
 def get():
     dialog_id = request.args["dialog_id"]
     try:
         e, dia = DialogService.get_by_id(dialog_id)
         if not e:
-            return get_data_error_result(retmsg="Dialog not found!")
+            return get_data_error_result(message="Dialog not found!")
         dia = dia.to_dict()
         dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
         return get_json_result(data=dia)
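Across all of these endpoints the JSON envelope keywords migrate from retmsg/retcode to message/code, and the enum moves behind from api import settings. A minimal sketch of the new-style envelope; the helper and the enum values here are illustrative stand-ins for api_utils.get_json_result and settings.RetCode, whose exact values may differ:

    from enum import IntEnum

    class RetCode(IntEnum):
        # Illustrative values only; the real enum lives in api.settings.
        SUCCESS = 0
        ARGUMENT_ERROR = 101
        DATA_ERROR = 102
        OPERATING_ERROR = 103
        AUTHENTICATION_ERROR = 109
        SERVER_ERROR = 500

    def get_json_result(data=None, message="success", code=RetCode.SUCCESS):
        # New keyword names: message/code instead of retmsg/retcode.
        return {"code": int(code), "message": message, "data": data}

    print(get_json_result(data=False, message='Lack of "KB ID"', code=RetCode.ARGUMENT_ERROR))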
@@ -143,7 +152,7 @@ def get_kb_names(kb_ids):
     return ids, nms


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_dialogs():
     try:
@@ -160,7 +169,7 @@ def list_dialogs():
         return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("dialog_ids")
 def rm():
@@ -174,8 +183,8 @@ def rm():
                 break
         else:
             return get_json_result(
-                data=False, retmsg=f'Only owner of dialog authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of dialog authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         dialog_list.append({"id": id,"status":StatusEnum.INVALID.value})
     DialogService.update_many_by_id(dialog_list)
     return get_json_result(data=True)

@@ -13,83 +13,87 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #
+import json
+import os.path
 import pathlib
 import re

 import flask
-from elasticsearch_dsl import Q
 from flask import request
-from flask_login import login_required, current_user
+from flask_login import current_user, login_required

-from api.db.db_models import Task, File
+from api import settings
+from api.constants import IMG_BASE64_PREFIX
+from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileSource, FileType, ParserType, TaskStatus
+from api.db.db_models import File, Task
+from api.db.services import duplicate_name
+from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
+from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.task_service import TaskService, queue_tasks
 from api.db.services.user_service import UserTenantService
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
-from api.db.services import duplicate_name
-from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid
-from api.db import FileType, TaskStatus, ParserType, FileSource
-from api.db.services.document_service import DocumentService, doc_upload_and_parse
-from api.settings import RetCode
-from api.utils.api_utils import get_json_result
-from rag.utils.storage_factory import STORAGE_IMPL
-from api.utils.file_utils import filename_type, thumbnail
+from api.utils.api_utils import (
+    get_data_error_result,
+    get_json_result,
+    server_error_response,
+    validate_request,
+)
+from api.utils.file_utils import filename_type, get_project_base_directory, thumbnail
 from api.utils.web_utils import html2pdf, is_valid_url
-from api.contants import IMG_BASE64_PREFIX
+from deepdoc.parser.html_parser import RAGFlowHtmlParser
+from rag.nlp import search
+from rag.utils.storage_factory import STORAGE_IMPL


-@manager.route('/upload', methods=['POST'])
+@manager.route("/upload", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("kb_id")
 def upload():
     kb_id = request.form.get("kb_id")
     if not kb_id:
-        return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
-    if 'file' not in request.files:
-        return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
-    file_objs = request.files.getlist('file')
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
+    if "file" not in request.files:
+        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
+
+    file_objs = request.files.getlist("file")
     for file_obj in file_objs:
-        if file_obj.filename == '':
-            return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+        if file_obj.filename == "":
+            return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)

     e, kb = KnowledgebaseService.get_by_id(kb_id)
     if not e:
         raise LookupError("Can't find this knowledgebase!")
-    err, _ = FileService.upload_document(kb, file_objs, current_user.id)
+    err, files = FileService.upload_document(kb, file_objs, current_user.id)
+
+    if not files:
+        return get_json_result(data=files, message="There seems to be an issue with your file format. Please verify it is correct and not corrupted.", code=settings.RetCode.DATA_ERROR)
+    files = [f[0] for f in files]  # remove the blob
+
     if err:
-        return get_json_result(
-            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
-    return get_json_result(data=True)
+        return get_json_result(data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
+    return get_json_result(data=files)

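upload is reworked to return the created document records instead of a bare True. The diff implies FileService.upload_document now yields (errors, [(doc_dict, blob), ...]); a standalone sketch of unpacking that shape (the tuple layout is inferred from files = [f[0] for f in files], not confirmed elsewhere):

    def handle_upload_result(err, files):
        """err: list of error strings; files: assumed list of (doc_dict, blob) pairs."""
        if not files:
            return {"ok": False, "reason": "no parsable files"}
        docs = [f[0] for f in files]  # drop the raw blob before serializing
        if err:
            return {"ok": False, "reason": "\n".join(err), "docs": docs}
        return {"ok": True, "docs": docs}

    print(handle_upload_result([], [({"id": "doc-1", "name": "a.pdf"}, b"%PDF...")]))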
-@manager.route('/web_crawl', methods=['POST'])
+@manager.route("/web_crawl", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("kb_id", "name", "url")
 def web_crawl():
     kb_id = request.form.get("kb_id")
     if not kb_id:
-        return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
     name = request.form.get("name")
     url = request.form.get("url")
     if not is_valid_url(url):
-        return get_json_result(
-            data=False, retmsg='The URL format is invalid', retcode=RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
     e, kb = KnowledgebaseService.get_by_id(kb_id)
     if not e:
         raise LookupError("Can't find this knowledgebase!")

     blob = html2pdf(url)
-    if not blob: return server_error_response(ValueError("Download failure."))
+    if not blob:
+        return server_error_response(ValueError("Download failure."))

     root_folder = FileService.get_root_folder(current_user.id)
     pf_id = root_folder["id"]
@@ -98,10 +102,7 @@ def web_crawl():
     kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

     try:
-        filename = duplicate_name(
-            DocumentService.query,
-            name=name + ".pdf",
-            kb_id=kb.id)
+        filename = duplicate_name(DocumentService.query, name=name + ".pdf", kb_id=kb.id)
         filetype = filename_type(filename)
         if filetype == FileType.OTHER.value:
             raise RuntimeError("This type of file has not been supported yet!")
@@ -120,7 +121,7 @@ def web_crawl():
             "name": filename,
             "location": location,
             "size": len(blob),
-            "thumbnail": thumbnail(filename, blob)
+            "thumbnail": thumbnail(filename, blob),
         }
         if doc["type"] == FileType.VISUAL:
             doc["parser_id"] = ParserType.PICTURE.value
@@ -137,280 +138,275 @@ def web_crawl():
     return get_json_result(data=True)

-@manager.route('/create', methods=['POST'])
+@manager.route("/create", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("name", "kb_id")
 def create():
     req = request.json
     kb_id = req["kb_id"]
     if not kb_id:
-        return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)

     try:
         e, kb = KnowledgebaseService.get_by_id(kb_id)
         if not e:
-            return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+            return get_data_error_result(message="Can't find this knowledgebase!")

         if DocumentService.query(name=req["name"], kb_id=kb_id):
-            return get_data_error_result(
-                retmsg="Duplicated document name in the same knowledgebase.")
+            return get_data_error_result(message="Duplicated document name in the same knowledgebase.")

-        doc = DocumentService.insert({
-            "id": get_uuid(),
-            "kb_id": kb.id,
-            "parser_id": kb.parser_id,
-            "parser_config": kb.parser_config,
-            "created_by": current_user.id,
-            "type": FileType.VIRTUAL,
-            "name": req["name"],
-            "location": "",
-            "size": 0
-        })
+        doc = DocumentService.insert(
+            {
+                "id": get_uuid(),
+                "kb_id": kb.id,
+                "parser_id": kb.parser_id,
+                "parser_config": kb.parser_config,
+                "created_by": current_user.id,
+                "type": FileType.VIRTUAL,
+                "name": req["name"],
+                "location": "",
+                "size": 0,
+            }
+        )
         return get_json_result(data=doc.to_json())
     except Exception as e:
         return server_error_response(e)

-@manager.route('/list', methods=['GET'])
+@manager.route("/list", methods=["POST"])  # noqa: F821
 @login_required
 def list_docs():
     kb_id = request.args.get("kb_id")
     if not kb_id:
-        return get_json_result(
-            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
     tenants = UserTenantService.query(user_id=current_user.id)
     for tenant in tenants:
-        if KnowledgebaseService.query(
-                tenant_id=tenant.tenant_id, id=kb_id):
+        if KnowledgebaseService.query(tenant_id=tenant.tenant_id, id=kb_id):
             break
     else:
-        return get_json_result(
-            data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-            retcode=RetCode.OPERATING_ERROR)
+        return get_json_result(data=False, message="Only owner of knowledgebase authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
     keywords = request.args.get("keywords", "")

-    page_number = int(request.args.get("page", 1))
-    items_per_page = int(request.args.get("page_size", 15))
+    page_number = int(request.args.get("page", 0))
+    items_per_page = int(request.args.get("page_size", 0))
     orderby = request.args.get("orderby", "create_time")
     desc = request.args.get("desc", True)

+    req = request.get_json()
+
+    run_status = req.get("run_status", [])
+    if run_status:
+        invalid_status = {s for s in run_status if s not in VALID_TASK_STATUS}
+        if invalid_status:
+            return get_data_error_result(message=f"Invalid filter run status conditions: {', '.join(invalid_status)}")
+
+    types = req.get("types", [])
+    if types:
+        invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
+        if invalid_types:
+            return get_data_error_result(message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}")

     try:
-        docs, tol = DocumentService.get_by_kb_id(
-            kb_id, page_number, items_per_page, orderby, desc, keywords)
+        docs, tol = DocumentService.get_by_kb_id(kb_id, page_number, items_per_page, orderby, desc, keywords, run_status, types)

         for doc_item in docs:
-            if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
-                doc_item['thumbnail'] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"
+            if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
+                doc_item["thumbnail"] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"

         return get_json_result(data={"total": tol, "docs": docs})
     except Exception as e:
         return server_error_response(e)

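The new request-body filters are validated against whitelists before they reach the database, collecting every bad value with a set comprehension so one error names them all. The same check in isolation (the whitelist values below are examples, standing in for the constants from api.db):

    VALID_TASK_STATUS = {"0", "1", "2", "3"}  # example whitelist

    def validate_run_status(run_status):
        # Gather everything outside the whitelist so the message lists all offenders.
        invalid = {s for s in run_status if s not in VALID_TASK_STATUS}
        if invalid:
            raise ValueError(f"Invalid filter run status conditions: {', '.join(sorted(invalid))}")
        return run_status

    print(validate_run_status(["1", "3"]))  # passes
    # validate_run_status(["1", "9"])       # raises: Invalid filter run status conditions: 9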
-@manager.route('/infos', methods=['POST'])
+@manager.route("/infos", methods=["POST"])  # noqa: F821
 @login_required
 def docinfos():
     req = request.json
     doc_ids = req["doc_ids"]
     for doc_id in doc_ids:
         if not DocumentService.accessible(doc_id, current_user.id):
-            return get_json_result(
-                data=False,
-                retmsg='No authorization.',
-                retcode=RetCode.AUTHENTICATION_ERROR
-            )
+            return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
     docs = DocumentService.get_by_ids(doc_ids)
     return get_json_result(data=list(docs.dicts()))

-@manager.route('/thumbnails', methods=['GET'])
-#@login_required
+@manager.route("/thumbnails", methods=["GET"])  # noqa: F821
+# @login_required
 def thumbnails():
     doc_ids = request.args.get("doc_ids").split(",")
     if not doc_ids:
-        return get_json_result(
-            data=False, retmsg='Lack of "Document ID"', retcode=RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)

     try:
         docs = DocumentService.get_thumbnails(doc_ids)

         for doc_item in docs:
-            if doc_item['thumbnail'] and not doc_item['thumbnail'].startswith(IMG_BASE64_PREFIX):
-                doc_item['thumbnail'] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"
+            if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
+                doc_item["thumbnail"] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"

         return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
     except Exception as e:
         return server_error_response(e)

-@manager.route('/change_status', methods=['POST'])
+@manager.route("/change_status", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("doc_id", "status")
 def change_status():
     req = request.json
     if str(req["status"]) not in ["0", "1"]:
-        return get_json_result(
-            data=False,
-            retmsg='"Status" must be either 0 or 1!',
-            retcode=RetCode.ARGUMENT_ERROR)
+        return get_json_result(data=False, message='"Status" must be either 0 or 1!', code=settings.RetCode.ARGUMENT_ERROR)

     if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            retmsg='No authorization.',
-            retcode=RetCode.AUTHENTICATION_ERROR)
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)

     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
         if not e:
-            return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+            return get_data_error_result(message="Can't find this knowledgebase!")

-        if not DocumentService.update_by_id(
-                req["doc_id"], {"status": str(req["status"])}):
-            return get_data_error_result(
-                retmsg="Database error (Document update)!")
+        if not DocumentService.update_by_id(req["doc_id"], {"status": str(req["status"])}):
+            return get_data_error_result(message="Database error (Document update)!")

-        if str(req["status"]) == "0":
-            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
-                                              scripts="ctx._source.available_int=0;",
-                                              idxnm=search.index_name(kb.tenant_id))
-        else:
-            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
-                                              scripts="ctx._source.available_int=1;",
-                                              idxnm=search.index_name(kb.tenant_id))
+        status = int(req["status"])
+        settings.docStoreConn.update({"doc_id": req["doc_id"]}, {"available_int": status}, search.index_name(kb.tenant_id), doc.kb_id)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

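The Elasticsearch-specific painless script is replaced by one engine-agnostic call, docStoreConn.update(condition, new_fields, index_name, kb_id), so the same route works against ES or other backends. A standalone sketch of that connector contract with an in-memory stand-in (only the call shape is taken from the diff; the toy store is not RAGFlow's connector):

    class InMemoryDocStore:
        """Toy doc store mirroring update(condition, new_value, index_name, kb_id)."""
        def __init__(self):
            self.rows = [{"doc_id": "d1", "available_int": 1},
                         {"doc_id": "d2", "available_int": 1}]

        def update(self, condition, new_value, index_name, kb_id):
            hits = 0
            for row in self.rows:
                if all(row.get(k) == v for k, v in condition.items()):
                    row.update(new_value)  # e.g. flip available_int on matching chunks
                    hits += 1
            return hits

    store = InMemoryDocStore()
    store.update({"doc_id": "d1"}, {"available_int": 0}, "ragflow_tenant_x", "kb_1")
    print(store.rows)  # d1 toggled off, d2 untouched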
-@manager.route('/rm', methods=['POST'])
+@manager.route("/rm", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("doc_id")
 def rm():
     req = request.json
     doc_ids = req["doc_id"]
-    if isinstance(doc_ids, str): doc_ids = [doc_ids]
+    if isinstance(doc_ids, str):
+        doc_ids = [doc_ids]

     for doc_id in doc_ids:
         if not DocumentService.accessible4deletion(doc_id, current_user.id):
-            return get_json_result(
-                data=False,
-                retmsg='No authorization.',
-                retcode=RetCode.AUTHENTICATION_ERROR
-            )
+            return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)

     root_folder = FileService.get_root_folder(current_user.id)
     pf_id = root_folder["id"]
     FileService.init_knowledgebase_docs(pf_id, current_user.id)
     errors = ""
+    kb_table_num_map = {}
     for doc_id in doc_ids:
         try:
             e, doc = DocumentService.get_by_id(doc_id)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")

             b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

+            TaskService.filter_delete([Task.doc_id == doc_id])
             if not DocumentService.remove_document(doc, tenant_id):
-                return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                return get_data_error_result(message="Database error (Document removal)!")

             f2d = File2DocumentService.get_by_document_id(doc_id)
-            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
+            deleted_file_count = 0
+            if f2d:
+                deleted_file_count = FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
             File2DocumentService.delete_by_document_id(doc_id)
+            if deleted_file_count > 0:
+                STORAGE_IMPL.rm(b, n)

-            STORAGE_IMPL.rm(b, n)
+            doc_parser = doc.parser_id
+            if doc_parser == ParserType.TABLE:
+                kb_id = doc.kb_id
+                if kb_id not in kb_table_num_map:
+                    counts = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
+                    kb_table_num_map[kb_id] = counts
+                kb_table_num_map[kb_id] -= 1
+                if kb_table_num_map[kb_id] <= 0:
+                    KnowledgebaseService.delete_field_map(kb_id)
         except Exception as e:
             errors += str(e)

     if errors:
-        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)
+        return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)

     return get_json_result(data=True)

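The kb_table_num_map cache counts the remaining TABLE-parsed documents per knowledge base so the field map is dropped only when the last one goes away, seeding the count from the database once per KB instead of re-querying for each deletion. The counting pattern in isolation (count_by_kb_id is stubbed here):

    def remaining_table_docs(kb_id):
        return 2  # stub for DocumentService.count_by_kb_id(...)

    kb_table_num_map = {}

    def on_table_doc_deleted(kb_id, drop_field_map):
        # Seed the counter from the DB once per KB, then decrement locally.
        if kb_id not in kb_table_num_map:
            kb_table_num_map[kb_id] = remaining_table_docs(kb_id)
        kb_table_num_map[kb_id] -= 1
        if kb_table_num_map[kb_id] <= 0:
            drop_field_map(kb_id)

    on_table_doc_deleted("kb1", lambda kb: print(f"drop field map of {kb}"))  # 2 -> 1, kept
    on_table_doc_deleted("kb1", lambda kb: print(f"drop field map of {kb}"))  # 1 -> 0, dropped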
-@manager.route('/run', methods=['POST'])
+@manager.route("/run", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("doc_ids", "run")
 def run():
     req = request.json
     for doc_id in req["doc_ids"]:
         if not DocumentService.accessible(doc_id, current_user.id):
-            return get_json_result(
-                data=False,
-                retmsg='No authorization.',
-                retcode=RetCode.AUTHENTICATION_ERROR
-            )
+            return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
     try:
+        kb_table_num_map = {}
         for id in req["doc_ids"]:
             info = {"run": str(req["run"]), "progress": 0}
-            if str(req["run"]) == TaskStatus.RUNNING.value:
+            if str(req["run"]) == TaskStatus.RUNNING.value and req.get("delete", False):
                 info["progress_msg"] = ""
                 info["chunk_num"] = 0
                 info["token_num"] = 0
             DocumentService.update_by_id(id, info)
-            # if str(req["run"]) == TaskStatus.CANCEL.value:
             tenant_id = DocumentService.get_tenant_id(id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
-            ELASTICSEARCH.deleteByQuery(
-                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
+                return get_data_error_result(message="Tenant not found!")
+            e, doc = DocumentService.get_by_id(id)
+            if not e:
+                return get_data_error_result(message="Document not found!")
+            if req.get("delete", False):
+                TaskService.filter_delete([Task.doc_id == id])
+                if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
+                    settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)

             if str(req["run"]) == TaskStatus.RUNNING.value:
-                TaskService.filter_delete([Task.doc_id == id])
                 e, doc = DocumentService.get_by_id(id)
                 doc = doc.to_dict()
                 doc["tenant_id"] = tenant_id

+                doc_parser = doc.get("parser_id", ParserType.NAIVE)
+                if doc_parser == ParserType.TABLE:
+                    kb_id = doc.get("kb_id")
+                    if not kb_id:
+                        continue
+                    if kb_id not in kb_table_num_map:
+                        count = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
+                        kb_table_num_map[kb_id] = count
+                    if kb_table_num_map[kb_id] <= 0:
+                        KnowledgebaseService.delete_field_map(kb_id)
                 bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-                queue_tasks(doc, bucket, name)
+                queue_tasks(doc, bucket, name, 0)

         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

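queue_tasks now takes a fourth argument, 0 here, which reads as a priority for the parsing queue; that meaning is inferred from the call site only. The general pattern of a priority-ordered work queue, standalone:

    import heapq
    import itertools

    _counter = itertools.count()  # tie-breaker keeps FIFO order within a priority
    queue = []

    def queue_task(doc, bucket, name, priority=0):
        # Lower number = served first; heapq pops the smallest tuple.
        heapq.heappush(queue, (priority, next(_counter), {"doc": doc, "bucket": bucket, "name": name}))

    queue_task({"id": "d2"}, "b", "n", priority=1)
    queue_task({"id": "d1"}, "b", "n", priority=0)
    print(heapq.heappop(queue)[2]["doc"])  # {'id': 'd1'} — the lower priority value wins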
-@manager.route('/rename', methods=['POST'])
+@manager.route("/rename", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("doc_id", "name")
 def rename():
     req = request.json
     if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            retmsg='No authorization.',
-            retcode=RetCode.AUTHENTICATION_ERROR
-        )
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
-        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
-                doc.name.lower()).suffix:
-            return get_json_result(
-                data=False,
-                retmsg="The extension of file can't be changed",
-                retcode=RetCode.ARGUMENT_ERROR)
+            return get_data_error_result(message="Document not found!")
+        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
+            return get_json_result(data=False, message="The extension of file can't be changed", code=settings.RetCode.ARGUMENT_ERROR)
         for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
             if d.name == req["name"]:
-                return get_data_error_result(
-                    retmsg="Duplicated document name in the same knowledgebase.")
+                return get_data_error_result(message="Duplicated document name in the same knowledgebase.")

-        if not DocumentService.update_by_id(
-                req["doc_id"], {"name": req["name"]}):
-            return get_data_error_result(
-                retmsg="Database error (Document rename)!")
+        if not DocumentService.update_by_id(req["doc_id"], {"name": req["name"]}):
+            return get_data_error_result(message="Database error (Document rename)!")

         informs = File2DocumentService.get_by_document_id(req["doc_id"])
         if informs:
@@ -422,13 +418,13 @@ def rename():
         return server_error_response(e)


-@manager.route('/get/<doc_id>', methods=['GET'])
+@manager.route("/get/<doc_id>", methods=["GET"])  # noqa: F821
 # @login_required
 def get(doc_id):
     try:
         e, doc = DocumentService.get_by_id(doc_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")

         b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
         response = flask.make_response(STORAGE_IMPL.get(b, n))
@@ -436,33 +432,26 @@ def get(doc_id):
         ext = re.search(r"\.([^.]+)$", doc.name)
         if ext:
             if doc.type == FileType.VISUAL.value:
-                response.headers.set('Content-Type', 'image/%s' % ext.group(1))
+                response.headers.set("Content-Type", "image/%s" % ext.group(1))
             else:
-                response.headers.set(
-                    'Content-Type',
-                    'application/%s' %
-                    ext.group(1))
+                response.headers.set("Content-Type", "application/%s" % ext.group(1))
         return response
     except Exception as e:
         return server_error_response(e)


-@manager.route('/change_parser', methods=['POST'])
+@manager.route("/change_parser", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("doc_id", "parser_id")
 def change_parser():
     req = request.json

     if not DocumentService.accessible(req["doc_id"], current_user.id):
-        return get_json_result(
-            data=False,
-            retmsg='No authorization.',
-            retcode=RetCode.AUTHENTICATION_ERROR
-        )
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         if doc.parser_id.lower() == req["parser_id"].lower():
             if "parser_config" in req:
                 if req["parser_config"] == doc.parser_config:
@@ -470,60 +459,136 @@ def change_parser():
             else:
                 return get_json_result(data=True)

-        if ((doc.type == FileType.VISUAL and req["parser_id"] != "picture")
-                or (re.search(
-                    r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation")):
-            return get_data_error_result(retmsg="Not supported yet!")
+        if (doc.type == FileType.VISUAL and req["parser_id"] != "picture") or (re.search(r"\.(ppt|pptx|pages)$", doc.name) and req["parser_id"] != "presentation"):
+            return get_data_error_result(message="Not supported yet!")

-        e = DocumentService.update_by_id(doc.id,
-                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
-                                          "run": TaskStatus.UNSTART.value})
+        e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "", "run": TaskStatus.UNSTART.value})
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
+            return get_data_error_result(message="Document not found!")
         if "parser_config" in req:
             DocumentService.update_parser_config(doc.id, req["parser_config"])
         if doc.token_num > 0:
-            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
-                                                    doc.process_duation * -1)
+            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, doc.process_duation * -1)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(req["doc_id"])
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
-            ELASTICSEARCH.deleteByQuery(
-                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
+                return get_data_error_result(message="Tenant not found!")
+            if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
+                settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)

         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)

-@manager.route('/image/<image_id>', methods=['GET'])
+@manager.route("/image/<image_id>", methods=["GET"])  # noqa: F821
 # @login_required
 def get_image(image_id):
     try:
+        arr = image_id.split("-")
+        if len(arr) != 2:
+            return get_data_error_result(message="Image not found.")
         bkt, nm = image_id.split("-")
         response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
-        response.headers.set('Content-Type', 'image/JPEG')
+        response.headers.set("Content-Type", "image/JPEG")
         return response
     except Exception as e:
         return server_error_response(e)

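The new guard rejects malformed image IDs before the two-way unpack can raise a ValueError. The same check in isolation:

    def split_image_id(image_id):
        # "bucket-name" is the only accepted shape; anything else is rejected up front.
        arr = image_id.split("-")
        if len(arr) != 2:
            raise LookupError("Image not found.")
        bkt, nm = arr
        return bkt, nm

    print(split_image_id("kb123-img456"))  # ('kb123', 'img456')
    # split_image_id("kb123-img-456")      # raises LookupError instead of unpacking wrongly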
-@manager.route('/upload_and_parse', methods=['POST'])
+@manager.route("/upload_and_parse", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id")
 def upload_and_parse():
-    if 'file' not in request.files:
-        return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+    if "file" not in request.files:
+        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)

-    file_objs = request.files.getlist('file')
+    file_objs = request.files.getlist("file")
     for file_obj in file_objs:
-        if file_obj.filename == '':
-            return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+        if file_obj.filename == "":
+            return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)

     doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)

     return get_json_result(data=doc_ids)

+@manager.route("/parse", methods=["POST"])  # noqa: F821
+@login_required
+def parse():
+    url = request.json.get("url") if request.json else ""
+    if url:
+        if not is_valid_url(url):
+            return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
+        download_path = os.path.join(get_project_base_directory(), "logs/downloads")
+        os.makedirs(download_path, exist_ok=True)
+        from seleniumwire.webdriver import Chrome, ChromeOptions
+
+        options = ChromeOptions()
+        options.add_argument("--headless")
+        options.add_argument("--disable-gpu")
+        options.add_argument("--no-sandbox")
+        options.add_argument("--disable-dev-shm-usage")
+        options.add_experimental_option("prefs", {"download.default_directory": download_path, "download.prompt_for_download": False, "download.directory_upgrade": True, "safebrowsing.enabled": True})
+        driver = Chrome(options=options)
+        driver.get(url)
+        res_headers = [r.response.headers for r in driver.requests if r and r.response]
+        if len(res_headers) > 1:
+            sections = RAGFlowHtmlParser().parser_txt(driver.page_source)
+            driver.quit()
+            return get_json_result(data="\n".join(sections))
+
+        class File:
+            filename: str
+            filepath: str
+
+            def __init__(self, filename, filepath):
+                self.filename = filename
+                self.filepath = filepath
+
+            def read(self):
+                with open(self.filepath, "rb") as f:
+                    return f.read()
+
+        r = re.search(r"filename=\"([^\"]+)\"", str(res_headers))
+        if not r or not r.group(1):
+            return get_json_result(data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
+        f = File(r.group(1), os.path.join(download_path, r.group(1)))
+        txt = FileService.parse_docs([f], current_user.id)
+        return get_json_result(data=txt)
+
+    if "file" not in request.files:
+        return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
+
+    file_objs = request.files.getlist("file")
+    txt = FileService.parse_docs(file_objs, current_user.id)
+
+    return get_json_result(data=txt)

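When the headless browser triggers a file download instead of rendering HTML, the new parse endpoint recovers the filename from the captured response headers. The extraction step on its own (the header text below is a made-up example):

    import re

    res_headers = "{'content-disposition': 'attachment; filename=\"report-2024.pdf\"'}"

    # Pull the quoted filename out of a Content-Disposition-style header dump.
    m = re.search(r"filename=\"([^\"]+)\"", res_headers)
    print(m.group(1) if m else None)  # report-2024.pdf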
+@manager.route("/set_meta", methods=["POST"])  # noqa: F821
+@login_required
+@validate_request("doc_id", "meta")
+def set_meta():
+    req = request.json
+    if not DocumentService.accessible(req["doc_id"], current_user.id):
+        return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
+    try:
+        meta = json.loads(req["meta"])
+    except Exception as e:
+        return get_json_result(data=False, message=f"Json syntax error: {e}", code=settings.RetCode.ARGUMENT_ERROR)
+    if not isinstance(meta, dict):
+        return get_json_result(data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)
+
+    try:
+        e, doc = DocumentService.get_by_id(req["doc_id"])
+        if not e:
+            return get_data_error_result(message="Document not found!")
+
+        if not DocumentService.update_by_id(req["doc_id"], {"meta_fields": meta}):
+            return get_data_error_result(message="Database error (meta updates)!")
+
+        return get_json_result(data=True)
+    except Exception as e:
+        return server_error_response(e)

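set_meta accepts the metadata as a JSON string and insists the decoded value is an object rather than a list or scalar. That two-step validation standalone:

    import json

    def parse_meta(raw):
        try:
            meta = json.loads(raw)
        except Exception as e:
            raise ValueError(f"Json syntax error: {e}")
        if not isinstance(meta, dict):
            raise ValueError('Meta data should be in Json map format, like {"key": "value"}')
        return meta

    print(parse_meta('{"author": "alice"}'))  # ok
    # parse_meta('["not", "a", "map"]')       # rejected: valid JSON but not an object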
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #
-from elasticsearch_dsl import Q
-
-from api.db.db_models import File2Document
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService

@@ -26,13 +24,11 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
 from api.utils import get_uuid
 from api.db import FileType
 from api.db.services.document_service import DocumentService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import get_json_result
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH


-@manager.route('/convert', methods=['POST'])
+@manager.route('/convert', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("file_ids", "kb_ids")
 def convert():
@@ -42,8 +38,12 @@ def convert():
     file2documents = []

     try:
+        files = FileService.get_by_ids(file_ids)
+        files_set = dict({file.id: file for file in files})
         for file_id in file_ids:
-            e, file = FileService.get_by_id(file_id)
+            file = files_set[file_id]
+            if not file:
+                return get_data_error_result(message="File not found!")
             file_ids_list = [file_id]
             if file.type == FileType.FOLDER.value:
                 file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])
@@ -54,13 +54,13 @@ def convert():
                 doc_id = inform.document_id
                 e, doc = DocumentService.get_by_id(doc_id)
                 if not e:
-                    return get_data_error_result(retmsg="Document not found!")
+                    return get_data_error_result(message="Document not found!")
                 tenant_id = DocumentService.get_tenant_id(doc_id)
                 if not tenant_id:
-                    return get_data_error_result(retmsg="Tenant not found!")
+                    return get_data_error_result(message="Tenant not found!")
                 if not DocumentService.remove_document(doc, tenant_id):
                     return get_data_error_result(
-                        retmsg="Database error (Document removal)!")
+                        message="Database error (Document removal)!")
             File2DocumentService.delete_by_file_id(id)

             # insert
@@ -68,11 +68,11 @@ def convert():
             e, kb = KnowledgebaseService.get_by_id(kb_id)
             if not e:
                 return get_data_error_result(
-                    retmsg="Can't find this knowledgebase!")
+                    message="Can't find this knowledgebase!")
             e, file = FileService.get_by_id(id)
             if not e:
                 return get_data_error_result(
-                    retmsg="Can't find this file!")
+                    message="Can't find this file!")

             doc = DocumentService.insert({
                 "id": get_uuid(),
@@ -90,13 +90,14 @@ def convert():
                 "file_id": id,
                 "document_id": doc.id,
             })
+
             file2documents.append(file2document.to_json())
         return get_json_result(data=file2documents)
     except Exception as e:
         return server_error_response(e)

@manager.route('/rm', methods=['POST'])
|
@manager.route('/rm', methods=['POST']) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("file_ids")
|
@validate_request("file_ids")
|
||||||
def rm():
|
def rm():
|
||||||
@ -104,26 +105,26 @@ def rm():
|
|||||||
file_ids = req["file_ids"]
|
file_ids = req["file_ids"]
|
||||||
if not file_ids:
|
if not file_ids:
|
||||||
return get_json_result(
|
return get_json_result(
|
||||||
data=False, retmsg='Lack of "Files ID"', retcode=RetCode.ARGUMENT_ERROR)
|
data=False, message='Lack of "Files ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||||
try:
|
try:
|
||||||
for file_id in file_ids:
|
for file_id in file_ids:
|
||||||
informs = File2DocumentService.get_by_file_id(file_id)
|
informs = File2DocumentService.get_by_file_id(file_id)
|
||||||
if not informs:
|
if not informs:
|
||||||
return get_data_error_result(retmsg="Inform not found!")
|
return get_data_error_result(message="Inform not found!")
|
||||||
for inform in informs:
|
for inform in informs:
|
||||||
if not inform:
|
if not inform:
|
||||||
return get_data_error_result(retmsg="Inform not found!")
|
return get_data_error_result(message="Inform not found!")
|
||||||
File2DocumentService.delete_by_file_id(file_id)
|
File2DocumentService.delete_by_file_id(file_id)
|
||||||
doc_id = inform.document_id
|
doc_id = inform.document_id
|
||||||
e, doc = DocumentService.get_by_id(doc_id)
|
e, doc = DocumentService.get_by_id(doc_id)
|
||||||
if not e:
|
if not e:
|
||||||
return get_data_error_result(retmsg="Document not found!")
|
return get_data_error_result(message="Document not found!")
|
||||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||||
if not tenant_id:
|
if not tenant_id:
|
||||||
return get_data_error_result(retmsg="Tenant not found!")
|
return get_data_error_result(message="Tenant not found!")
|
||||||
if not DocumentService.remove_document(doc, tenant_id):
|
if not DocumentService.remove_document(doc, tenant_id):
|
||||||
return get_data_error_result(
|
return get_data_error_result(
|
||||||
retmsg="Database error (Document removal)!")
|
message="Database error (Document removal)!")
|
||||||
return get_json_result(data=True)
|
return get_json_result(data=True)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
return server_error_response(e)
|
return server_error_response(e)
|
||||||
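Note — from here on, most changed rows are one mechanical migration: the response helpers' `retmsg`/`retcode` keyword arguments become `message`/`code`, and `RetCode` is reached through the `settings` module object instead of being imported directly from `api.settings`. A sketch of the new calling convention; the `bad_request` wrapper is illustrative only, not a helper that exists in the codebase:

    from api import settings                      # RetCode now hangs off settings
    from api.utils.api_utils import get_json_result

    def bad_request(msg):
        # old spelling: get_json_result(data=False, retmsg=msg, retcode=RetCode.ARGUMENT_ERROR)
        return get_json_result(data=False, message=msg, code=settings.RetCode.ARGUMENT_ERROR)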
@@ -18,7 +18,6 @@ import pathlib
 import re

 import flask
-from elasticsearch_dsl import Q
 from flask import request
 from flask_login import login_required, current_user

@@ -29,15 +28,13 @@ from api.utils import get_uuid
 from api.db import FileType, FileSource
 from api.db.services import duplicate_name
 from api.db.services.file_service import FileService
-from api.settings import RetCode
+from api import settings
 from api.utils.api_utils import get_json_result
 from api.utils.file_utils import filename_type
-from rag.nlp import search
-from rag.utils.es_conn import ELASTICSEARCH
 from rag.utils.storage_factory import STORAGE_IMPL


-@manager.route('/upload', methods=['POST'])
+@manager.route('/upload', methods=['POST'])  # noqa: F821
 @login_required
 # @validate_request("parent_id")
 def upload():
@@ -49,29 +46,26 @@ def upload():

     if 'file' not in request.files:
         return get_json_result(
-            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
+            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
     file_objs = request.files.getlist('file')

     for file_obj in file_objs:
         if file_obj.filename == '':
             return get_json_result(
-                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
+                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
     file_res = []
     try:
+        e, pf_folder = FileService.get_by_id(pf_id)
+        if not e:
+            return get_data_error_result( message="Can't find this folder!")
         for file_obj in file_objs:
-            e, file = FileService.get_by_id(pf_id)
-            if not e:
-                return get_data_error_result(
-                    retmsg="Can't find this folder!")
             MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
             if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
-                return get_data_error_result(
-                    retmsg="Exceed the maximum file number of a free user!")
+                return get_data_error_result( message="Exceed the maximum file number of a free user!")

             # split file name path
             if not file_obj.filename:
-                e, file = FileService.get_by_id(pf_id)
-                file_obj_names = [file.name, file_obj.filename]
+                file_obj_names = [pf_folder.name, file_obj.filename]
             else:
                 full_path = '/' + file_obj.filename
                 file_obj_names = full_path.split('/')
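Note — the upload hunk hoists the parent-folder lookup out of the per-file loop: one `get_by_id(pf_id)` before iterating, reused as `pf_folder` inside. The general shape of that change, with stand-in callables rather than ragflow services:

    def upload_all(pf_id, file_objs, get_folder, save_one):
        folder = get_folder(pf_id)        # looked up once, not once per file
        if folder is None:
            raise LookupError("Can't find this folder!")
        return [save_one(folder, f) for f in file_objs]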
@@ -85,13 +79,13 @@ def upload():
             if file_len != len_id_list:
                 e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
                 if not e:
-                    return get_data_error_result(retmsg="Folder not found!")
+                    return get_data_error_result(message="Folder not found!")
                 last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names,
                                                         len_id_list)
             else:
                 e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
                 if not e:
-                    return get_data_error_result(retmsg="Folder not found!")
+                    return get_data_error_result(message="Folder not found!")
                 last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names,
                                                         len_id_list)

@@ -123,7 +117,7 @@ def upload():
         return server_error_response(e)


-@manager.route('/create', methods=['POST'])
+@manager.route('/create', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("name")
 def create():
@@ -137,10 +131,10 @@ def create():
     try:
         if not FileService.is_parent_folder_exist(pf_id):
             return get_json_result(
-                data=False, retmsg="Parent Folder Doesn't Exist!", retcode=RetCode.OPERATING_ERROR)
+                data=False, message="Parent Folder Doesn't Exist!", code=settings.RetCode.OPERATING_ERROR)
         if FileService.query(name=req["name"], parent_id=pf_id):
             return get_data_error_result(
-                retmsg="Duplicated folder name in the same folder.")
+                message="Duplicated folder name in the same folder.")

         if input_file_type == FileType.FOLDER.value:
             file_type = FileType.FOLDER.value
@@ -163,7 +157,7 @@ def create():
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_files():
     pf_id = request.args.get("parent_id")
@@ -181,21 +175,21 @@ def list_files():
     try:
         e, file = FileService.get_by_id(pf_id)
         if not e:
-            return get_data_error_result(retmsg="Folder not found!")
+            return get_data_error_result(message="Folder not found!")

         files, total = FileService.get_by_pf_id(
             current_user.id, pf_id, page_number, items_per_page, orderby, desc, keywords)

         parent_folder = FileService.get_parent_folder(pf_id)
-        if not FileService.get_parent_folder(pf_id):
-            return get_json_result(retmsg="File not found!")
+        if not parent_folder:
+            return get_json_result(message="File not found!")

         return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
     except Exception as e:
         return server_error_response(e)


-@manager.route('/root_folder', methods=['GET'])
+@manager.route('/root_folder', methods=['GET'])  # noqa: F821
 @login_required
 def get_root_folder():
     try:
@@ -205,14 +199,14 @@ def get_root_folder():
         return server_error_response(e)


-@manager.route('/parent_folder', methods=['GET'])
+@manager.route('/parent_folder', methods=['GET'])  # noqa: F821
 @login_required
 def get_parent_folder():
     file_id = request.args.get("file_id")
     try:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="Folder not found!")
+            return get_data_error_result(message="Folder not found!")

         parent_folder = FileService.get_parent_folder(file_id)
         return get_json_result(data={"parent_folder": parent_folder.to_json()})
@@ -220,14 +214,14 @@ def get_parent_folder():
         return server_error_response(e)


-@manager.route('/all_parent_folder', methods=['GET'])
+@manager.route('/all_parent_folder', methods=['GET'])  # noqa: F821
 @login_required
 def get_all_parent_folders():
     file_id = request.args.get("file_id")
     try:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="Folder not found!")
+            return get_data_error_result(message="Folder not found!")

         parent_folders = FileService.get_all_parent_folders(file_id)
         parent_folders_res = []
@@ -238,7 +232,7 @@ def get_all_parent_folders():
         return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("file_ids")
 def rm():
@@ -248,9 +242,9 @@ def rm():
     for file_id in file_ids:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="File or Folder not found!")
+            return get_data_error_result(message="File or Folder not found!")
         if not file.tenant_id:
-            return get_data_error_result(retmsg="Tenant not found!")
+            return get_data_error_result(message="Tenant not found!")
         if file.source_type == FileSource.KNOWLEDGEBASE:
             continue

@@ -259,13 +253,14 @@ def rm():
             for inner_file_id in file_id_list:
                 e, file = FileService.get_by_id(inner_file_id)
                 if not e:
-                    return get_data_error_result(retmsg="File not found!")
+                    return get_data_error_result(message="File not found!")
                 STORAGE_IMPL.rm(file.parent_id, file.location)
             FileService.delete_folder_by_pf_id(current_user.id, file_id)
         else:
+            STORAGE_IMPL.rm(file.parent_id, file.location)
             if not FileService.delete(file):
                 return get_data_error_result(
-                    retmsg="Database error (File removal)!")
+                    message="Database error (File removal)!")

         # delete file2document
         informs = File2DocumentService.get_by_file_id(file_id)
@@ -273,13 +268,13 @@ def rm():
             doc_id = inform.document_id
             e, doc = DocumentService.get_by_id(doc_id)
             if not e:
-                return get_data_error_result(retmsg="Document not found!")
+                return get_data_error_result(message="Document not found!")
             tenant_id = DocumentService.get_tenant_id(doc_id)
             if not tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
             if not DocumentService.remove_document(doc, tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")
             File2DocumentService.delete_by_file_id(file_id)

         return get_json_result(data=True)
@@ -287,7 +282,7 @@ def rm():
         return server_error_response(e)


-@manager.route('/rename', methods=['POST'])
+@manager.route('/rename', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("file_id", "name")
 def rename():
@@ -295,45 +290,50 @@ def rename():
     try:
         e, file = FileService.get_by_id(req["file_id"])
         if not e:
-            return get_data_error_result(retmsg="File not found!")
+            return get_data_error_result(message="File not found!")
         if file.type != FileType.FOLDER.value \
                 and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                     file.name.lower()).suffix:
             return get_json_result(
                 data=False,
-                retmsg="The extension of file can't be changed",
-                retcode=RetCode.ARGUMENT_ERROR)
+                message="The extension of file can't be changed",
+                code=settings.RetCode.ARGUMENT_ERROR)
         for file in FileService.query(name=req["name"], pf_id=file.parent_id):
             if file.name == req["name"]:
                 return get_data_error_result(
-                    retmsg="Duplicated file name in the same folder.")
+                    message="Duplicated file name in the same folder.")

         if not FileService.update_by_id(
                 req["file_id"], {"name": req["name"]}):
             return get_data_error_result(
-                retmsg="Database error (File rename)!")
+                message="Database error (File rename)!")

         informs = File2DocumentService.get_by_file_id(req["file_id"])
         if informs:
             if not DocumentService.update_by_id(
                     informs[0].document_id, {"name": req["name"]}):
                 return get_data_error_result(
-                    retmsg="Database error (Document rename)!")
+                    message="Database error (Document rename)!")

         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/get/<file_id>', methods=['GET'])
-# @login_required
+@manager.route('/get/<file_id>', methods=['GET'])  # noqa: F821
+@login_required
 def get(file_id):
     try:
         e, file = FileService.get_by_id(file_id)
         if not e:
-            return get_data_error_result(retmsg="Document not found!")
-        b, n = File2DocumentService.get_storage_address(file_id=file_id)
-        response = flask.make_response(STORAGE_IMPL.get(b, n))
+            return get_data_error_result(message="Document not found!")
+        blob = STORAGE_IMPL.get(file.parent_id, file.location)
+        if not blob:
+            b, n = File2DocumentService.get_storage_address(file_id=file_id)
+            blob = STORAGE_IMPL.get(b, n)
+
+        response = flask.make_response(blob)
         ext = re.search(r"\.([^.]+)$", file.name)
         if ext:
             if file.type == FileType.VISUAL.value:
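Note — the reworked `/get` handler reads from the file's own storage slot first and only falls back to the storage address of the linked document. A condensed sketch of that fallback, with `store` and `resolve_linked_address` standing in for `STORAGE_IMPL` and `File2DocumentService.get_storage_address`:

    def read_blob(store, file, resolve_linked_address):
        blob = store.get(file.parent_id, file.location)   # primary location
        if not blob:
            bucket, name = resolve_linked_address(file.id)  # linked document's address
            blob = store.get(bucket, name)
        return blob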
@@ -348,7 +348,7 @@ def get(file_id):
         return server_error_response(e)


-@manager.route('/mv', methods=['POST'])
+@manager.route('/mv', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("src_file_ids", "dest_file_id")
 def move():
@@ -356,15 +356,20 @@ def move():
     try:
         file_ids = req["src_file_ids"]
         parent_id = req["dest_file_id"]
+        files = FileService.get_by_ids(file_ids)
+        files_dict = {}
+        for file in files:
+            files_dict[file.id] = file
+
         for file_id in file_ids:
-            e, file = FileService.get_by_id(file_id)
-            if not e:
-                return get_data_error_result(retmsg="File or Folder not found!")
+            file = files_dict[file_id]
+            if not file:
+                return get_data_error_result(message="File or Folder not found!")
             if not file.tenant_id:
-                return get_data_error_result(retmsg="Tenant not found!")
+                return get_data_error_result(message="Tenant not found!")
             fe, _ = FileService.get_by_id(parent_id)
             if not fe:
-                return get_data_error_result(retmsg="Parent Folder not found!")
+                return get_data_error_result(message="Parent Folder not found!")
         FileService.move_file(file_ids, parent_id)
         return get_json_result(data=True)
     except Exception as e:
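Note — one subtlety in the `/mv` hunk: `files_dict[file_id]` raises `KeyError` for an id that `get_by_ids` did not return, so the `if not file` guard only fires for falsy stored values. A `.get()` keeps the intended not-found branch; a hypothetical helper, not code from the diff:

    def pick(files_dict, file_id):
        file = files_dict.get(file_id)    # None for unknown ids, no KeyError
        if not file:
            raise LookupError("File or Folder not found!")
        return file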
@@ -13,6 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
+import os
+
 from flask import request
 from flask_login import login_required, current_user

@@ -21,33 +24,46 @@ from api.db.services.document_service import DocumentService
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
 from api.db.services.user_service import TenantService, UserTenantService
-from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
+from api.utils.api_utils import server_error_response, get_data_error_result, validate_request, not_allowed_parameters
 from api.utils import get_uuid
 from api.db import StatusEnum, FileSource
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.db_models import File
-from api.settings import RetCode
 from api.utils.api_utils import get_json_result
+from api import settings
+from rag.nlp import search
+from api.constants import DATASET_NAME_LIMIT
+from rag.settings import PAGERANK_FLD


-@manager.route('/create', methods=['post'])
+@manager.route('/create', methods=['post'])  # noqa: F821
 @login_required
 @validate_request("name")
 def create():
     req = request.json
-    req["name"] = req["name"].strip()
-    req["name"] = duplicate_name(
+    dataset_name = req["name"]
+    if not isinstance(dataset_name, str):
+        return get_data_error_result(message="Dataset name must be string.")
+    if dataset_name == "":
+        return get_data_error_result(message="Dataset name can't be empty.")
+    if len(dataset_name) >= DATASET_NAME_LIMIT:
+        return get_data_error_result(
+            message=f"Dataset name length is {len(dataset_name)} which is large than {DATASET_NAME_LIMIT}")
+
+    dataset_name = dataset_name.strip()
+    dataset_name = duplicate_name(
         KnowledgebaseService.query,
-        name=req["name"],
+        name=dataset_name,
         tenant_id=current_user.id,
         status=StatusEnum.VALID.value)
     try:
         req["id"] = get_uuid()
+        req["name"] = dataset_name
         req["tenant_id"] = current_user.id
         req["created_by"] = current_user.id
         e, t = TenantService.get_by_id(current_user.id)
         if not e:
-            return get_data_error_result(retmsg="Tenant not found.")
+            return get_data_error_result(message="Tenant not found.")
         req["embd_id"] = t.embd_id
         if not KnowledgebaseService.save(**req):
             return get_data_error_result()
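Note — `create()` now validates the dataset name (type, emptiness, length against `DATASET_NAME_LIMIT`) before deduplicating it. The same checks as a standalone function; the limit value 128 is a placeholder here, the real one comes from `api.constants`. The length check runs before `strip()`, so surrounding whitespace counts toward the limit:

    DATASET_NAME_LIMIT = 128  # placeholder value

    def validate_dataset_name(name):
        if not isinstance(name, str):
            return "Dataset name must be string."
        if name == "":
            return "Dataset name can't be empty."
        if len(name) >= DATASET_NAME_LIMIT:
            return f"Dataset name length is {len(name)}, over the {DATASET_NAME_LIMIT} limit"
        return None  # valid

    assert validate_dataset_name("papers-2024") is None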
@@ -56,49 +72,70 @@ def create():
         return server_error_response(e)


-@manager.route('/update', methods=['post'])
+@manager.route('/update', methods=['post'])  # noqa: F821
 @login_required
-@validate_request("kb_id", "name", "description", "permission", "parser_id")
+@validate_request("kb_id", "name", "description", "parser_id")
+@not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
 def update():
     req = request.json
     req["name"] = req["name"].strip()
     if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
         return get_json_result(
             data=False,
-            retmsg='No authorization.',
-            retcode=RetCode.AUTHENTICATION_ERROR
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
         )
     try:
         if not KnowledgebaseService.query(
                 created_by=current_user.id, id=req["kb_id"]):
             return get_json_result(
-                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of knowledgebase authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)

         e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
         if not e:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")

+        if req.get("parser_id", "") == "tag" and os.environ.get('DOC_ENGINE', "elasticsearch") == "infinity":
+            return get_json_result(
+                data=False,
+                message='The chunking method Tag has not been supported by Infinity yet.',
+                code=settings.RetCode.OPERATING_ERROR
+            )
+
         if req["name"].lower() != kb.name.lower() \
-                and len(KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
+                and len(
+            KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
             return get_data_error_result(
-                retmsg="Duplicated knowledgebase name.")
+                message="Duplicated knowledgebase name.")

         del req["kb_id"]
         if not KnowledgebaseService.update_by_id(kb.id, req):
             return get_data_error_result()

+        if kb.pagerank != req.get("pagerank", 0):
+            if req.get("pagerank", 0) > 0:
+                settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]},
+                                             search.index_name(kb.tenant_id), kb.id)
+            else:
+                # Elasticsearch requires PAGERANK_FLD be non-zero!
+                settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD},
+                                             search.index_name(kb.tenant_id), kb.id)
+
         e, kb = KnowledgebaseService.get_by_id(kb.id)
         if not e:
             return get_data_error_result(
-                retmsg="Database error (Knowledgebase rename)!")
+                message="Database error (Knowledgebase rename)!")
+        kb = kb.to_dict()
+        kb.update(req)

-        return get_json_result(data=kb.to_json())
+        return get_json_result(data=kb)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/detail', methods=['GET'])
+@manager.route('/detail', methods=['GET'])  # noqa: F821
 @login_required
 def detail():
     kb_id = request.args["kb_id"]
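Note — the new pagerank block pushes a knowledge base's pagerank into the document store whenever it changes: a positive value is written onto every chunk of the kb, while zero removes the field entirely because, per the inline comment, Elasticsearch requires the field to be non-zero. The two update payloads in plain-dict form; the field name `"pagerank_fea"` is a guessed placeholder for the real `PAGERANK_FLD` constant from `rag.settings`:

    def pagerank_update(kb_id, pagerank, fld="pagerank_fea"):
        if pagerank > 0:
            # set the field on every chunk of this knowledge base
            return {"condition": {"kb_id": kb_id}, "update": {fld: pagerank}}
        # zero disables the boost, so the field is removed rather than set to 0
        return {"condition": {"exists": fld}, "update": {"remove": fld}}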
@@ -110,34 +147,51 @@ def detail():
                 break
         else:
             return get_json_result(
-                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
-                retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of knowledgebase authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)
         kb = KnowledgebaseService.get_detail(kb_id)
         if not kb:
             return get_data_error_result(
-                retmsg="Can't find this knowledgebase!")
+                message="Can't find this knowledgebase!")
+        kb["size"] = DocumentService.get_total_size_by_kb_id(kb_id=kb["id"],keywords="", run_status=[], types=[])
         return get_json_result(data=kb)
     except Exception as e:
         return server_error_response(e)


-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['POST'])  # noqa: F821
 @login_required
 def list_kbs():
-    page_number = request.args.get("page", 1)
-    items_per_page = request.args.get("page_size", 150)
+    keywords = request.args.get("keywords", "")
+    page_number = int(request.args.get("page", 0))
+    items_per_page = int(request.args.get("page_size", 0))
+    parser_id = request.args.get("parser_id")
     orderby = request.args.get("orderby", "create_time")
     desc = request.args.get("desc", True)

+    req = request.get_json()
+    owner_ids = req.get("owner_ids", [])
     try:
-        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
-        kbs = KnowledgebaseService.get_by_tenant_ids(
-            [m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc)
-        return get_json_result(data=kbs)
+        if not owner_ids:
+            tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
+            tenants = [m["tenant_id"] for m in tenants]
+            kbs, total = KnowledgebaseService.get_by_tenant_ids(
+                tenants, current_user.id, page_number,
+                items_per_page, orderby, desc, keywords, parser_id)
+        else:
+            tenants = owner_ids
+            kbs, total = KnowledgebaseService.get_by_tenant_ids(
+                tenants, current_user.id, 0,
+                0, orderby, desc, keywords, parser_id)
+            kbs = [kb for kb in kbs if kb["tenant_id"] in tenants]
+            if page_number and items_per_page:
+                kbs = kbs[(page_number-1)*items_per_page:page_number*items_per_page]
+            total = len(kbs)
+        return get_json_result(data={"kbs": kbs, "total": total})
     except Exception as e:
         return server_error_response(e)


-@manager.route('/rm', methods=['post'])
+@manager.route('/rm', methods=['post'])  # noqa: F821
 @login_required
 @validate_request("kb_id")
 def rm():
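Note — `/list` switches from GET to POST: paging and filter parameters stay in the query string while the optional `owner_ids` tenant filter moves into the JSON body, and the response now carries both the page of kbs and a total count. A hypothetical call; the host and `/v1/kb` route prefix are assumptions, only the parameter names come from the handler above:

    import requests

    r = requests.post(
        "http://localhost:9380/v1/kb/list",
        params={"page": 1, "page_size": 20, "keywords": "faq"},
        json={"owner_ids": []},   # empty list -> all tenants the user has joined
    )
    print(r.json()["data"]["total"])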
@@ -145,27 +199,156 @@ def rm():
     if not KnowledgebaseService.accessible4deletion(req["kb_id"], current_user.id):
         return get_json_result(
             data=False,
-            retmsg='No authorization.',
-            retcode=RetCode.AUTHENTICATION_ERROR
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
         )
     try:
         kbs = KnowledgebaseService.query(
             created_by=current_user.id, id=req["kb_id"])
         if not kbs:
             return get_json_result(
-                data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)
+                data=False, message='Only owner of knowledgebase authorized for this operation.',
+                code=settings.RetCode.OPERATING_ERROR)

         for doc in DocumentService.query(kb_id=req["kb_id"]):
             if not DocumentService.remove_document(doc, kbs[0].tenant_id):
                 return get_data_error_result(
-                    retmsg="Database error (Document removal)!")
+                    message="Database error (Document removal)!")
             f2d = File2DocumentService.get_by_document_id(doc.id)
-            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
+            if f2d:
+                FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
             File2DocumentService.delete_by_document_id(doc.id)
+        FileService.filter_delete(
+            [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
         if not KnowledgebaseService.delete_by_id(req["kb_id"]):
             return get_data_error_result(
-                retmsg="Database error (Knowledgebase removal)!")
+                message="Database error (Knowledgebase removal)!")
+        for kb in kbs:
+            settings.docStoreConn.delete({"kb_id": kb.id}, search.index_name(kb.tenant_id), kb.id)
+            settings.docStoreConn.deleteIdx(search.index_name(kb.tenant_id), kb.id)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
+
+
+@manager.route('/<kb_id>/tags', methods=['GET'])  # noqa: F821
+@login_required
+def list_tags(kb_id):
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+
+    tags = settings.retrievaler.all_tags(current_user.id, [kb_id])
+    return get_json_result(data=tags)
+
+
+@manager.route('/tags', methods=['GET'])  # noqa: F821
+@login_required
+def list_tags_from_kbs():
+    kb_ids = request.args.get("kb_ids", "").split(",")
+    for kb_id in kb_ids:
+        if not KnowledgebaseService.accessible(kb_id, current_user.id):
+            return get_json_result(
+                data=False,
+                message='No authorization.',
+                code=settings.RetCode.AUTHENTICATION_ERROR
+            )
+
+    tags = settings.retrievaler.all_tags(current_user.id, kb_ids)
+    return get_json_result(data=tags)
+
+
+@manager.route('/<kb_id>/rm_tags', methods=['POST'])  # noqa: F821
+@login_required
+def rm_tags(kb_id):
+    req = request.json
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+    e, kb = KnowledgebaseService.get_by_id(kb_id)
+
+    for t in req["tags"]:
+        settings.docStoreConn.update({"tag_kwd": t, "kb_id": [kb_id]},
+                                     {"remove": {"tag_kwd": t}},
+                                     search.index_name(kb.tenant_id),
+                                     kb_id)
+    return get_json_result(data=True)
+
+
+@manager.route('/<kb_id>/rename_tag', methods=['POST'])  # noqa: F821
+@login_required
+def rename_tags(kb_id):
+    req = request.json
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+    e, kb = KnowledgebaseService.get_by_id(kb_id)
+
+    settings.docStoreConn.update({"tag_kwd": req["from_tag"], "kb_id": [kb_id]},
+                                 {"remove": {"tag_kwd": req["from_tag"].strip()}, "add": {"tag_kwd": req["to_tag"]}},
+                                 search.index_name(kb.tenant_id),
+                                 kb_id)
+    return get_json_result(data=True)
+
+
+@manager.route('/<kb_id>/knowledge_graph', methods=['GET'])  # noqa: F821
+@login_required
+def knowledge_graph(kb_id):
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+    _, kb = KnowledgebaseService.get_by_id(kb_id)
+    req = {
+        "kb_id": [kb_id],
+        "knowledge_graph_kwd": ["graph"]
+    }
+
+    obj = {"graph": {}, "mind_map": {}}
+    if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
+        return get_json_result(data=obj)
+    sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
+    if not len(sres.ids):
+        return get_json_result(data=obj)
+
+    for id in sres.ids[:1]:
+        ty = sres.field[id]["knowledge_graph_kwd"]
+        try:
+            content_json = json.loads(sres.field[id]["content_with_weight"])
+        except Exception:
+            continue
+
+        obj[ty] = content_json
+
+    if "nodes" in obj["graph"]:
+        obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
+    if "edges" in obj["graph"]:
+        node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
+        filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
+        obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
+    return get_json_result(data=obj)
+
+
+@manager.route('/<kb_id>/knowledge_graph', methods=['DELETE'])  # noqa: F821
+@login_required
+def delete_knowledge_graph(kb_id):
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+    _, kb = KnowledgebaseService.get_by_id(kb_id)
+    settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), kb_id)
+
+    return get_json_result(data=True)
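Note — the GET `knowledge_graph` endpoint trims the stored graph before returning it: the 256 highest-pagerank nodes are kept, self-loops and edges touching dropped nodes are discarded, and the remainder is capped at the 128 heaviest edges. The same pruning rule as a standalone function:

    def prune_graph(graph, max_nodes=256, max_edges=128):
        nodes = sorted(graph.get("nodes", []),
                       key=lambda n: n.get("pagerank", 0), reverse=True)[:max_nodes]
        keep = {n["id"] for n in nodes}
        edges = [e for e in graph.get("edges", [])
                 if e["source"] != e["target"] and e["source"] in keep and e["target"] in keep]
        edges = sorted(edges, key=lambda e: e.get("weight", 0), reverse=True)[:max_edges]
        return {"nodes": nodes, "edges": edges}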
api/apps/langfuse_app.py (new file, 97 lines)
@@ -0,0 +1,97 @@
+#
+#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+
+from flask import request
+from flask_login import current_user, login_required
+from langfuse import Langfuse
+
+from api.db.db_models import DB
+from api.db.services.langfuse_service import TenantLangfuseService
+from api.utils.api_utils import get_error_data_result, get_json_result, server_error_response, validate_request
+
+
+@manager.route("/api_key", methods=["POST", "PUT"])  # noqa: F821
+@login_required
+@validate_request("secret_key", "public_key", "host")
+def set_api_key():
+    req = request.get_json()
+    secret_key = req.get("secret_key", "")
+    public_key = req.get("public_key", "")
+    host = req.get("host", "")
+    if not all([secret_key, public_key, host]):
+        return get_error_data_result(message="Missing required fields")
+
+    langfuse_keys = dict(
+        tenant_id=current_user.id,
+        secret_key=secret_key,
+        public_key=public_key,
+        host=host,
+    )
+
+    langfuse = Langfuse(public_key=langfuse_keys["public_key"], secret_key=langfuse_keys["secret_key"], host=langfuse_keys["host"])
+    if not langfuse.auth_check():
+        return get_error_data_result(message="Invalid Langfuse keys")
+
+    langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
+    with DB.atomic():
+        try:
+            if not langfuse_entry:
+                TenantLangfuseService.save(**langfuse_keys)
+            else:
+                TenantLangfuseService.update_by_tenant(tenant_id=current_user.id, langfuse_keys=langfuse_keys)
+            return get_json_result(data=langfuse_keys)
+        except Exception as e:
+            server_error_response(e)
+
+
+@manager.route("/api_key", methods=["GET"])  # noqa: F821
+@login_required
+@validate_request()
+def get_api_key():
+    langfuse_entry = TenantLangfuseService.filter_by_tenant_with_info(tenant_id=current_user.id)
+    if not langfuse_entry:
+        return get_json_result(message="Have not record any Langfuse keys.")
+
+    langfuse = Langfuse(public_key=langfuse_entry["public_key"], secret_key=langfuse_entry["secret_key"], host=langfuse_entry["host"])
+    try:
+        if not langfuse.auth_check():
+            return get_error_data_result(message="Invalid Langfuse keys loaded")
+    except langfuse.api.core.api_error.ApiError as api_err:
+        return get_json_result(message=f"Error from Langfuse: {api_err}")
+    except Exception as e:
+        server_error_response(e)
+
+    langfuse_entry["project_id"] = langfuse.api.projects.get().dict()["data"][0]["id"]
+    langfuse_entry["project_name"] = langfuse.api.projects.get().dict()["data"][0]["name"]
+
+    return get_json_result(data=langfuse_entry)
+
+
+@manager.route("/api_key", methods=["DELETE"])  # noqa: F821
+@login_required
+@validate_request()
+def delete_api_key():
+    langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
+    if not langfuse_entry:
+        return get_json_result(message="Have not record any Langfuse keys.")
+
+    with DB.atomic():
+        try:
+            TenantLangfuseService.delete_model(langfuse_entry)
+            return get_json_result(data=True)
+        except Exception as e:
+            server_error_response(e)
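Note — the new langfuse_app.py verifies supplied keys with `Langfuse.auth_check()` before persisting them, and wraps database writes in `DB.atomic()`. A minimal standalone version of that credential check, matching the SDK usage in the file above; the keys and host are placeholders, not working credentials:

    from langfuse import Langfuse

    client = Langfuse(
        public_key="pk-lf-...",              # placeholder
        secret_key="sk-lf-...",              # placeholder
        host="https://cloud.langfuse.com",
    )
    if client.auth_check():                  # round-trips to the Langfuse API
        print("keys accepted")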
@@ -13,21 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import json
+import os
 from flask import request
 from flask_login import login_required, current_user
 from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
-from api.settings import LIGHTEN
+from api import settings
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.db import StatusEnum, LLMType
 from api.db.db_models import TenantLLM
 from api.utils.api_utils import get_json_result
+from api.utils.file_utils import get_project_base_directory
 from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
-import requests


-@manager.route('/factories', methods=['GET'])
+@manager.route('/factories', methods=['GET'])  # noqa: F821
 @login_required
 def factories():
     try:
@@ -49,7 +50,7 @@ def factories():
         return server_error_response(e)


-@manager.route('/set_api_key', methods=['POST'])
+@manager.route('/set_api_key', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory", "api_key")
 def set_api_key():
@@ -60,6 +61,7 @@ def set_api_key():
     msg = ""
     for llm in LLMService.query(fid=factory):
         if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
+            assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
             mdl = EmbeddingModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
@@ -70,18 +72,20 @@ def set_api_key():
             except Exception as e:
                 msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
         elif not chat_passed and llm.model_type == LLMType.CHAT.value:
+            assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
             mdl = ChatModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
                 m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
-                                 {"temperature": 0.9,'max_tokens':50})
-                if m.find("**ERROR**") >=0:
+                                 {"temperature": 0.9, 'max_tokens': 50})
+                if m.find("**ERROR**") >= 0:
                     raise Exception(m)
                 chat_passed = True
             except Exception as e:
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
         elif not rerank_passed and llm.model_type == LLMType.RERANK:
+            assert factory in RerankModel, f"Re-rank model from {factory} is not supported yet."
             mdl = RerankModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:
@@ -89,7 +93,7 @@ def set_api_key():
                 if len(arr) == 0 or tc == 0:
                     raise Exception("Fail")
                 rerank_passed = True
-                print(f'passed model rerank{llm.llm_name}',flush=True)
+                logging.debug(f'passed model rerank {llm.llm_name}')
             except Exception as e:
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
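Note — `set_api_key()` validates a key by making one cheap call per model type (embed a short string, send a one-line chat, rerank a trivial query) and accumulates failures into `msg` rather than aborting on the first. The probing shape in isolation, with stand-in callables instead of ragflow model wrappers:

    def probe(models):
        """models: iterable of (name, callable) pairs; each callable makes one cheap API call."""
        errors = []
        for name, call in models:
            try:
                call()
            except Exception as exc:  # accumulate, as the handler above does; do not abort
                errors.append(f"Fail to access model({name}): {exc}")
        return "\n".join(errors)      # empty string means the key passed every probe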
@ -98,7 +102,7 @@ def set_api_key():
|
|||||||
break
|
break
|
||||||
|
|
||||||
if msg:
|
if msg:
|
||||||
return get_data_error_result(retmsg=msg)
|
return get_data_error_result(message=msg)
|
||||||
|
|
||||||
llm_config = {
|
llm_config = {
|
||||||
"api_key": req["api_key"],
|
"api_key": req["api_key"],
|
||||||
@ -109,6 +113,7 @@ def set_api_key():
|
|||||||
llm_config[n] = req[n]
|
llm_config[n] = req[n]
|
||||||
|
|
||||||
for llm in LLMService.query(fid=factory):
|
for llm in LLMService.query(fid=factory):
|
||||||
|
llm_config["max_tokens"]=llm.max_tokens
|
||||||
if not TenantLLMService.filter_update(
|
if not TenantLLMService.filter_update(
|
||||||
[TenantLLM.tenant_id == current_user.id,
|
[TenantLLM.tenant_id == current_user.id,
|
||||||
TenantLLM.llm_factory == factory,
|
TenantLLM.llm_factory == factory,
|
||||||
@ -120,18 +125,21 @@ def set_api_key():
|
|||||||
llm_name=llm.llm_name,
|
llm_name=llm.llm_name,
|
||||||
model_type=llm.model_type,
|
model_type=llm.model_type,
|
||||||
api_key=llm_config["api_key"],
|
api_key=llm_config["api_key"],
|
||||||
api_base=llm_config["api_base"]
|
api_base=llm_config["api_base"],
|
||||||
|
max_tokens=llm_config["max_tokens"]
|
||||||
)
|
)
|
||||||
|
|
||||||
return get_json_result(data=True)
|
return get_json_result(data=True)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/add_llm', methods=['POST'])
|
@manager.route('/add_llm', methods=['POST']) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
@validate_request("llm_factory")
|
@validate_request("llm_factory")
|
||||||
def add_llm():
|
def add_llm():
|
||||||
req = request.json
|
req = request.json
|
||||||
factory = req["llm_factory"]
|
factory = req["llm_factory"]
|
||||||
|
api_key = req.get("api_key", "x")
|
||||||
|
llm_name = req.get("llm_name")
|
||||||
|
|
||||||
def apikey_json(keys):
|
def apikey_json(keys):
|
||||||
nonlocal req
|
nonlocal req
|
||||||
@ -140,7 +148,6 @@ def add_llm():
|
|||||||
if factory == "VolcEngine":
|
if factory == "VolcEngine":
|
||||||
# For VolcEngine, due to its special authentication method
|
# For VolcEngine, due to its special authentication method
|
||||||
# Assemble ark_api_key endpoint_id into api_key
|
# Assemble ark_api_key endpoint_id into api_key
|
||||||
llm_name = req["llm_name"]
|
|
||||||
api_key = apikey_json(["ark_api_key", "endpoint_id"])
|
api_key = apikey_json(["ark_api_key", "endpoint_id"])
|
||||||
|
|
||||||
elif factory == "Tencent Hunyuan":
|
elif factory == "Tencent Hunyuan":
|
||||||
@@ -149,165 +156,159 @@ def add_llm():
 
     elif factory == "Tencent Cloud":
         req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
+        return set_api_key()
 
     elif factory == "Bedrock":
         # For Bedrock, due to its special authentication method
         # Assemble bedrock_ak, bedrock_sk, bedrock_region
-        llm_name = req["llm_name"]
         api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])
 
     elif factory == "LocalAI":
-        llm_name = req["llm_name"]+"___LocalAI"
-        api_key = "xxxxxxxxxxxxxxx"
+        llm_name += "___LocalAI"
 
     elif factory == "HuggingFace":
-        llm_name = req["llm_name"]+"___HuggingFace"
-        api_key = "xxxxxxxxxxxxxxx"
+        llm_name += "___HuggingFace"
 
     elif factory == "OpenAI-API-Compatible":
-        llm_name = req["llm_name"]+"___OpenAI-API"
-        api_key = req.get("api_key","xxxxxxxxxxxxxxx")
+        llm_name += "___OpenAI-API"
 
-    elif factory =="XunFei Spark":
-        llm_name = req["llm_name"]
+    elif factory == "VLLM":
+        llm_name += "___VLLM"
+
+    elif factory == "XunFei Spark":
         if req["model_type"] == "chat":
-            api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
+            api_key = req.get("spark_api_password", "")
         elif req["model_type"] == "tts":
-            api_key = apikey_json(["spark_app_id", "spark_api_secret","spark_api_key"])
+            api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])
 
     elif factory == "BaiduYiyan":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["yiyan_ak", "yiyan_sk"])
 
     elif factory == "Fish Audio":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["fish_audio_ak", "fish_audio_refid"])
 
     elif factory == "Google Cloud":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])
 
     elif factory == "Azure-OpenAI":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["api_key", "api_version"])
 
-    else:
-        llm_name = req["llm_name"]
-        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
 
     llm = {
         "tenant_id": current_user.id,
         "llm_factory": factory,
         "model_type": req["model_type"],
         "llm_name": llm_name,
         "api_base": req.get("api_base", ""),
-        "api_key": api_key
+        "api_key": api_key,
+        "max_tokens": req.get("max_tokens")
     }
 
     msg = ""
+    mdl_nm = llm["llm_name"].split("___")[0]
     if llm["model_type"] == LLMType.EMBEDDING.value:
+        assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
         mdl = EmbeddingModel[factory](
             key=llm['api_key'],
-            model_name=llm["llm_name"],
+            model_name=mdl_nm,
             base_url=llm["api_base"])
         try:
            arr, tc = mdl.encode(["Test if the api key is available"])
-            if len(arr[0]) == 0 or tc == 0:
+            if len(arr[0]) == 0:
                 raise Exception("Fail")
         except Exception as e:
-            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
+            msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
     elif llm["model_type"] == LLMType.CHAT.value:
+        assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
         mdl = ChatModel[factory](
             key=llm['api_key'],
-            model_name=llm["llm_name"],
+            model_name=mdl_nm,
             base_url=llm["api_base"]
         )
         try:
             m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                 "temperature": 0.9})
-            if not tc:
+            if not tc and m.find("**ERROR**:") >= 0:
                 raise Exception(m)
         except Exception as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(
+            msg += f"\nFail to access model({mdl_nm})." + str(
                 e)
     elif llm["model_type"] == LLMType.RERANK:
-        mdl = RerankModel[factory](
-            key=llm["api_key"],
-            model_name=llm["llm_name"],
-            base_url=llm["api_base"]
-        )
+        assert factory in RerankModel, f"RE-rank model from {factory} is not supported yet."
         try:
-            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!"])
-            if len(arr) == 0 or tc == 0:
+            mdl = RerankModel[factory](
+                key=llm["api_key"],
+                model_name=mdl_nm,
+                base_url=llm["api_base"]
+            )
+            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
+            if len(arr) == 0:
                 raise Exception("Not known.")
+        except KeyError:
+            msg += f"{factory} dose not support this model({mdl_nm})"
         except Exception as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(
+            msg += f"\nFail to access model({mdl_nm})." + str(
                 e)
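Note the restructuring above: the `RerankModel[factory]` lookup now happens inside the `try`, so an unregistered factory surfaces as a caught `KeyError` reported through `msg` instead of an unhandled 500. A standalone sketch of that pattern (the registry dict here is a stand-in, not ragflow's):

# Stand-in registry; ragflow's RerankModel maps factory names to model classes.
RerankModel = {"KnownVendor": object}

msg = ""
factory, mdl_nm = "UnknownVendor", "some-reranker"
try:
    mdl_cls = RerankModel[factory]  # raises KeyError for an unsupported factory
except KeyError:
    msg += f"{factory} does not support this model({mdl_nm})"

print(msg)  # UnknownVendor does not support this model(some-reranker)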
     elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
+        assert factory in CvModel, f"Image to text model from {factory} is not supported yet."
         mdl = CvModel[factory](
             key=llm["api_key"],
-            model_name=llm["llm_name"],
+            model_name=mdl_nm,
             base_url=llm["api_base"]
         )
         try:
-            img_url = (
-                "https://upload.wikimedia.org/wikipedia/comm"
-                "ons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/256"
-                "0px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
-            )
-            res = requests.get(img_url)
-            if res.status_code == 200:
-                m, tc = mdl.describe(res.content)
-                if not tc:
+            with open(os.path.join(get_project_base_directory(), "web/src/assets/yay.jpg"), "rb") as f:
+                m, tc = mdl.describe(f.read())
+                if not m and not tc:
                     raise Exception(m)
-            else:
-                pass
         except Exception as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
+            msg += f"\nFail to access model({mdl_nm})." + str(e)
     elif llm["model_type"] == LLMType.TTS:
+        assert factory in TTSModel, f"TTS model from {factory} is not supported yet."
         mdl = TTSModel[factory](
-            key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
+            key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
         )
         try:
             for resp in mdl.tts("Hello~ Ragflower!"):
                 pass
         except RuntimeError as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
+            msg += f"\nFail to access model({mdl_nm})." + str(e)
     else:
         # TODO: check other type of models
         pass
 
     if msg:
-        return get_data_error_result(retmsg=msg)
+        return get_data_error_result(message=msg)
 
     if not TenantLLMService.filter_update(
-            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
+            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory,
+             TenantLLM.llm_name == llm["llm_name"]], llm):
         TenantLLMService.save(**llm)
 
     return get_json_result(data=True)
 
 
-@manager.route('/delete_llm', methods=['POST'])
+@manager.route('/delete_llm', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory", "llm_name")
 def delete_llm():
     req = request.json
     TenantLLMService.filter_delete(
-        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
+        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"],
+         TenantLLM.llm_name == req["llm_name"]])
     return get_json_result(data=True)
 
 
-@manager.route('/delete_factory', methods=['POST'])
+@manager.route('/delete_factory', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("llm_factory")
 def delete_factory():
     req = request.json
     TenantLLMService.filter_delete(
         [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
     return get_json_result(data=True)
 
 
-@manager.route('/my_llms', methods=['GET'])
+@manager.route('/my_llms', methods=['GET'])  # noqa: F821
 @login_required
 def my_llms():
     try:
@@ -328,11 +329,11 @@ def my_llms():
         return server_error_response(e)
 
 
-@manager.route('/list', methods=['GET'])
+@manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_app():
-    self_deploied = ["Youdao","FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
-    weighted = ["Youdao","FastEmbed", "BAAI"] if LIGHTEN != 0 else []
+    self_deployed = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
+    weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
     model_type = request.args.get("model_type")
     try:
         objs = TenantLLMService.query(tenant_id=current_user.id)
@@ -341,17 +342,17 @@ def list_app():
         llms = [m.to_dict()
                 for m in llms if m.status == StatusEnum.VALID.value and m.fid not in weighted]
         for m in llms:
-            m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied
+            m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deployed
 
-        llm_set = set([m["llm_name"]+"@"+m["fid"] for m in llms])
+        llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
         for o in objs:
-            if not o.api_key:continue
-            if o.llm_name+"@"+o.llm_factory in llm_set:continue
+            if o.llm_name + "@" + o.llm_factory in llm_set:
+                continue
             llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})
 
         res = {}
         for m in llms:
-            if model_type and m["model_type"].find(model_type)<0:
+            if model_type and m["model_type"].find(model_type) < 0:
                 continue
             if m["fid"] not in res:
                 res[m["fid"]] = []
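A detail worth noting across `add_llm`: locally served factories get a vendor suffix appended to the stored model name (`___LocalAI`, `___HuggingFace`, `___OpenAI-API`, and now `___VLLM`), and the new `mdl_nm = llm["llm_name"].split("___")[0]` strips it again before probing the backend, which only knows the bare name. For example (the model name is hypothetical):

llm_name = "qwen2:7b" + "___OpenAI-API"  # name as stored, disambiguated by vendor
mdl_nm = llm_name.split("___")[0]        # bare name sent to the serving backend
print(llm_name, "->", mdl_nm)            # qwen2:7b___OpenAI-API -> qwen2:7b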
api/apps/plugin_app.py (new file)
@@ -0,0 +1,12 @@
+from flask import Response
+from flask_login import login_required
+from api.utils.api_utils import get_json_result
+from plugin import GlobalPluginManager
+
+@manager.route('/llm_tools', methods=['GET'])  # noqa: F821
+@login_required
+def llm_tools() -> Response:
+    tools = GlobalPluginManager.get_llm_tools()
+    tools_metadata = [t.get_metadata() for t in tools]
+
+    return get_json_result(data=tools_metadata)
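`/llm_tools` is guarded by `@login_required`, so it expects the web session's authorization header rather than an SDK API key. A rough client sketch with `requests`; the host, the `/v1/plugin` path prefix (inferred from the `plugin_app.py` file name), and the token are all assumptions:

import requests

BASE = "http://localhost:9380"  # assumed deployment address
headers = {"Authorization": "<auth token issued at login>"}  # placeholder credential

resp = requests.get(f"{BASE}/v1/plugin/llm_tools", headers=headers)
print(resp.json())  # expected shape: {"code": 0, "data": [...tool metadata...]}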
api/apps/sdk/agent.py (new file)
@@ -0,0 +1,128 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import time
+from typing import Any, cast
+from api.db.services.canvas_service import UserCanvasService
+from api.db.services.user_canvas_version import UserCanvasVersionService
+from api.settings import RetCode
+from api.utils import get_uuid
+from api.utils.api_utils import get_data_error_result, get_error_data_result, get_json_result, token_required
+from api.utils.api_utils import get_result
+from flask import request
+
+@manager.route('/agents', methods=['GET'])  # noqa: F821
+@token_required
+def list_agents(tenant_id):
+    id = request.args.get("id")
+    title = request.args.get("title")
+    if id or title:
+        canvas = UserCanvasService.query(id=id, title=title, user_id=tenant_id)
+        if not canvas:
+            return get_error_data_result("The agent doesn't exist.")
+    page_number = int(request.args.get("page", 1))
+    items_per_page = int(request.args.get("page_size", 30))
+    orderby = request.args.get("orderby", "update_time")
+    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
+        desc = False
+    else:
+        desc = True
+    canvas = UserCanvasService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,title)
+    return get_result(data=canvas)
+
+
+@manager.route("/agents", methods=["POST"])  # noqa: F821
+@token_required
+def create_agent(tenant_id: str):
+    req: dict[str, Any] = cast(dict[str, Any], request.json)
+    req["user_id"] = tenant_id
+
+    if req.get("dsl") is not None:
+        if not isinstance(req["dsl"], str):
+            req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
+
+        req["dsl"] = json.loads(req["dsl"])
+    else:
+        return get_json_result(data=False, message="No DSL data in request.", code=RetCode.ARGUMENT_ERROR)
+
+    if req.get("title") is not None:
+        req["title"] = req["title"].strip()
+    else:
+        return get_json_result(data=False, message="No title in request.", code=RetCode.ARGUMENT_ERROR)
+
+    if UserCanvasService.query(user_id=tenant_id, title=req["title"]):
+        return get_data_error_result(message=f"Agent with title {req['title']} already exists.")
+
+    agent_id = get_uuid()
+    req["id"] = agent_id
+
+    if not UserCanvasService.save(**req):
+        return get_data_error_result(message="Fail to create agent.")
+
+    UserCanvasVersionService.insert(
+        user_canvas_id=agent_id,
+        title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
+        dsl=req["dsl"]
+    )
+
+    return get_json_result(data=True)
+
+
+@manager.route("/agents/<agent_id>", methods=["PUT"])  # noqa: F821
+@token_required
+def update_agent(tenant_id: str, agent_id: str):
+    req: dict[str, Any] = {k: v for k, v in cast(dict[str, Any], request.json).items() if v is not None}
+    req["user_id"] = tenant_id
+
+    if req.get("dsl") is not None:
+        if not isinstance(req["dsl"], str):
+            req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
+
+        req["dsl"] = json.loads(req["dsl"])
+
+    if req.get("title") is not None:
+        req["title"] = req["title"].strip()
+
+    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
+        return get_json_result(
+            data=False, message="Only owner of canvas authorized for this operation.",
+            code=RetCode.OPERATING_ERROR)
+
+    UserCanvasService.update_by_id(agent_id, req)
+
+    if req.get("dsl") is not None:
+        UserCanvasVersionService.insert(
+            user_canvas_id=agent_id,
+            title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
+            dsl=req["dsl"]
+        )
+
+        UserCanvasVersionService.delete_all_versions(agent_id)
+
+    return get_json_result(data=True)
+
+
+@manager.route("/agents/<agent_id>", methods=["DELETE"])  # noqa: F821
+@token_required
+def delete_agent(tenant_id: str, agent_id: str):
+    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
+        return get_json_result(
+            data=False, message="Only owner of canvas authorized for this operation.",
+            code=RetCode.OPERATING_ERROR)
+
+    UserCanvasService.delete_by_id(agent_id)
+    return get_json_result(data=True)
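The new `/agents` routes are SDK endpoints (`@token_required`), authenticated with a Bearer API key rather than a login session. A hedged usage sketch; the `/api/v1` prefix and the DSL payload are assumptions for illustration:

import requests

BASE = "http://localhost:9380/api/v1"  # assumed SDK prefix
headers = {"Authorization": "Bearer <RAGFLOW_API_KEY>"}  # placeholder key

# Create an agent: per the handler above, both "title" and "dsl" are required.
dsl = {"components": {}, "graph": {}}  # hypothetical minimal canvas DSL
r = requests.post(f"{BASE}/agents", json={"title": "demo-agent", "dsl": dsl}, headers=headers)
print(r.json())  # {"code": 0, "data": true} on success

# List agents; the handler defaults to page=1, page_size=30, ordered by update_time desc.
r = requests.get(f"{BASE}/agents", params={"page": 1, "page_size": 30}, headers=headers)
print(r.json())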
@@ -13,58 +13,57 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 
 from flask import request
-from api.settings import RetCode
+
+from api import settings
 from api.db import StatusEnum
 from api.db.services.dialog_service import DialogService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import TenantLLMService
 from api.db.services.user_service import TenantService
 from api.utils import get_uuid
-from api.utils.api_utils import get_error_data_result, token_required
-from api.utils.api_utils import get_result
+from api.utils.api_utils import check_duplicate_ids, get_error_data_result, get_result, token_required
 
 
-@manager.route('/chats', methods=['POST'])
+@manager.route("/chats", methods=["POST"])  # noqa: F821
 @token_required
 def create(tenant_id):
-    req=request.json
-    ids= req.get("dataset_ids")
-    if not ids:
-        return get_error_data_result(retmsg="`dataset_ids` is required")
+    req = request.json
+    ids = [i for i in req.get("dataset_ids", []) if i]
     for kb_id in ids:
-        kbs = KnowledgebaseService.query(id=kb_id,tenant_id=tenant_id)
+        kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
         if not kbs:
             return get_error_data_result(f"You don't own the dataset {kb_id}")
-        kb=kbs[0]
+        kbs = KnowledgebaseService.query(id=kb_id)
+        kb = kbs[0]
         if kb.chunk_num == 0:
             return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
-    kbs = KnowledgebaseService.get_by_ids(ids)
-    embd_count = list(set([kb.embd_id for kb in kbs]))
-    if len(embd_count) != 1:
-        return get_result(retmsg='Datasets use different embedding models."',retcode=RetCode.AUTHENTICATION_ERROR)
+
+    kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
+    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+    embd_count = list(set(embd_ids))
+    if len(embd_count) > 1:
+        return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
     req["kb_ids"] = ids
     # llm
     llm = req.get("llm")
     if llm:
         if "model_name" in llm:
             req["llm_id"] = llm.pop("model_name")
-            if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
-                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
+        if req.get("llm_id") is not None:
+            llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
+            if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
+                return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
         req["llm_setting"] = req.pop("llm")
     e, tenant = TenantService.get_by_id(tenant_id)
     if not e:
-        return get_error_data_result(retmsg="Tenant not found!")
+        return get_error_data_result(message="Tenant not found!")
     # prompt
     prompt = req.get("prompt")
-    key_mapping = {"parameters": "variables",
-                   "prologue": "opener",
-                   "quote": "show_quote",
-                   "system": "prompt",
-                   "rerank_id": "rerank_model",
-                   "vector_similarity_weight": "keywords_similarity_weight"}
-    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
+    key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
+    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
     if prompt:
         for new_key, old_key in key_mapping.items():
             if old_key in prompt:
@@ -81,17 +80,18 @@ def create(tenant_id):
     req["top_k"] = req.get("top_k", 1024)
     req["rerank_id"] = req.get("rerank_id", "")
     if req.get("rerank_id"):
-        if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
+        value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
+        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
             return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
     if not req.get("llm_id"):
         req["llm_id"] = tenant.llm_id
     if not req.get("name"):
-        return get_error_data_result(retmsg="`name` is required.")
+        return get_error_data_result(message="`name` is required.")
     if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
-        return get_error_data_result(retmsg="Duplicated chat name in creating chat.")
+        return get_error_data_result(message="Duplicated chat name in creating chat.")
     # tenant_id
     if req.get("tenant_id"):
-        return get_error_data_result(retmsg="`tenant_id` must not be provided.")
+        return get_error_data_result(message="`tenant_id` must not be provided.")
     req["tenant_id"] = tenant_id
     # prompt more parameter
     default_prompt = {
@@ -100,31 +100,31 @@ def create(tenant_id):
       {knowledge}
       The above is the knowledge base.""",
         "prologue": "Hi! I'm your assistant, what can I do for you?",
-        "parameters": [
-            {"key": "knowledge", "optional": False}
-        ],
-        "empty_response": "Sorry! No relevant content was found in the knowledge base!"
+        "parameters": [{"key": "knowledge", "optional": False}],
+        "empty_response": "Sorry! No relevant content was found in the knowledge base!",
+        "quote": True,
+        "tts": False,
+        "refine_multiturn": True,
     }
-    key_list_2 = ["system", "prologue", "parameters", "empty_response"]
+    key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
     if "prompt_config" not in req:
-        req['prompt_config'] = {}
+        req["prompt_config"] = {}
     for key in key_list_2:
-        temp = req['prompt_config'].get(key)
-        if not temp:
-            req['prompt_config'][key] = default_prompt[key]
-    for p in req['prompt_config']["parameters"]:
+        temp = req["prompt_config"].get(key)
+        if (not temp and key == "system") or (key not in req["prompt_config"]):
+            req["prompt_config"][key] = default_prompt[key]
+    for p in req["prompt_config"]["parameters"]:
         if p["optional"]:
             continue
-        if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
-            return get_error_data_result(
-                retmsg="Parameter '{}' is not used".format(p["key"]))
+        if req["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
+            return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
     # save
     if not DialogService.save(**req):
-        return get_error_data_result(retmsg="Fail to new a chat!")
+        return get_error_data_result(message="Fail to new a chat!")
     # response
     e, res = DialogService.get_by_id(req["id"])
     if not e:
-        return get_error_data_result(retmsg="Fail to new a chat!")
+        return get_error_data_result(message="Fail to new a chat!")
     res = res.to_json()
     renamed_dict = {}
     for key, value in res["prompt_config"].items():
@@ -132,10 +132,7 @@ def create(tenant_id):
         renamed_dict[new_key] = value
     res["prompt"] = renamed_dict
     del res["prompt_config"]
-    new_dict = {"similarity_threshold": res["similarity_threshold"],
-                "keywords_similarity_weight": res["vector_similarity_weight"],
-                "top_n": res["top_n"],
-                "rerank_model": res['rerank_id']}
+    new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
     res["prompt"].update(new_dict)
     for key in key_list:
         del res[key]
@@ -146,55 +143,46 @@ def create(tenant_id):
     res["avatar"] = res.pop("icon")
     return get_result(data=res)
 
-@manager.route('/chats/<chat_id>', methods=['PUT'])
+
+@manager.route("/chats/<chat_id>", methods=["PUT"])  # noqa: F821
 @token_required
-def update(tenant_id,chat_id):
+def update(tenant_id, chat_id):
     if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
-        return get_error_data_result(retmsg='You do not own the chat')
-    req =request.json
+        return get_error_data_result(message="You do not own the chat")
+    req = request.json
     ids = req.get("dataset_ids")
     if "show_quotation" in req:
-        req["do_refer"]=req.pop("show_quotation")
-    if "dataset_ids" in req:
-        if not ids:
-            return get_error_data_result("`datasets` can't be empty")
-        if ids:
-            for kb_id in ids:
-                kbs = KnowledgebaseService.query(id=kb_id, tenant_id=tenant_id)
-                if not kbs:
-                    return get_error_data_result(f"You don't own the dataset {kb_id}")
-                kb = kbs[0]
-                if kb.chunk_num == 0:
-                    return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
-            kbs = KnowledgebaseService.get_by_ids(ids)
-            embd_count=list(set([kb.embd_id for kb in kbs]))
-            if len(embd_count) != 1 :
-                return get_result(
-                    retmsg='Datasets use different embedding models."',
-                    retcode=RetCode.AUTHENTICATION_ERROR)
-            req["kb_ids"] = ids
+        req["do_refer"] = req.pop("show_quotation")
+    if ids is not None:
+        for kb_id in ids:
+            kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
+            if not kbs:
+                return get_error_data_result(f"You don't own the dataset {kb_id}")
+            kbs = KnowledgebaseService.query(id=kb_id)
+            kb = kbs[0]
+            if kb.chunk_num == 0:
+                return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
+
+        kbs = KnowledgebaseService.get_by_ids(ids)
+        embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+        embd_count = list(set(embd_ids))
+        if len(embd_count) != 1:
+            return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
+        req["kb_ids"] = ids
     llm = req.get("llm")
     if llm:
         if "model_name" in llm:
             req["llm_id"] = llm.pop("model_name")
-            if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
+            if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
                 return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
         req["llm_setting"] = req.pop("llm")
     e, tenant = TenantService.get_by_id(tenant_id)
     if not e:
-        return get_error_data_result(retmsg="Tenant not found!")
-    if req.get("rerank_model"):
-        if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_model"),model_type="rerank"):
-            return get_error_data_result(f"`rerank_model` {req.get('rerank_model')} doesn't exist")
+        return get_error_data_result(message="Tenant not found!")
     # prompt
     prompt = req.get("prompt")
-    key_mapping = {"parameters": "variables",
-                   "prologue": "opener",
-                   "quote": "show_quote",
-                   "system": "prompt",
-                   "rerank_id": "rerank_model",
-                   "vector_similarity_weight": "keywords_similarity_weight"}
-    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
+    key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
+    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
     if prompt:
         for new_key, old_key in key_mapping.items():
             if old_key in prompt:
@@ -205,20 +193,22 @@ def update(tenant_id,chat_id):
         req["prompt_config"] = req.pop("prompt")
     e, res = DialogService.get_by_id(chat_id)
     res = res.to_json()
+    if req.get("rerank_id"):
+        value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
+        if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
+            return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
     if "name" in req:
         if not req.get("name"):
-            return get_error_data_result(retmsg="`name` is not empty.")
-        if req["name"].lower() != res["name"].lower() \
-                and len(
-            DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
-            return get_error_data_result(retmsg="Duplicated chat name in updating dataset.")
+            return get_error_data_result(message="`name` cannot be empty.")
+        if req["name"].lower() != res["name"].lower() and len(DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
+            return get_error_data_result(message="Duplicated chat name in updating chat.")
     if "prompt_config" in req:
         res["prompt_config"].update(req["prompt_config"])
         for p in res["prompt_config"]["parameters"]:
             if p["optional"]:
                 continue
             if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
-                return get_error_data_result(retmsg="Parameter '{}' is not used".format(p["key"]))
+                return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
     if "llm_setting" in req:
         res["llm_setting"].update(req["llm_setting"])
     req["prompt_config"] = res["prompt_config"]
@@ -229,70 +219,91 @@ def update(tenant_id,chat_id):
     if "dataset_ids" in req:
         req.pop("dataset_ids")
     if not DialogService.update_by_id(chat_id, req):
-        return get_error_data_result(retmsg="Chat not found!")
+        return get_error_data_result(message="Chat not found!")
     return get_result()
 
 
-@manager.route('/chats', methods=['DELETE'])
+@manager.route("/chats", methods=["DELETE"])  # noqa: F821
 @token_required
 def delete(tenant_id):
+    errors = []
+    success_count = 0
     req = request.json
     if not req:
-        ids=None
+        ids = None
     else:
-        ids=req.get("ids")
+        ids = req.get("ids")
     if not ids:
         id_list = []
-        dias=DialogService.query(tenant_id=tenant_id,status=StatusEnum.VALID.value)
+        dias = DialogService.query(tenant_id=tenant_id, status=StatusEnum.VALID.value)
         for dia in dias:
             id_list.append(dia.id)
     else:
-        id_list=ids
-    for id in id_list:
+        id_list = ids
+
+    unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "assistant")
+
+    for id in unique_id_list:
         if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
-            return get_error_data_result(retmsg=f"You don't own the chat {id}")
+            errors.append(f"Assistant({id}) not found.")
+            continue
         temp_dict = {"status": StatusEnum.INVALID.value}
         DialogService.update_by_id(id, temp_dict)
+        success_count += 1
+
+    if errors:
+        if success_count > 0:
+            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} chats with {len(errors)} errors")
+        else:
+            return get_error_data_result(message="; ".join(errors))
+
+    if duplicate_messages:
+        if success_count > 0:
+            return get_result(message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
+        else:
+            return get_error_data_result(message=";".join(duplicate_messages))
+
     return get_result()
 
-@manager.route('/chats', methods=['GET'])
+
+@manager.route("/chats", methods=["GET"])  # noqa: F821
 @token_required
 def list_chat(tenant_id):
     id = request.args.get("id")
     name = request.args.get("name")
-    chat = DialogService.query(id=id,name=name,status=StatusEnum.VALID.value)
-    if not chat:
-        return get_error_data_result(retmsg="The chat doesn't exist")
+    if id or name:
+        chat = DialogService.query(id=id, name=name, status=StatusEnum.VALID.value, tenant_id=tenant_id)
+        if not chat:
+            return get_error_data_result(message="The chat doesn't exist")
     page_number = int(request.args.get("page", 1))
-    items_per_page = int(request.args.get("page_size", 1024))
+    items_per_page = int(request.args.get("page_size", 30))
     orderby = request.args.get("orderby", "create_time")
     if request.args.get("desc") == "False" or request.args.get("desc") == "false":
         desc = False
     else:
         desc = True
-    chats = DialogService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,name)
+    chats = DialogService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, name)
     if not chats:
         return get_result(data=[])
     list_assts = []
-    renamed_dict = {}
-    key_mapping = {"parameters": "variables",
-                   "prologue": "opener",
-                   "quote": "show_quote",
-                   "system": "prompt",
-                   "rerank_id": "rerank_model",
-                   "vector_similarity_weight": "keywords_similarity_weight",
-                   "do_refer":"show_quotation"}
+    key_mapping = {
+        "parameters": "variables",
+        "prologue": "opener",
+        "quote": "show_quote",
+        "system": "prompt",
+        "rerank_id": "rerank_model",
+        "vector_similarity_weight": "keywords_similarity_weight",
+        "do_refer": "show_quotation",
+    }
     key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
     for res in chats:
+        renamed_dict = {}
         for key, value in res["prompt_config"].items():
             new_key = key_mapping.get(key, key)
             renamed_dict[new_key] = value
         res["prompt"] = renamed_dict
         del res["prompt_config"]
-        new_dict = {"similarity_threshold": res["similarity_threshold"],
-                    "keywords_similarity_weight": res["vector_similarity_weight"],
-                    "top_n": res["top_n"],
-                    "rerank_model": res['rerank_id']}
+        new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
         res["prompt"].update(new_dict)
         for key in key_list:
             del res[key]
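`check_duplicate_ids` (imported from `api.utils.api_utils`) is not shown in this diff; from the call site it evidently returns the de-duplicated ID list plus one message per duplicate, which `delete` then folds into its partial-success response. A sketch of that presumed contract:

def check_duplicate_ids(id_list, entity="assistant"):
    # Presumed contract, inferred from the call site above; not the committed implementation.
    seen, unique, messages = set(), [], []
    for i in id_list:
        if i in seen:
            messages.append(f"Duplicate {entity} ids: {i}")
        else:
            seen.add(i)
            unique.append(i)
    return unique, messages

print(check_duplicate_ids(["a", "b", "a"]))
# (['a', 'b'], ['Duplicate assistant ids: a'])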
@@ -301,8 +312,9 @@ def list_chat(tenant_id):
         kb_list = []
         for kb_id in res["kb_ids"]:
             kb = KnowledgebaseService.query(id=kb_id)
-            if not kb :
-                return get_error_data_result(retmsg=f"Don't exist the kb {kb_id}")
+            if not kb:
+                logging.warning(f"The kb {kb_id} does not exist.")
+                continue
             kb_list.append(kb[0].to_json())
         del res["kb_ids"]
         res["datasets"] = kb_list
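One behavioral fix threaded through both `create` and `list_chat` above: the response now reports `keywords_similarity_weight` as `1 - vector_similarity_weight`, since the two weights are complements; previously the vector weight was echoed back unchanged. Worked on a sample row (values are hypothetical):

res = {"similarity_threshold": 0.2, "vector_similarity_weight": 0.3, "top_n": 6, "rerank_id": ""}
prompt = {
    "similarity_threshold": res["similarity_threshold"],
    "keywords_similarity_weight": 1 - res["vector_similarity_weight"],  # complement, per the fix
    "top_n": res["top_n"],
    "rerank_model": res["rerank_id"],
}
print(prompt["keywords_similarity_weight"])  # 0.7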
@ -1,232 +1,491 @@
|
|||||||
#
|
#
|
||||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
# You may obtain a copy of the License at
|
# You may obtain a copy of the License at
|
||||||
#
|
#
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
#
|
#
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
from flask import request
|
|
||||||
from api.db import StatusEnum, FileSource
|
import logging
|
||||||
from api.db.db_models import File
|
|
||||||
from api.db.services.document_service import DocumentService
|
from flask import request
|
||||||
from api.db.services.file2document_service import File2DocumentService
|
from peewee import OperationalError
|
||||||
from api.db.services.file_service import FileService
|
|
||||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
from api.db import FileSource, StatusEnum
|
||||||
from api.db.services.llm_service import TenantLLMService,LLMService
|
from api.db.db_models import File
|
||||||
from api.db.services.user_service import TenantService
|
from api.db.services.document_service import DocumentService
|
||||||
from api.settings import RetCode
|
from api.db.services.file2document_service import File2DocumentService
|
||||||
from api.utils import get_uuid
|
from api.db.services.file_service import FileService
|
||||||
from api.utils.api_utils import get_result, token_required, get_error_data_result, valid,get_parser_config
|
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||||
|
from api.db.services.user_service import TenantService
|
||||||
|
from api.utils import get_uuid
|
||||||
@manager.route('/datasets', methods=['POST'])
|
from api.utils.api_utils import (
|
||||||
@token_required
|
deep_merge,
|
||||||
def create(tenant_id):
|
get_error_argument_result,
|
||||||
req = request.json
|
get_error_data_result,
|
||||||
e, t = TenantService.get_by_id(tenant_id)
|
get_error_operating_result,
|
||||||
permission = req.get("permission")
|
get_error_permission_result,
|
||||||
language = req.get("language")
|
get_parser_config,
|
||||||
chunk_method = req.get("chunk_method")
|
get_result,
|
||||||
parser_config = req.get("parser_config")
|
remap_dictionary_keys,
|
||||||
valid_permission = ["me", "team"]
|
token_required,
|
||||||
valid_language =["Chinese", "English"]
|
verify_embedding_availability,
|
||||||
valid_chunk_method = ["naive","manual","qa","table","paper","book","laws","presentation","picture","one","knowledge_graph","email"]
|
)
|
||||||
check_validation=valid(permission,valid_permission,language,valid_language,chunk_method,valid_chunk_method)
|
from api.utils.validation_utils import (
|
||||||
if check_validation:
|
CreateDatasetReq,
|
||||||
return check_validation
|
DeleteDatasetReq,
|
||||||
req["parser_config"]=get_parser_config(chunk_method,parser_config)
|
ListDatasetReq,
|
||||||
if "tenant_id" in req:
|
UpdateDatasetReq,
|
||||||
return get_error_data_result(
|
validate_and_parse_json_request,
|
||||||
retmsg="`tenant_id` must not be provided")
|
validate_and_parse_request_args,
|
||||||
if "chunk_count" in req or "document_count" in req:
|
)
|
||||||
return get_error_data_result(retmsg="`chunk_count` or `document_count` must not be provided")
|
|
||||||
if "name" not in req:
|
|
||||||
return get_error_data_result(
|
@manager.route("/datasets", methods=["POST"]) # noqa: F821
|
||||||
retmsg="`name` is not empty!")
|
@token_required
|
||||||
req['id'] = get_uuid()
|
def create(tenant_id):
|
||||||
req["name"] = req["name"].strip()
|
"""
|
||||||
if req["name"] == "":
|
Create a new dataset.
|
||||||
return get_error_data_result(
|
---
|
||||||
retmsg="`name` is not empty string!")
|
tags:
|
||||||
if KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
- Datasets
|
||||||
return get_error_data_result(
|
security:
|
||||||
retmsg="Duplicated dataset name in creating dataset.")
|
- ApiKeyAuth: []
|
||||||
req["tenant_id"] = req['created_by'] = tenant_id
|
parameters:
|
||||||
if not req.get("embedding_model"):
|
- in: header
|
||||||
req['embedding_model'] = t.embd_id
|
name: Authorization
|
||||||
else:
|
type: string
|
||||||
valid_embedding_models=["BAAI/bge-large-zh-v1.5","BAAI/bge-base-en-v1.5","BAAI/bge-large-en-v1.5","BAAI/bge-small-en-v1.5",
|
required: true
|
||||||
"BAAI/bge-small-zh-v1.5","jinaai/jina-embeddings-v2-base-en","jinaai/jina-embeddings-v2-small-en",
|
description: Bearer token for authentication.
|
||||||
"nomic-ai/nomic-embed-text-v1.5","sentence-transformers/all-MiniLM-L6-v2","text-embedding-v2",
|
- in: body
|
||||||
"text-embedding-v3","maidalun1020/bce-embedding-base_v1"]
|
name: body
|
||||||
embd_model=LLMService.query(llm_name=req["embedding_model"],model_type="embedding")
|
description: Dataset creation parameters.
|
||||||
if not embd_model:
|
required: true
|
||||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
schema:
|
||||||
if embd_model:
|
type: object
|
||||||
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model")):
|
required:
|
||||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
- name
|
||||||
key_mapping = {
|
properties:
|
||||||
"chunk_num": "chunk_count",
|
name:
|
||||||
"doc_num": "document_count",
|
type: string
|
||||||
"parser_id": "chunk_method",
|
description: Name of the dataset.
|
||||||
"embd_id": "embedding_model"
|
avatar:
|
||||||
}
|
type: string
|
||||||
mapped_keys = {new_key: req[old_key] for new_key, old_key in key_mapping.items() if old_key in req}
|
description: Base64 encoding of the avatar.
|
||||||
req.update(mapped_keys)
|
description:
|
||||||
if not KnowledgebaseService.save(**req):
|
type: string
|
||||||
return get_error_data_result(retmsg="Create dataset error.(Database error)")
|
description: Description of the dataset.
|
||||||
renamed_data = {}
|
embedding_model:
|
||||||
e, k = KnowledgebaseService.get_by_id(req["id"])
|
type: string
|
||||||
for key, value in k.to_dict().items():
|
description: Embedding model Name.
|
||||||
new_key = key_mapping.get(key, key)
|
permission:
|
||||||
renamed_data[new_key] = value
|
type: string
|
||||||
return get_result(data=renamed_data)
|
enum: ['me', 'team']
|
||||||
|
description: Dataset permission.
|
||||||
@manager.route('/datasets', methods=['DELETE'])
|
chunk_method:
|
||||||
@token_required
|
type: string
|
||||||
def delete(tenant_id):
|
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
|
||||||
req = request.json
|
"picture", "presentation", "qa", "table", "tag"
|
||||||
if not req:
|
]
|
||||||
ids=None
|
description: Chunking method.
|
||||||
else:
|
pagerank:
|
||||||
ids=req.get("ids")
|
type: integer
|
||||||
if not ids:
|
description: Set page rank.
|
||||||
id_list = []
|
parser_config:
|
||||||
kbs=KnowledgebaseService.query(tenant_id=tenant_id)
|
type: object
|
||||||
for kb in kbs:
|
description: Parser configuration.
|
||||||
id_list.append(kb.id)
|
responses:
|
||||||
else:
|
200:
|
||||||
id_list=ids
|
description: Successful operation.
|
||||||
for id in id_list:
|
schema:
|
||||||
kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
|
type: object
|
||||||
if not kbs:
|
properties:
|
||||||
return get_error_data_result(retmsg=f"You don't own the dataset {id}")
|
data:
|
||||||
for doc in DocumentService.query(kb_id=id):
|
type: object
|
||||||
if not DocumentService.remove_document(doc, tenant_id):
|
"""
|
||||||
return get_error_data_result(
|
# Field name transformations during model dump:
|
||||||
retmsg="Remove document error.(Database error)")
|
# | Original | Dump Output |
|
||||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
# |----------------|-------------|
|
||||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
# | embedding_model| embd_id |
|
||||||
File2DocumentService.delete_by_document_id(doc.id)
|
# | chunk_method | parser_id |
|
||||||
if not KnowledgebaseService.delete_by_id(id):
|
req, err = validate_and_parse_json_request(request, CreateDatasetReq)
|
||||||
return get_error_data_result(
|
if err is not None:
|
||||||
retmsg="Delete dataset error.(Database error)")
|
return get_error_argument_result(err)
|
||||||
return get_result(retcode=RetCode.SUCCESS)
|
|
||||||
|
try:
|
||||||
@manager.route('/datasets/<dataset_id>', methods=['PUT'])
|
if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||||
@token_required
|
return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")
|
||||||
def update(tenant_id,dataset_id):
|
except OperationalError as e:
|
||||||
if not KnowledgebaseService.query(id=dataset_id,tenant_id=tenant_id):
|
logging.exception(e)
|
||||||
return get_error_data_result(retmsg="You don't own the dataset")
|
return get_error_data_result(message="Database operation failed")
|
||||||
req = request.json
|
|
||||||
e, t = TenantService.get_by_id(tenant_id)
|
req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
|
||||||
invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
|
req["id"] = get_uuid()
|
||||||
if any(key in req for key in invalid_keys):
|
req["tenant_id"] = tenant_id
|
||||||
return get_error_data_result(retmsg="The input parameters are invalid.")
|
req["created_by"] = tenant_id
|
||||||
permission = req.get("permission")
|
|
||||||
language = req.get("language")
|
try:
|
||||||
chunk_method = req.get("chunk_method")
|
ok, t = TenantService.get_by_id(tenant_id)
|
||||||
parser_config = req.get("parser_config")
|
if not ok:
|
||||||
valid_permission = ["me", "team"]
|
return get_error_permission_result(message="Tenant not found")
|
||||||
valid_language = ["Chinese", "English"]
|
except OperationalError as e:
|
||||||
valid_chunk_method = ["naive", "manual", "qa", "table", "paper", "book", "laws", "presentation", "picture", "one",
|
logging.exception(e)
|
||||||
"knowledge_graph", "email"]
|
return get_error_data_result(message="Database operation failed")
|
||||||
check_validation = valid(permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method)
|
|
||||||
if check_validation:
|
if not req.get("embd_id"):
|
||||||
return check_validation
|
req["embd_id"] = t.embd_id
|
||||||
if "tenant_id" in req:
|
else:
|
||||||
if req["tenant_id"] != tenant_id:
|
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
|
||||||
return get_error_data_result(
|
if not ok:
|
||||||
retmsg="Can't change `tenant_id`.")
|
return err
|
||||||
e, kb = KnowledgebaseService.get_by_id(dataset_id)
|
|
||||||
if "parser_config" in req:
|
try:
|
||||||
temp_dict=kb.parser_config
|
if not KnowledgebaseService.save(**req):
|
||||||
temp_dict.update(req["parser_config"])
|
return get_error_data_result(message="Create dataset error.(Database error)")
|
||||||
req["parser_config"] = temp_dict
|
except OperationalError as e:
|
||||||
if "chunk_count" in req:
|
logging.exception(e)
|
||||||
if req["chunk_count"] != kb.chunk_num:
|
return get_error_data_result(message="Database operation failed")
|
||||||
return get_error_data_result(
|
|
||||||
retmsg="Can't change `chunk_count`.")
|
try:
|
||||||
req.pop("chunk_count")
|
ok, k = KnowledgebaseService.get_by_id(req["id"])
|
||||||
if "document_count" in req:
|
if not ok:
|
||||||
if req['document_count'] != kb.doc_num:
|
return get_error_data_result(message="Dataset created failed")
|
||||||
return get_error_data_result(
|
except OperationalError as e:
|
||||||
retmsg="Can't change `document_count`.")
|
logging.exception(e)
|
||||||
req.pop("document_count")
|
return get_error_data_result(message="Database operation failed")
|
||||||
if "chunk_method" in req:
|
|
||||||
if kb.chunk_num != 0 and req['chunk_method'] != kb.parser_id:
|
response_data = remap_dictionary_keys(k.to_dict())
|
||||||
return get_error_data_result(
|
return get_result(data=response_data)
|
||||||
retmsg="If `chunk_count` is not 0, `chunk_method` is not changeable.")
|
|
||||||
req['parser_id'] = req.pop('chunk_method')
|
|
||||||
if req['parser_id'] != kb.parser_id:
|
@manager.route("/datasets", methods=["DELETE"]) # noqa: F821
|
||||||
if not req.get("parser_config"):
|
@token_required
|
||||||
req["parser_config"] = get_parser_config(chunk_method, parser_config)
|
def delete(tenant_id):
|
||||||
if "embedding_model" in req:
|
"""
|
||||||
if kb.chunk_num != 0 and req['embedding_model'] != kb.embd_id:
|
Delete datasets.
|
||||||
return get_error_data_result(
|
---
|
||||||
retmsg="If `chunk_count` is not 0, `embedding_model` is not changeable.")
|
tags:
|
||||||
if not req.get("embedding_model"):
|
- Datasets
|
||||||
return get_error_data_result("`embedding_model` can't be empty")
|
security:
|
||||||
valid_embedding_models=["BAAI/bge-large-zh-v1.5","BAAI/bge-base-en-v1.5","BAAI/bge-large-en-v1.5","BAAI/bge-small-en-v1.5",
|
- ApiKeyAuth: []
|
||||||
"BAAI/bge-small-zh-v1.5","jinaai/jina-embeddings-v2-base-en","jinaai/jina-embeddings-v2-small-en",
|
parameters:
|
||||||
"nomic-ai/nomic-embed-text-v1.5","sentence-transformers/all-MiniLM-L6-v2","text-embedding-v2",
|
- in: header
|
||||||
"text-embedding-v3","maidalun1020/bce-embedding-base_v1"]
|
name: Authorization
|
||||||
embd_model=LLMService.query(llm_name=req["embedding_model"],model_type="embedding")
|
type: string
|
||||||
if not embd_model:
|
required: true
|
||||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
description: Bearer token for authentication.
|
||||||
if embd_model:
|
- in: body
|
||||||
if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(tenant_id=tenant_id,model_type="embedding", llm_name=req.get("embedding_model")):
|
name: body
|
||||||
return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
|
description: Dataset deletion parameters.
|
||||||
req['embd_id'] = req.pop('embedding_model')
|
required: true
|
||||||
if "name" in req:
|
schema:
|
||||||
req["name"] = req["name"].strip()
|
type: object
|
||||||
if req["name"].lower() != kb.name.lower() \
|
required:
|
||||||
and len(KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id,
|
- ids
|
||||||
status=StatusEnum.VALID.value)) > 0:
|
properties:
|
||||||
return get_error_data_result(
|
ids:
|
||||||
retmsg="Duplicated dataset name in updating dataset.")
|
type: array or null
|
||||||
if not KnowledgebaseService.update_by_id(kb.id, req):
|
items:
|
||||||
return get_error_data_result(retmsg="Update dataset error.(Database error)")
|
type: string
|
||||||
return get_result(retcode=RetCode.SUCCESS)
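The valid(...) call above returns either an error response or None; the helper itself is imported from elsewhere and is not part of this diff. Judging only from the call site, it plausibly pairs each value with its list of allowed values. A minimal sketch under that assumption:

def valid(*args):
    # Hypothetical sketch, not the repository's actual implementation: args
    # alternate (value, allowed_values). Return an error response for the first
    # value outside its allowed list, else None so the caller can proceed.
    for value, allowed in zip(args[::2], args[1::2]):
        if value is not None and value not in allowed:
            return get_error_data_result(retmsg=f"'{value}' is not in {allowed}")
    return None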
@manager.route('/datasets', methods=['GET'])
@token_required
def list(tenant_id):
    id = request.args.get("id")
    name = request.args.get("name")
    kbs = KnowledgebaseService.query(id=id,name=name,status=1)
    if not kbs:
        return get_error_data_result(retmsg="The dataset doesn't exist")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 1024))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false" :
        desc = False
    else:
        desc = True
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_list(
        [m["tenant_id"] for m in tenants], tenant_id, page_number, items_per_page, orderby, desc, id, name)
    renamed_list = []
    for kb in kbs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "chunk_method",
            "embd_id": "embedding_model"
        }
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_result(data=renamed_list)
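The inline key_mapping above is exactly the renaming that the refactored endpoints below route through remap_dictionary_keys. A sketch of such a helper, assuming it simply generalizes this mapping; the real definition lives elsewhere in the repository:

def remap_dictionary_keys(source: dict, key_aliases: dict | None = None) -> dict:
    # Assumed behavior, mirroring the old list() endpoint's inline mapping:
    # internal column names are renamed for the public API; keys without an
    # alias pass through unchanged.
    default_aliases = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    aliases = key_aliases or default_aliases
    return {aliases.get(key, key): value for key, value in source.items()}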
Updated implementation (right-hand side of the diff). The excerpt resumes mid-function, inside the refactored create endpoint, which validates its input up front and wraps every database call in an OperationalError guard:

        return get_error_argument_result(err)

    try:
        if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
    req["id"] = get_uuid()
    req["tenant_id"] = tenant_id
    req["created_by"] = tenant_id

    try:
        ok, t = TenantService.get_by_id(tenant_id)
        if not ok:
            return get_error_permission_result(message="Tenant not found")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    if not req.get("embd_id"):
        req["embd_id"] = t.embd_id
    else:
        ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
        if not ok:
            return err

    try:
        if not KnowledgebaseService.save(**req):
            return get_error_data_result(message="Create dataset error.(Database error)")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    try:
        ok, k = KnowledgebaseService.get_by_id(req["id"])
        if not ok:
            return get_error_data_result(message="Dataset created failed")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    response_data = remap_dictionary_keys(k.to_dict())
    return get_result(data=response_data)
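A client-side sketch of exercising the refactored create endpoint. The host, port, "/api/v1" prefix, and API key are illustrative assumptions; none of them appear in this diff:

import requests

# Assumed values; a real deployment may use a different address and prefix.
BASE_URL = "http://127.0.0.1:9380/api/v1"
HEADERS = {"Authorization": "Bearer <YOUR_API_KEY>"}

# Create a dataset. embedding_model may be omitted; the endpoint then falls
# back to the tenant's default embd_id (see the branch on req.get("embd_id")).
resp = requests.post(f"{BASE_URL}/datasets", headers=HEADERS, json={"name": "demo_dataset"})
print(resp.json())  # expected envelope on success: {"code": 0, "data": {...}}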
@manager.route("/datasets", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
    """
    Delete datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset deletion parameters.
        required: true
        schema:
          type: object
          required:
            - ids
          properties:
            ids:
              type: array or null
              items:
                type: string
              description: |
                Specifies the datasets to delete:
                - If `null`, all datasets will be deleted.
                - If an array of IDs, only the specified datasets will be deleted.
                - If an empty array, no datasets will be deleted.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    req, err = validate_and_parse_json_request(request, DeleteDatasetReq)
    if err is not None:
        return get_error_argument_result(err)

    kb_id_instance_pairs = []
    if req["ids"] is None:
        try:
            kbs = KnowledgebaseService.query(tenant_id=tenant_id)
            for kb in kbs:
                kb_id_instance_pairs.append((kb.id, kb))
        except OperationalError as e:
            logging.exception(e)
            return get_error_data_result(message="Database operation failed")
    else:
        error_kb_ids = []
        for kb_id in req["ids"]:
            try:
                kb = KnowledgebaseService.get_or_none(id=kb_id, tenant_id=tenant_id)
                if kb is None:
                    error_kb_ids.append(kb_id)
                    continue
                kb_id_instance_pairs.append((kb_id, kb))
            except OperationalError as e:
                logging.exception(e)
                return get_error_data_result(message="Database operation failed")
        if len(error_kb_ids) > 0:
            return get_error_permission_result(message=f"""User '{tenant_id}' lacks permission for datasets: '{", ".join(error_kb_ids)}'""")

    errors = []
    success_count = 0
    for kb_id, kb in kb_id_instance_pairs:
        try:
            for doc in DocumentService.query(kb_id=kb_id):
                if not DocumentService.remove_document(doc, tenant_id):
                    errors.append(f"Remove document '{doc.id}' error for dataset '{kb_id}'")
                    continue
                f2d = File2DocumentService.get_by_document_id(doc.id)
                FileService.filter_delete(
                    [
                        File.source_type == FileSource.KNOWLEDGEBASE,
                        File.id == f2d[0].file_id,
                    ]
                )
                File2DocumentService.delete_by_document_id(doc.id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kb.name])
            if not KnowledgebaseService.delete_by_id(kb_id):
                errors.append(f"Delete dataset error for {kb_id}")
                continue
            success_count += 1
        except OperationalError as e:
            logging.exception(e)
            return get_error_data_result(message="Database operation failed")

    if not errors:
        return get_result()

    error_message = f"Successfully deleted {success_count} datasets, {len(errors)} failed. Details: {'; '.join(errors)[:128]}..."
    if success_count == 0:
        return get_error_data_result(message=error_message)

    return get_result(data={"success_count": success_count, "errors": errors[:5]}, message=error_message)
@manager.route("/datasets/<dataset_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, dataset_id):
    """
    Update a dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the dataset.
            avatar:
              type: string
              description: Updated base64 encoding of the avatar.
            description:
              type: string
              description: Updated description of the dataset.
            embedding_model:
              type: string
              description: Updated embedding model name.
            permission:
              type: string
              enum: ['me', 'team']
              description: Updated dataset permission.
            chunk_method:
              type: string
              enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
                     "picture", "presentation", "qa", "table", "tag"]
              description: Updated chunking method.
            pagerank:
              type: integer
              description: Updated page rank.
            parser_config:
              type: object
              description: Updated parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    # Field name transformations during model dump:
    # | Original        | Dump Output |
    # |-----------------|-------------|
    # | embedding_model | embd_id     |
    # | chunk_method    | parser_id   |
    extras = {"dataset_id": dataset_id}
    req, err = validate_and_parse_json_request(request, UpdateDatasetReq, extras=extras, exclude_unset=True)
    if err is not None:
        return get_error_argument_result(err)

    if not req:
        return get_error_argument_result(message="No properties were modified")

    try:
        kb = KnowledgebaseService.get_or_none(id=dataset_id, tenant_id=tenant_id)
        if kb is None:
            return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{dataset_id}'")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    if req.get("parser_config"):
        req["parser_config"] = deep_merge(kb.parser_config, req["parser_config"])

    if (chunk_method := req.get("parser_id")) and chunk_method != kb.parser_id:
        if not req.get("parser_config"):
            req["parser_config"] = get_parser_config(chunk_method, None)
    elif "parser_config" in req and not req["parser_config"]:
        del req["parser_config"]

    if "name" in req and req["name"].lower() != kb.name.lower():
        try:
            exists = KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)
            if exists:
                return get_error_data_result(message=f"Dataset name '{req['name']}' already exists")
        except OperationalError as e:
            logging.exception(e)
            return get_error_data_result(message="Database operation failed")

    if "embd_id" in req:
        if kb.chunk_num != 0 and req["embd_id"] != kb.embd_id:
            return get_error_data_result(message=f"When chunk_num ({kb.chunk_num}) > 0, embedding_model must remain {kb.embd_id}")
        ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
        if not ok:
            return err

    try:
        if not KnowledgebaseService.update_by_id(kb.id, req):
            return get_error_data_result(message="Update dataset error.(Database error)")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    return get_result()
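Unlike the old update(), which shallow-merged parser_config via temp_dict.update(...), the refactored endpoint calls deep_merge. That helper is defined elsewhere; a plausible sketch of a recursive merge with these semantics, offered as an assumption rather than the actual implementation:

def deep_merge(base: dict, patch: dict) -> dict:
    # Assumed semantics: nested dicts merge recursively; scalars and lists in
    # `patch` overwrite. A partial parser_config patch therefore no longer
    # clobbers sibling keys, which the old top-level dict.update() did.
    merged = dict(base)
    for key, value in patch.items():
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

# Example with illustrative parser_config keys: patching one nested key
# leaves its siblings intact.
cfg = deep_merge({"raptor": {"use_raptor": False}, "chunk_token_num": 128},
                 {"raptor": {"use_raptor": True}})
assert cfg == {"raptor": {"use_raptor": True}, "chunk_token_num": 128}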
@manager.route("/datasets", methods=["GET"])  # noqa: F821
@token_required
def list_datasets(tenant_id):
    """
    List datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: id
        type: string
        required: false
        description: Dataset ID to filter.
      - in: query
        name: name
        type: string
        required: false
        description: Dataset name to filter.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Sort in descending order.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Successful operation.
        schema:
          type: array
          items:
            type: object
    """
    args, err = validate_and_parse_request_args(request, ListDatasetReq)
    if err is not None:
        return get_error_argument_result(err)

    kb_id = request.args.get("id")
    name = args.get("name")
    if kb_id:
        try:
            kbs = KnowledgebaseService.get_kb_by_id(kb_id, tenant_id)
        except OperationalError as e:
            logging.exception(e)
            return get_error_data_result(message="Database operation failed")
        if not kbs:
            return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{kb_id}'")
    if name:
        try:
            kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
        except OperationalError as e:
            logging.exception(e)
            return get_error_data_result(message="Database operation failed")
        if not kbs:
            return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{name}'")

    try:
        tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
        kbs = KnowledgebaseService.get_list(
            [m["tenant_id"] for m in tenants],
            tenant_id,
            args["page"],
            args["page_size"],
            args["orderby"],
            args["desc"],
            kb_id,
            name,
        )
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    response_data_list = []
    for kb in kbs:
        response_data_list.append(remap_dictionary_keys(kb))
    return get_result(data=response_data_list)
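A client-side sketch of the list endpoint using the query parameters from the docstring above; the base URL and key remain illustrative assumptions:

import requests

BASE_URL = "http://127.0.0.1:9380/api/v1"  # assumed host and prefix
HEADERS = {"Authorization": "Bearer <YOUR_API_KEY>"}

# Page 2, ten items per page, newest first; `id` and `name` filters are optional.
params = {"page": 2, "page_size": 10, "orderby": "create_time", "desc": True}
resp = requests.get(f"{BASE_URL}/datasets", headers=HEADERS, params=params)

# Assuming the usual get_result() envelope, data is a list of remapped dicts
# (chunk_count, document_count, chunk_method, embedding_model, ...).
for dataset in resp.json().get("data", []):
    print(dataset["id"], dataset["name"])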