Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)

Compare commits: 654 commits
.github/ISSUE_TEMPLATE/bug_report.yml (vendored), 18 changes

@@ -1,15 +1,21 @@
-name: Bug Report
+name: "🐞 Bug Report"
 description: Create a bug issue for RAGFlow
 title: "[Bug]: "
-labels: [bug]
+labels: ["🐞 bug"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same bug?
-      description: Please check if an issue already exists for the bug you encountered.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
           required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: "Please provide the following information to help us understand the issue."
.github/ISSUE_TEMPLATE/feature_request.md (vendored), 10 changes (file removed)

@@ -1,10 +0,0 @@
----
-name: Feature request
-title: '[Feature Request]: '
-about: Suggest an idea for RAGFlow
-labels: ''
----
-
-**Summary**
-
-Description for this feature.
.github/ISSUE_TEMPLATE/feature_request.yml (vendored), 16 changes

@@ -1,14 +1,20 @@
-name: Feature request
+name: "💞 Feature request"
 description: Propose a feature request for RAGFlow.
 title: "[Feature Request]: "
-labels: [feature request]
+labels: ["💞 feature"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same feature request?
-      description: Please check if an issue already exists for the feature you request.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
           required: true
   - type: textarea
     attributes:
.github/ISSUE_TEMPLATE/question.yml (vendored), 17 changes

@@ -1,8 +1,21 @@
-name: Question
+name: "🙋♀️ Question"
 description: Ask questions on RAGFlow
 title: "[Question]: "
-labels: [question]
+labels: ["🙋♀️ question"]
 body:
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: |
.github/workflows/release.yml (vendored), 6 changes

@@ -75,12 +75,6 @@ jobs:
           # The body field does not support environment variable substitution directly.
           body_path: release_body.md
 
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-
       # https://github.com/marketplace/actions/docker-login
       - name: Login to Docker Hub
         uses: docker/login-action@v3
.github/workflows/tests.yml (vendored), 32 changes

@@ -32,12 +32,9 @@ jobs:
       # https://github.com/hmarr/debug-action
       #- uses: hmarr/debug-action@v2
 
-      - name: Show PR labels
+      - name: Show who triggered this workflow
        run: |
          echo "Workflow triggered by ${{ github.event_name }}"
-          if [[ ${{ github.event_name }} == 'pull_request' ]]; then
-            echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
-          fi
 
      - name: Ensure workspace ownership
        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
@@ -54,7 +51,7 @@ jobs:
        uses: astral-sh/ruff-action@v2
        with:
          version: ">=0.8.2"
-          args: "check --ignore E402"
+          args: "check"
 
      - name: Build ragflow:nightly-slim
        run: |
@@ -68,7 +65,7 @@ jobs:
 
      - name: Start ragflow:nightly-slim
        run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
          sudo docker compose -f docker/docker-compose.yml up -d
 
      - name: Stop ragflow:nightly-slim
@@ -78,7 +75,7 @@ jobs:
 
      - name: Start ragflow:nightly
        run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
          sudo docker compose -f docker/docker-compose.yml up -d
 
      - name: Run sdk tests against Elasticsearch
@@ -99,8 +96,17 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
 
+      - name: Run http api tests against Elasticsearch
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow"
 
      - name: Stop ragflow:nightly
        if: always() # always run this step even if previous steps failed
@@ -131,6 +137,16 @@ jobs:
          done
          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
 
+      - name: Run http api tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && DOC_ENGINE=infinity pytest -s --tb=short -m "not slow"
+
      - name: Stop ragflow:nightly
        if: always() # always run this step even if previous steps failed
        run: |
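A side note on the `echo` to `echo -e "\n..."` change in the start-up steps above: the leading `\n` guards against `docker/.env` ending without a trailing newline, in which case a plain append would glue `RAGFLOW_IMAGE=...` onto the last existing line. A minimal sketch of the difference (the demo file name and the `DOC_ENGINE` value are illustrative, not taken from the repository):

```bash
# File that ends WITHOUT a trailing newline (printf adds none).
printf 'DOC_ENGINE=elasticsearch' > demo.env

echo "RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> demo.env
cat demo.env
# DOC_ENGINE=elasticsearchRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim   <- variables merged

printf 'DOC_ENGINE=elasticsearch' > demo.env
echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> demo.env
cat demo.env
# DOC_ENGINE=elasticsearch
# RAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim                           <- separate lines
```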
.gitignore (vendored), 4 changes

@@ -38,3 +38,7 @@ sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
 huggingface.co/
 nltk_data/
+
+# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
+.lh/
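The new ignore pattern above matches any name containing a run of at least ten consecutive hexadecimal characters, which is what catches hash-named temporary files like the one quoted in the comment. If in doubt, `git check-ignore -v` reports which rule matches a given path (the sample file names below are hypothetical):

```bash
# -v prints the .gitignore file, line number, and pattern that matched.
git check-ignore -v 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
git check-ignore -v some_dir/9b5ad71b2ce5302211f9c61530b329a4922fc6a4.tmp
```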
Dockerfile, 20 changes

@@ -21,9 +21,7 @@ RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co
     if [ "$LIGHTEN" != "1" ]; then \
         (tar -cf - \
             /huggingface.co/BAAI/bge-large-zh-v1.5 \
-            /huggingface.co/BAAI/bge-reranker-v2-m3 \
             /huggingface.co/maidalun1020/bce-embedding-base_v1 \
-            /huggingface.co/maidalun1020/bce-reranker-base_v1 \
         | tar -xf - --strip-components=2 -C /root/.ragflow) \
     fi
 
@@ -46,7 +44,8 @@ ENV DEBIAN_FRONTEND=noninteractive
 # Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
 RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     if [ "$NEED_MIRROR" == "1" ]; then \
-        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+        sed -i 's|http://ports.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+        sed -i 's|http://archive.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
     fi; \
     rm -f /etc/apt/apt.conf.d/docker-clean && \
     echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
@@ -59,14 +58,15 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     apt install -y default-jdk && \
     apt install -y libatk-bridge2.0-0 && \
     apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
+    apt install -y libjemalloc-dev && \
     apt install -y python3-pip pipx nginx unzip curl wget git vim less
 
 RUN if [ "$NEED_MIRROR" == "1" ]; then \
-        pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
-        pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
+        pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
+        pip3 config set global.trusted-host mirrors.aliyun.com; \
         mkdir -p /etc/uv && \
         echo "[[index]]" > /etc/uv/uv.toml && \
-        echo 'url = "https://pypi.tuna.tsinghua.edu.cn/simple"' >> /etc/uv/uv.toml && \
+        echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
         echo "default = true" >> /etc/uv/uv.toml; \
     fi; \
     pipx install uv
@@ -150,9 +150,9 @@ COPY pyproject.toml uv.lock ./
 # uv records index url into uv.lock but doesn't failover among multiple indexes
 RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
     if [ "$NEED_MIRROR" == "1" ]; then \
-        sed -i 's|pypi.org|pypi.tuna.tsinghua.edu.cn|g' uv.lock; \
+        sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
     else \
-        sed -i 's|pypi.tuna.tsinghua.edu.cn|pypi.org|g' uv.lock; \
+        sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
     fi; \
     if [ "$LIGHTEN" == "1" ]; then \
         uv sync --python 3.10 --frozen; \
@@ -196,10 +196,12 @@ COPY deepdoc deepdoc
 COPY rag rag
 COPY agent agent
 COPY graphrag graphrag
+COPY agentic_reasoning agentic_reasoning
 COPY pyproject.toml uv.lock ./
+COPY mcp mcp
 
 COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
-COPY docker/entrypoint.sh docker/entrypoint-parser.sh ./
+COPY docker/entrypoint.sh ./
 RUN chmod +x ./entrypoint*.sh
 
 # Copy compiled web pages
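For readability, the `NEED_MIRROR=1` branch of the revised Dockerfile boils down to the following shell steps; the heredoc is simply a tidier way of writing the three `echo` lines that build `/etc/uv/uv.toml`, with the contents taken from the diff above:

```bash
# Point pip at the Aliyun PyPI mirror.
pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple
pip3 config set global.trusted-host mirrors.aliyun.com

# Give uv the same mirror as its default index.
mkdir -p /etc/uv
cat > /etc/uv/uv.toml <<'EOF'
[[index]]
url = "https://mirrors.aliyun.com/pypi/simple"
default = true
EOF
```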
README.md, 54 changes

@@ -22,7 +22,7 @@
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -36,7 +36,7 @@
 <a href="https://ragflow.io/docs/dev/">Document</a> |
 <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
-<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
@@ -78,11 +78,10 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 
 ## 🔥 Latest Updates
 
-- 2025-02-05 Updates the model list of 'SILICONFLOW' and adds support for Deepseek-R1/DeepSeek-V3.
+- 2025-03-19 Supports using a multi-modal model to make sense of images within PDF or DOCX files.
+- 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
 - 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
-- 2024-12-18 Upgrades Document Layout Analysis model in Deepdoc.
-- 2024-12-04 Adds support for pagerank score in knowledge base.
-- 2024-11-22 Adds more variables to Agent.
+- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
 - 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
 - 2024-08-22 Support text to SQL statements through RAG.
 
@@ -173,19 +172,27 @@ releases! 🌟
 
 3. Start up the server using the pre-built Docker images:
 
-   > The command below downloads the `v0.16.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download an RAGFlow edition different from `v0.16.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` for the full edition `v0.16.0`.
+   > [!CAUTION]
+   > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
+   > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.
+
+   > The command below downloads the `v0.18.0-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.18.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` for the full edition `v0.18.0`.
 
    ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
+   $ cd ragflow/docker
+   # Use CPU for embedding and DeepDoc tasks:
+   $ docker compose -f docker-compose.yml up -d
+
+   # To use GPU to accelerate embedding and DeepDoc tasks:
+   # docker compose -f docker-compose-gpu.yml up -d
   ```
 
 | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
 |-------------------|-----------------|-----------------------|--------------------------|
-| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
-| v0.16.0-slim | ≈2 | ❌ | Stable release |
+| v0.18.0 | ≈9 | :heavy_check_mark: | Stable release |
+| v0.18.0-slim | ≈2 | ❌ | Stable release |
 | nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
 | nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
 
 4. Check the server status after having the server up and running:
 
@@ -204,9 +211,6 @@ releases! 🌟
 /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
 
 * Running on all addresses (0.0.0.0)
-* Running on http://127.0.0.1:9380
-* Running on http://x.x.x.x:9380
-INFO:werkzeug:Press CTRL+C to quit
 ```
 
 > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
@@ -240,7 +244,7 @@ to `<YOUR_SERVING_PORT>:80`.
 Updates to the above configurations require a reboot of all containers to take effect:
 
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```
 
 ### Switch doc engine from Elasticsearch to Infinity
@@ -253,12 +257,15 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors. To swit
 $ docker compose -f docker/docker-compose.yml down -v
 ```
 
+   > [!WARNING]
+   > `-v` will delete the docker container volumes, and the existing data will be cleared.
+
 2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
 
 3. Start the containers:
 
 ```bash
-$ docker compose -f docker/docker-compose.yml up -d
+$ docker compose -f docker-compose.yml up -d
 ```
 
 > [!WARNING]
@@ -271,7 +278,7 @@ This image is approximately 2 GB in size and relies on external LLM and embeddin
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```
 
 ## 🔧 Build a Docker image including embedding models
@@ -281,7 +288,7 @@ This image is approximately 9 GB in size. As it includes embedding models, it re
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-docker build -f Dockerfile -t infiniflow/ragflow:nightly .
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```
 
 ## 🔨 Launch service from source for development
@@ -344,9 +351,12 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
 ## 📚 Documentation
 
 - [Quickstart](https://ragflow.io/docs/dev/)
-- [User guide](https://ragflow.io/docs/dev/category/guides)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
 - [References](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [FAQs](https://ragflow.io/docs/dev/faq)
 
 ## 📜 Roadmap
 
@@ -354,7 +364,7 @@ See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214
 
 ## 🏄 Community
 
-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/NjYzJD3GM3)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
 
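Taken together, the README changes above describe roughly this startup flow (the commands are copied from the updated README text; the GPU compose file applies only when GPU acceleration is wanted):

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/docker

# CPU for embedding and DeepDoc tasks:
docker compose -f docker-compose.yml up -d
# or, to use GPU acceleration:
# docker compose -f docker-compose-gpu.yml up -d

# Confirm the server has finished starting up before logging in.
docker logs -f ragflow-server
```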
README_id.md, 69 changes

@@ -22,7 +22,7 @@
 <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
+<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
@@ -36,12 +36,12 @@
 <a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
 <a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
 <a href="https://twitter.com/infiniflowai">Twitter</a> |
-<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
+<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
 <details open>
-<summary></b>📕 Daftar Isi</b></summary>
+<summary><b>📕 Daftar Isi </b> </summary>
 
 - 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
 - 🎮 [Demo](#-demo)
@@ -75,11 +75,10 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 
 ## 🔥 Pembaruan Terbaru
 
-- 2025-02-05 Memperbarui daftar model 'SILICONFLOW' dan menambahkan dukungan untuk Deepseek-R1/DeepSeek-V3.
+- 2025-03-19 Mendukung penggunaan model multi-modal untuk memahami gambar di dalam file PDF atau DOCX.
+- 2025-02-28 dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
 - 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
-- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di Deepdoc.
-- 2024-12-04 Mendukung skor pagerank ke basis pengetahuan.
-- 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
+- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
 - 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
 - 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.
 
@@ -166,21 +165,29 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 
 3. Bangun image Docker pre-built dan jalankan server:
 
-   > Perintah di bawah ini mengunduh edisi v0.16.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.16.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0 untuk edisi lengkap v0.16.0.
-
-   ```bash
-   $ cd ragflow
-   $ docker compose -f docker/docker-compose.yml up -d
-   ```
-
-   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
-   | ----------------- | --------------- | --------------------- | ------------------------ |
-   | v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
-   | v0.16.0-slim | ≈2 | ❌ | Stable release |
-   | nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
-   | nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
-
-4. Periksa status server setelah server aktif dan berjalan:
+   > [!CAUTION]
+   > Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
+   > Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).
+
+   > Perintah di bawah ini mengunduh edisi v0.18.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.18.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0 untuk edisi lengkap v0.18.0.
+
+   ```bash
+   $ cd ragflow/docker
+   # Use CPU for embedding and DeepDoc tasks:
+   $ docker compose -f docker-compose.yml up -d
+
+   # To use GPU to accelerate embedding and DeepDoc tasks:
+   # docker compose -f docker-compose-gpu.yml up -d
+   ```
+
+   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
+   | ----------------- | --------------- | --------------------- | ------------------------ |
+   | v0.18.0 | ≈9 | :heavy_check_mark: | Stable release |
+   | v0.18.0-slim | ≈2 | ❌ | Stable release |
+   | nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
+   | nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
+
+1. Periksa status server setelah server aktif dan berjalan:
 
 ```bash
 $ docker logs -f ragflow-server
@@ -197,18 +204,15 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 /_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
 
 * Running on all addresses (0.0.0.0)
-* Running on http://127.0.0.1:9380
-* Running on http://x.x.x.x:9380
-INFO:werkzeug:Press CTRL+C to quit
 ```
 
 > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
 > karena RAGFlow mungkin belum sepenuhnya siap.
 
-5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
+2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
 > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
 > port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
-6. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
+3. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
 bidang `API_KEY` dengan kunci API yang sesuai.
 
 > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.
@@ -230,7 +234,7 @@ menjadi `<YOUR_SERVING_PORT>:80`.
 Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:
 
 > ```bash
-> $ docker compose -f docker/docker-compose.yml up -d
+> $ docker compose -f docker-compose.yml up -d
 > ```
 
 ## 🔧 Membangun Docker Image tanpa Model Embedding
@@ -240,7 +244,7 @@ Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
+docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
 ```
 
 ## 🔧 Membangun Docker Image Termasuk Model Embedding
@@ -250,7 +254,7 @@ Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hany
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-docker build -f Dockerfile -t infiniflow/ragflow:nightly .
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
 ```
 
 ## 🔨 Menjalankan Aplikasi dari untuk Pengembangan
@@ -313,9 +317,12 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
 ## 📚 Dokumentasi
 
 - [Quickstart](https://ragflow.io/docs/dev/)
-- [Panduan Pengguna](https://ragflow.io/docs/dev/category/guides)
-- [Referensi](https://ragflow.io/docs/dev/category/references)
-- [FAQ](https://ragflow.io/docs/dev/faq)
+- [Configuration](https://ragflow.io/docs/dev/configurations)
+- [Release notes](https://ragflow.io/docs/dev/release_notes)
+- [User guides](https://ragflow.io/docs/dev/category/guides)
+- [Developer guides](https://ragflow.io/docs/dev/category/developers)
+- [References](https://ragflow.io/docs/dev/category/references)
+- [FAQs](https://ragflow.io/docs/dev/faq)
 
 ## 📜 Roadmap
 
@@ -323,7 +330,7 @@ Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)
 
 ## 🏄 Komunitas
 
-- [Discord](https://discord.gg/4XxujFgUN7)
+- [Discord](https://discord.gg/NjYzJD3GM3)
 - [Twitter](https://twitter.com/infiniflowai)
 - [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
 
60
README_ja.md
60
README_ja.md
@ -22,7 +22,7 @@
|
|||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
@ -36,7 +36,7 @@
|
|||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||||
<a href="https://demo.ragflow.io">Demo</a>
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
@ -55,11 +55,10 @@
|
|||||||
|
|
||||||
## 🔥 最新情報
|
## 🔥 最新情報
|
||||||
|
|
||||||
- 2025-02-05 シリコン フローの St およびモデル リストを更新し、Deep Seek-R1/Deep Seek-V3 のサポートを追加しました。
|
- 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
|
||||||
|
- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
|
||||||
- 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
|
- 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
|
||||||
- 2024-12-18 Deepdoc のドキュメント レイアウト分析モデルをアップグレードします。
|
- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
|
||||||
- 2024-12-04 ナレッジ ベースへのページランク スコアをサポートしました。
|
|
||||||
- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
|
|
||||||
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
|
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
|
||||||
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
|
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
|
||||||
|
|
||||||
@ -146,21 +145,29 @@
|
|||||||
|
|
||||||
3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:
|
3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:
|
||||||
|
|
||||||
> 以下のコマンドは、RAGFlow Docker イメージの v0.16.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.16.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.16.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0 と設定します。
|
> [!CAUTION]
|
||||||
|
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
|
||||||
|
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
|
||||||
|
|
||||||
|
> 以下のコマンドは、RAGFlow Docker イメージの v0.18.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.18.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.18.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0 と設定します。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow
|
$ cd ragflow/docker
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
# Use CPU for embedding and DeepDoc tasks:
|
||||||
|
$ docker compose -f docker-compose.yml up -d
|
||||||
|
|
||||||
|
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||||
|
# docker compose -f docker-compose-gpu.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||||
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
|
| v0.18.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||||
| v0.16.0-slim | ≈2 | ❌ | Stable release |
|
| v0.18.0-slim | ≈2 | ❌ | Stable release |
|
||||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||||
|
|
||||||
4. サーバーを立ち上げた後、サーバーの状態を確認する:

```bash
$ docker logs -f ragflow-server

@@ -176,16 +183,13 @@

/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:9380
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```

> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。

> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。

6. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
@@ -208,7 +212,7 @@

> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
>
> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```
### Elasticsearch から Infinity にドキュメントエンジンを切り替えます

@@ -219,13 +223,14 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。

2. **docker/.env** の `DOC_ENGINE` を `infinity` に設定します。

3. コンテナを起動する:

```bash
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
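上記の 3 ステップをまとめて実行する場合の最小限のスケッチです(`sed` による .env の書き換えは一例で、GNU sed を想定した仮のコマンドです。手動で編集しても同じ結果になります):

```bash
cd docker

# 1. コンテナを停止し、ボリュームも削除する(既存データはクリアされます)
docker compose -f docker-compose.yml down -v

# 2. ドキュメントエンジンを Infinity に切り替える(仮の sed 例)
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' .env

# 3. コンテナを再起動する
docker compose -f docker-compose.yml up -d
```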
## 🔧 ソースコードで Docker イメージを作成(埋め込みモデルなし)

@@ -235,7 +240,7 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```
## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)

@@ -245,7 +250,7 @@ docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-s

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```
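上記のように `--platform linux/amd64` 付きでビルドした結果を確認するコマンドの一例です(一般的な Docker CLI を使った補足であり、本文にはない手順です):

```bash
# ビルドされたイメージの一覧を確認(一例)
docker images infiniflow/ragflow

# イメージの OS/アーキテクチャが linux/amd64 になっているかを確認
docker image inspect infiniflow/ragflow:nightly --format '{{.Os}}/{{.Architecture}}'
```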
## 🔨 ソースコードからサービスを起動する方法

@@ -308,9 +313,12 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 📚 ドキュメンテーション

- [Quickstart](https://ragflow.io/docs/dev/)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 ロードマップ

@@ -318,7 +326,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 🏄 コミュニティ

- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
README_ko.md (57 changed lines)
@ -22,7 +22,7 @@
|
|||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
@ -36,7 +36,7 @@
|
|||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||||
<a href="https://demo.ragflow.io">Demo</a>
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
@ -55,12 +55,10 @@
|
|||||||
|
|
||||||
## 🔥 업데이트

- 2025-03-19 PDF 또는 DOCX 파일 내의 이미지를 이해하기 위해 다중 모드 모델을 사용하는 것을 지원합니다.
- 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
- 2025-02-05 'SILICONFLOW' 모델 목록을 업데이트하고 Deepseek-R1/DeepSeek-V3에 대한 지원을 추가합니다.
- 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
- 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.
- 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.
@ -147,21 +145,29 @@
|
|||||||
|
|
||||||
3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:
|
3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:
|
||||||
|
|
||||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.16.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.16.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.16.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0로 설정합니다.
|
> [!CAUTION]
|
||||||
|
> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
|
||||||
|
> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).
|
||||||
|
|
||||||
|
> 아래 명령어는 RAGFlow Docker 이미지의 v0.18.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.18.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.18.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0로 설정합니다.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow
|
$ cd ragflow/docker
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
# Use CPU for embedding and DeepDoc tasks:
|
||||||
|
$ docker compose -f docker-compose.yml up -d
|
||||||
|
|
||||||
|
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||||
|
# docker compose -f docker-compose-gpu.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||||
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
|
| v0.18.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||||
| v0.16.0-slim | ≈2 | ❌ | Stable release |
|
| v0.18.0-slim | ≈2 | ❌ | Stable release |
|
||||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||||
|
|
||||||
4. 서버가 시작된 후 서버 상태를 확인하세요:
|
1. 서버가 시작된 후 서버 상태를 확인하세요:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker logs -f ragflow-server
|
$ docker logs -f ragflow-server
|
||||||
@ -177,16 +183,13 @@
|
|||||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||||
|
|
||||||
* Running on all addresses (0.0.0.0)
|
* Running on all addresses (0.0.0.0)
|
||||||
* Running on http://127.0.0.1:9380
|
|
||||||
* Running on http://x.x.x.x:9380
|
|
||||||
INFO:werkzeug:Press CTRL+C to quit
|
|
||||||
```
|
```
|
||||||
|
|
||||||
> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.
|
> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.
|
||||||
|
|
||||||
5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
|
2. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
|
||||||
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
|
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
|
||||||
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
|
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
|
||||||
|
|
||||||
> 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.
|
> 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.
|
||||||
|
|
||||||
@ -209,7 +212,7 @@
|
|||||||
> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
|
> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
|
||||||
>
|
>
|
||||||
> ```bash
|
> ```bash
|
||||||
> $ docker compose -f docker/docker-compose.yml up -d
|
> $ docker compose -f docker-compose.yml up -d
|
||||||
> ```
|
> ```
|
||||||
|
|
||||||
### Elasticsearch 에서 Infinity 로 문서 엔진 전환
|
### Elasticsearch 에서 Infinity 로 문서 엔진 전환
|
||||||
@ -220,6 +223,7 @@ RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및
|
|||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml down -v
|
||||||
```
|
```
|
||||||
|
Note: `-v` 는 docker 컨테이너의 볼륨을 삭제하고 기존 데이터를 지우며, 이 작업은 컨테이너를 중지하는 것과 동일합니다.
|
||||||
2. **docker/.env**의 "DOC_ENGINE" 을 "infinity" 로 설정합니다.
|
2. **docker/.env**의 "DOC_ENGINE" 을 "infinity" 로 설정합니다.
|
||||||
3. 컨테이너 부팅:
|
3. 컨테이너 부팅:
|
||||||
```bash
|
```bash
|
||||||
@ -235,7 +239,7 @@ RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)
|
## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)
|
||||||
@ -245,7 +249,7 @@ docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-s
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔨 소스 코드로 서비스를 시작합니다.
|
## 🔨 소스 코드로 서비스를 시작합니다.
|
||||||
@ -308,9 +312,12 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
|||||||
## 📚 문서
|
## 📚 문서
|
||||||
|
|
||||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||||
|
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||||
|
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||||
|
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||||
- [References](https://ragflow.io/docs/dev/category/references)
|
- [References](https://ragflow.io/docs/dev/category/references)
|
||||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||||
|
|
||||||
## 📜 로드맵
|
## 📜 로드맵
|
||||||
|
|
||||||
@ -318,7 +325,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
|||||||
|
|
||||||
## 🏄 커뮤니티
|
## 🏄 커뮤니티
|
||||||
|
|
||||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
- [Discord](https://discord.gg/NjYzJD3GM3)
|
||||||
- [Twitter](https://twitter.com/infiniflowai)
|
- [Twitter](https://twitter.com/infiniflowai)
|
||||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||||
|
|
||||||
|
|||||||
@ -22,7 +22,7 @@
|
|||||||
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
|
||||||
@ -36,12 +36,12 @@
|
|||||||
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
|
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||||
<a href="https://demo.ragflow.io">Demo</a>
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
<details open>
|
<details open>
|
||||||
<summary></b>📕 Índice</b></summary>
|
<summary><b>📕 Índice</b></summary>
|
||||||
|
|
||||||
- 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
|
- 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
|
||||||
- 🎮 [Demo](#-demo)
|
- 🎮 [Demo](#-demo)
|
||||||
@ -75,11 +75,10 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
|||||||
|
|
||||||
## 🔥 Últimas Atualizações

- 19-03-2025 Suporta o uso de um modelo multi-modal para entender imagens dentro de arquivos PDF ou DOCX.
- 28-02-2025 Combinado com a pesquisa na Internet (TAVILY), suporta pesquisas profundas para qualquer LLM.
- 05-02-2025 Atualiza a lista de modelos de 'SILICONFLOW' e adiciona suporte para Deepseek-R1/DeepSeek-V3.
- 26-01-2025 Otimize a extração e aplicação de gráficos de conhecimento e forneça uma variedade de opções de configuração.
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
- 04-12-2024 Adiciona suporte para pontuação de pagerank na base de conhecimento.
- 22-11-2024 Adiciona mais variáveis para o Agente.
- 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
- 22-08-2024 Suporta conversão de texto para comandos SQL via RAG.
@ -166,19 +165,27 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
|||||||
|
|
||||||
3. Inicie o servidor usando as imagens Docker pré-compiladas:
|
3. Inicie o servidor usando as imagens Docker pré-compiladas:
|
||||||
|
|
||||||
> O comando abaixo baixa a edição `v0.16.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.16.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` para a edição completa `v0.16.0`.
|
> [!CAUTION]
|
||||||
|
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
|
||||||
|
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.
|
||||||
|
|
||||||
|
> O comando abaixo baixa a edição `v0.18.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.18.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` para a edição completa `v0.18.0`.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow
|
$ cd ragflow/docker
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
# Use CPU for embedding and DeepDoc tasks:
|
||||||
|
$ docker compose -f docker-compose.yml up -d
|
||||||
|
|
||||||
|
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||||
|
# docker compose -f docker-compose-gpu.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
|
| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
|
||||||
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
|
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
|
||||||
| v0.16.0 | ~9 | :heavy_check_mark: | Lançamento estável |
|
| v0.18.0 | ~9 | :heavy_check_mark: | Lançamento estável |
|
||||||
| v0.16.0-slim | ~2 | ❌ | Lançamento estável |
|
| v0.18.0-slim | ~2 | ❌ | Lançamento estável |
|
||||||
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
|
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
|
||||||
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |
|
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |
|
||||||
|
|
||||||
4. Verifique o status do servidor após tê-lo iniciado:
|
4. Verifique o status do servidor após tê-lo iniciado:
|
||||||
|
|
||||||
@ -196,9 +203,6 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
|||||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||||
|
|
||||||
* Rodando em todos os endereços (0.0.0.0)
|
* Rodando em todos os endereços (0.0.0.0)
|
||||||
* Rodando em http://127.0.0.1:9380
|
|
||||||
* Rodando em http://x.x.x.x:9380
|
|
||||||
INFO:werkzeug:Pressione CTRL+C para sair
|
|
||||||
```
|
```
|
||||||
|
|
||||||
> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
|
> Se você pular essa etapa de confirmação e acessar diretamente o RAGFlow, seu navegador pode exibir um erro `network anormal`, pois, nesse momento, seu RAGFlow pode não estar totalmente inicializado.
|
||||||
@ -228,7 +232,7 @@ Para atualizar a porta HTTP de serviço padrão (80), vá até [docker-compose.y
|
|||||||
Atualizações nas configurações acima exigem um reinício de todos os contêineres para que tenham efeito:
|
Atualizações nas configurações acima exigem um reinício de todos os contêineres para que tenham efeito:
|
||||||
|
|
||||||
> ```bash
|
> ```bash
|
||||||
> $ docker compose -f docker/docker-compose.yml up -d
|
> $ docker compose -f docker-compose.yml up -d
|
||||||
> ```
|
> ```
|
||||||
|
|
||||||
### Mudar o mecanismo de documentos de Elasticsearch para Infinity
|
### Mudar o mecanismo de documentos de Elasticsearch para Infinity
|
||||||
@ -240,13 +244,13 @@ O RAGFlow usa o Elasticsearch por padrão para armazenar texto completo e vetore
|
|||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml down -v
|
$ docker compose -f docker/docker-compose.yml down -v
|
||||||
```
|
```
|
||||||
|
Note: `-v` irá deletar os volumes do contêiner, e os dados existentes serão apagados.
|
||||||
2. Defina `DOC_ENGINE` no **docker/.env** para `infinity`.
|
2. Defina `DOC_ENGINE` no **docker/.env** para `infinity`.
|
||||||
|
|
||||||
3. Inicie os contêineres:
|
3. Inicie os contêineres:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
$ docker compose -f docker-compose.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
> [!ATENÇÃO]
|
> [!ATENÇÃO]
|
||||||
@ -259,7 +263,7 @@ Esta imagem tem cerca de 2 GB de tamanho e depende de serviços externos de LLM
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔧 Criar uma imagem Docker incluindo modelos de incorporação
|
## 🔧 Criar uma imagem Docker incluindo modelos de incorporação
|
||||||
@ -269,7 +273,7 @@ Esta imagem tem cerca de 9 GB de tamanho. Como inclui modelos de incorporação,
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔨 Lançar o serviço a partir do código-fonte para desenvolvimento
|
## 🔨 Lançar o serviço a partir do código-fonte para desenvolvimento
|
||||||
@ -333,10 +337,13 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
|||||||
|
|
||||||
## 📚 Documentação
|
## 📚 Documentação
|
||||||
|
|
||||||
- [Início rápido](https://ragflow.io/docs/dev/)
|
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||||
- [Guia do usuário](https://ragflow.io/docs/dev/category/guides)
|
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||||
- [Referências](https://ragflow.io/docs/dev/category/references)
|
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||||
|
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||||
|
- [References](https://ragflow.io/docs/dev/category/references)
|
||||||
|
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||||
|
|
||||||
## 📜 Roadmap
|
## 📜 Roadmap
|
||||||
|
|
||||||
@ -344,7 +351,7 @@ Veja o [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
|||||||
|
|
||||||
## 🏄 Comunidade
|
## 🏄 Comunidade
|
||||||
|
|
||||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
- [Discord](https://discord.gg/NjYzJD3GM3)
|
||||||
- [Twitter](https://twitter.com/infiniflowai)
|
- [Twitter](https://twitter.com/infiniflowai)
|
||||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||||
|
|
||||||
|
|||||||
@ -21,7 +21,7 @@
|
|||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
@ -35,7 +35,7 @@
|
|||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||||
<a href="https://demo.ragflow.io">Demo</a>
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
@ -54,11 +54,10 @@
|
|||||||
|
|
||||||
## 🔥 近期更新

- 2025-03-19 PDF和DOCX中的圖支持用多模態大模型去解析得到描述。
- 2025-02-28 結合網路搜尋(Tavily),對於任意大模型實現類似 Deep Research 的推理功能。
- 2025-02-05 更新「SILICONFLOW」的型號清單並新增 Deepseek-R1/DeepSeek-V3 的支援。
- 2025-01-26 最佳化知識圖譜的擷取與應用,提供了多種配置選擇。
- 2024-12-18 升級了 DeepDoc 的文檔佈局分析模型。
- 2024-12-04 支援知識庫的 Pagerank 分數。
- 2024-11-22 完善了 Agent 中的變數定義和使用。
- 2024-11-01 對解析後的 chunk 加入關鍵字抽取和相關問題產生以提高回想的準確度。
- 2024-08-22 支援用 RAG 技術實現從自然語言到 SQL 語句的轉換。
@ -145,19 +144,27 @@
|
|||||||
|
|
||||||
3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:
|
3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:
|
||||||
|
|
||||||
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.16.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.16.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` 來下載 RAGFlow 鏡像的 `v0.16.0` 完整發行版。
|
> [!CAUTION]
|
||||||
|
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
|
||||||
|
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。
|
||||||
|
|
||||||
|
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.18.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.18.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` 來下載 RAGFlow 鏡像的 `v0.18.0` 完整發行版。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow
|
$ cd ragflow/docker
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
# Use CPU for embedding and DeepDoc tasks:
|
||||||
|
$ docker compose -f docker-compose.yml up -d
|
||||||
|
|
||||||
|
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||||
|
# docker compose -f docker-compose-gpu.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||||
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
|
| v0.18.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||||
| v0.16.0-slim | ≈2 | ❌ | Stable release |
|
| v0.18.0-slim | ≈2 | ❌ | Stable release |
|
||||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> 如果你遇到 Docker 映像檔拉不下來的問題,可以在 **docker/.env** 檔案內根據變數 `RAGFLOW_IMAGE` 的註解提示選擇華為雲或阿里雲的對應映像。
|
> 如果你遇到 Docker 映像檔拉不下來的問題,可以在 **docker/.env** 檔案內根據變數 `RAGFLOW_IMAGE` 的註解提示選擇華為雲或阿里雲的對應映像。
|
||||||
@ -181,9 +188,6 @@
|
|||||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||||
|
|
||||||
* Running on all addresses (0.0.0.0)
|
* Running on all addresses (0.0.0.0)
|
||||||
* Running on http://127.0.0.1:9380
|
|
||||||
* Running on http://x.x.x.x:9380
|
|
||||||
INFO:werkzeug:Press CTRL+C to quit
|
|
||||||
```
|
```
|
||||||
|
|
||||||
> 如果您跳過這一步驟系統確認步驟就登入 RAGFlow,你的瀏覽器有可能會提示 `network anormal` 或 `網路異常`,因為 RAGFlow 可能並未完全啟動成功。
|
> 如果您跳過這一步驟系統確認步驟就登入 RAGFlow,你的瀏覽器有可能會提示 `network anormal` 或 `網路異常`,因為 RAGFlow 可能並未完全啟動成功。
|
||||||
@ -200,7 +204,7 @@
|
|||||||
|
|
||||||
系統配置涉及以下三份文件:
|
系統配置涉及以下三份文件:
|
||||||
|
|
||||||
- [.env](./docker/.env):存放一些基本的系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
|
- [.env](./docker/.env):存放一些系統環境變量,例如 `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` 等。
|
||||||
- [service_conf.yaml.template](./docker/service_conf.yaml.template):設定各類別後台服務。
|
- [service_conf.yaml.template](./docker/service_conf.yaml.template):設定各類別後台服務。
|
||||||
- [docker-compose.yml](./docker/docker-compose.yml): 系統依賴該檔案完成啟動。
|
- [docker-compose.yml](./docker/docker-compose.yml): 系統依賴該檔案完成啟動。
|
||||||
|
|
||||||
@ -215,7 +219,7 @@
|
|||||||
> 所有系統配置都需要透過系統重新啟動生效:
|
> 所有系統配置都需要透過系統重新啟動生效:
|
||||||
>
|
>
|
||||||
> ```bash
|
> ```bash
|
||||||
> $ docker compose -f docker/docker-compose.yml up -d
|
> $ docker compose -f docker-compose.yml up -d
|
||||||
> ```
|
> ```
|
||||||
|
|
||||||
### 把文檔引擎從 Elasticsearch 切換成為 Infinity
|
||||||
@ -227,13 +231,14 @@ RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換
|
|||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml down -v
|
$ docker compose -f docker/docker-compose.yml down -v
|
||||||
```
|
```
|
||||||
|
Note: `-v` 將會刪除 docker 容器的 volumes,已有的資料會被清空。
|
||||||
|
|
||||||
2. 設定 **docker/.env** 目錄中的 `DOC_ENGINE` 為 `infinity`.
|
2. 設定 **docker/.env** 目錄中的 `DOC_ENGINE` 為 `infinity`.
|
||||||
|
|
||||||
3. 啟動容器:
|
3. 啟動容器:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
$ docker compose -f docker-compose.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
@ -246,7 +251,7 @@ RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔧 原始碼編譯 Docker 映像(包含 embedding 模型)
|
## 🔧 原始碼編譯 Docker 映像(包含 embedding 模型)
|
||||||
@ -256,7 +261,7 @@ docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t in
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔨 以原始碼啟動服務
|
## 🔨 以原始碼啟動服務
|
||||||
@ -265,7 +270,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
pipx install uv
|
pipx install uv
|
||||||
export UV_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple
|
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
|
||||||
```
|
```
|
||||||
|
|
||||||
2. 下載原始碼並安裝 Python 依賴:
|
2. 下載原始碼並安裝 Python 依賴:
|
||||||
@ -322,9 +327,12 @@ npm install
|
|||||||
## 📚 技術文檔
|
## 📚 技術文檔
|
||||||
|
|
||||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||||
|
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||||
|
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||||
|
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||||
- [References](https://ragflow.io/docs/dev/category/references)
|
- [References](https://ragflow.io/docs/dev/category/references)
|
||||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||||
|
|
||||||
## 📜 路線圖
|
## 📜 路線圖
|
||||||
|
|
||||||
@ -332,7 +340,7 @@ npm install
|
|||||||
|
|
||||||
## 🏄 開源社群
|
## 🏄 開源社群
|
||||||
|
|
||||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
- [Discord](https://discord.gg/zd4qPW6t)
|
||||||
- [Twitter](https://twitter.com/infiniflowai)
|
- [Twitter](https://twitter.com/infiniflowai)
|
||||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||||
|
|
||||||
|
|||||||
README_zh.md (64 changed lines)
@ -22,7 +22,7 @@
|
|||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.16.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.16.0">
|
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.18.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.18.0">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
@ -36,7 +36,7 @@
|
|||||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||||
<a href="https://demo.ragflow.io">Demo</a>
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
@ -55,11 +55,10 @@
|
|||||||
|
|
||||||
## 🔥 近期更新

- 2025-03-19 PDF和DOCX中的图支持用多模态大模型去解析得到描述。
- 2025-02-28 结合互联网搜索(Tavily),对于任意大模型实现类似 Deep Research 的推理功能。
- 2025-02-05 更新硅基流动的模型列表,增加了对 Deepseek-R1/DeepSeek-V3 的支持。
- 2025-01-26 优化知识图谱的提取和应用,提供了多种配置选择。
- 2024-12-18 升级了 DeepDoc 的文档布局分析模型。
- 2024-12-04 支持知识库的 Pagerank 分数。
- 2024-11-22 完善了 Agent 中的变量定义和使用。
- 2024-11-01 对解析后的 chunk 加入关键词抽取和相关问题生成以提高召回的准确度。
- 2024-08-22 支持用 RAG 技术实现从自然语言到 SQL 语句的转换。
@ -146,19 +145,27 @@
|
|||||||
|
|
||||||
3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:
|
3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:
|
||||||
|
|
||||||
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.16.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.16.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.16.0` 来下载 RAGFlow 镜像的 `v0.16.0` 完整发行版。
|
> [!CAUTION]
|
||||||
|
> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
|
||||||
|
> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。
|
||||||
|
|
||||||
|
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.18.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.18.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.18.0` 来下载 RAGFlow 镜像的 `v0.18.0` 完整发行版。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow
|
$ cd ragflow/docker
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
# Use CPU for embedding and DeepDoc tasks:
|
||||||
|
$ docker compose -f docker-compose.yml up -d
|
||||||
|
|
||||||
|
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||||
|
# docker compose -f docker-compose-gpu.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||||
| v0.16.0 | ≈9 | :heavy_check_mark: | Stable release |
|
| v0.18.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||||
| v0.16.0-slim | ≈2 | ❌ | Stable release |
|
| v0.18.0-slim | ≈2 | ❌ | Stable release |
|
||||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> 如果你遇到 Docker 镜像拉不下来的问题,可以在 **docker/.env** 文件内根据变量 `RAGFLOW_IMAGE` 的注释提示选择华为云或者阿里云的相应镜像。
|
> 如果你遇到 Docker 镜像拉不下来的问题,可以在 **docker/.env** 文件内根据变量 `RAGFLOW_IMAGE` 的注释提示选择华为云或者阿里云的相应镜像。
|
||||||
@ -182,12 +189,9 @@
|
|||||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||||
|
|
||||||
* Running on all addresses (0.0.0.0)
|
* Running on all addresses (0.0.0.0)
|
||||||
* Running on http://127.0.0.1:9380
|
|
||||||
* Running on http://x.x.x.x:9380
|
|
||||||
INFO:werkzeug:Press CTRL+C to quit
|
|
||||||
```
|
```
|
||||||
|
|
||||||
> 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。
|
> 如果您在没有看到上面的提示信息出来之前,就尝试登录 RAGFlow,你的浏览器有可能会提示 `network anormal` 或 `网络异常`。
|
||||||
|
|
||||||
5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
|
5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
|
||||||
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
|
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
|
||||||
@ -216,7 +220,7 @@
|
|||||||
> 所有系统配置都需要通过系统重启生效:
|
> 所有系统配置都需要通过系统重启生效:
|
||||||
>
|
>
|
||||||
> ```bash
|
> ```bash
|
||||||
> $ docker compose -f docker/docker-compose.yml up -d
|
> $ docker compose -f docker-compose.yml up -d
|
||||||
> ```
|
> ```
|
||||||
|
|
||||||
### 把文档引擎从 Elasticsearch 切换成为 Infinity
|
### 把文档引擎从 Elasticsearch 切换成为 Infinity
|
||||||
@ -228,13 +232,14 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换
|
|||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml down -v
|
$ docker compose -f docker/docker-compose.yml down -v
|
||||||
```
|
```
|
||||||
|
Note: `-v` 将会删除 docker 容器的 volumes,已有的数据会被清空。
|
||||||
|
|
||||||
2. 设置 **docker/.env** 目录中的 `DOC_ENGINE` 为 `infinity`.
|
2. 设置 **docker/.env** 目录中的 `DOC_ENGINE` 为 `infinity`.
|
||||||
|
|
||||||
3. 启动容器:
|
3. 启动容器:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml up -d
|
$ docker compose -f docker-compose.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
@ -247,7 +252,7 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔧 源码编译 Docker 镜像(包含 embedding 模型)
|
## 🔧 源码编译 Docker 镜像(包含 embedding 模型)
|
||||||
@ -257,7 +262,7 @@ docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t in
|
|||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||||
```
|
```
|
||||||
|
|
||||||
## 🔨 以源代码启动服务
|
## 🔨 以源代码启动服务
|
||||||
@ -266,7 +271,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
pipx install uv
|
pipx install uv
|
||||||
export UV_INDEX=https://pypi.tuna.tsinghua.edu.cn/simple
|
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
|
||||||
```
|
```
|
||||||
|
|
||||||
2. 下载源代码并安装 Python 依赖:
|
2. 下载源代码并安装 Python 依赖:
|
||||||
@ -283,12 +288,11 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
|||||||
docker compose -f docker/docker-compose-base.yml up -d
|
docker compose -f docker/docker-compose-base.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
在 `/etc/hosts` 中添加以下代码,将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
在 `/etc/hosts` 中添加以下代码,目的是将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
||||||
|
|
||||||
```
|
```
|
||||||
127.0.0.1 es01 infinity mysql minio redis
|
127.0.0.1 es01 infinity mysql minio redis
|
||||||
```
|
```
|
||||||
|
|
||||||
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@ -317,13 +321,21 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
|||||||
_以下界面说明系统已经成功启动:_
|
_以下界面说明系统已经成功启动:_
|
||||||
|
|
||||||

|

|
||||||
|
8. 开发完成后停止 RAGFlow 服务
|
||||||
|
停止 RAGFlow 前端和后端服务:
|
||||||
|
```bash
|
||||||
|
pkill -f "ragflow_server.py|task_executor.py"
|
||||||
|
```
|
||||||
|
|
||||||
## 📚 技术文档
|
## 📚 技术文档
|
||||||
|
|
||||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||||
|
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||||
|
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||||
|
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||||
- [References](https://ragflow.io/docs/dev/category/references)
|
- [References](https://ragflow.io/docs/dev/category/references)
|
||||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||||
|
|
||||||
## 📜 路线图
|
## 📜 路线图
|
||||||
|
|
||||||
@ -331,7 +343,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
|||||||
|
|
||||||
## 🏄 开源社区
|
## 🏄 开源社区
|
||||||
|
|
||||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
- [Discord](https://discord.gg/zd4qPW6t)
|
||||||
- [Twitter](https://twitter.com/infiniflowai)
|
- [Twitter](https://twitter.com/infiniflowai)
|
||||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||||
|
|
||||||
|
|||||||
agent/canvas.py

@@ -15,7 +15,6 @@
 #
 import logging
 import json
-from abc import ABC
 from copy import deepcopy
 from functools import partial

@@ -25,7 +24,7 @@ from agent.component import component_class
 from agent.component.base import ComponentBase


-class Canvas(ABC):
+class Canvas:
     """
     dsl = {
         "components": {
@@ -162,7 +161,7 @@ class Canvas(ABC):
             self.components[k]["obj"].reset()
         self._embed_id = ""

-    def get_compnent_name(self, cid):
+    def get_component_name(self, cid):
         for n in self.dsl["graph"]["nodes"]:
             if cid == n["id"]:
                 return n["data"]["name"]
@@ -210,7 +209,7 @@ class Canvas(ABC):
                 if c not in waiting:
                     waiting.append(c)
                 continue
-            yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
+            yield "*'{}'* is running...🕞".format(self.get_component_name(c))

             if cpn.component_name.lower() == "iteration":
                 st_cpn = cpn.get_start()
@@ -236,7 +235,7 @@ class Canvas(ABC):
                 pid = self.components[cid]["parent_id"]
                 o, _ = self.components[cid]["obj"].output(allow_partial=False)
                 oo, _ = self.components[pid]["obj"].output(allow_partial=False)
-                self.components[pid]["obj"].set(pd.concat([oo, o], ignore_index=True))
+                self.components[pid]["obj"].set_output(pd.concat([oo, o], ignore_index=True).dropna())
                 downstream = [pid]

                 for m in prepare2run(downstream):
@@ -253,20 +252,20 @@ class Canvas(ABC):
             if loop:
                 raise OverflowError(f"Too much loops: {loop}")

+            downstream = []
             if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                 switch_out = cpn["obj"].output()[1].iloc[0, 0]
                 assert switch_out in self.components, \
                     "{}'s output: {} not valid.".format(cpn_id, switch_out)
-                for m in prepare2run([switch_out]):
-                    yield {"content": m, "running_status": True}
-                continue
+                downstream = [switch_out]
+            else:
+                downstream = cpn["downstream"]

-            downstream = cpn["downstream"]
             if not downstream and cpn.get("parent_id"):
                 pid = cpn["parent_id"]
                 _, o = cpn["obj"].output(allow_partial=False)
                 _, oo = self.components[pid]["obj"].output(allow_partial=False)
-                self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True))
+                self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True).dropna())
                 downstream = [pid]

             for m in prepare2run(downstream):
|
|||||||
@ -384,6 +384,11 @@ class ComponentBase(ABC):
|
|||||||
"params": {}
|
"params": {}
|
||||||
}
|
}
|
||||||
"""
|
"""
|
||||||
|
out = getattr(self._param, self._param.output_var_name)
|
||||||
|
if isinstance(out, pd.DataFrame) and "chunks" in out:
|
||||||
|
del out["chunks"]
|
||||||
|
setattr(self._param, self._param.output_var_name, out)
|
||||||
|
|
||||||
return """{{
|
return """{{
|
||||||
"component_name": "{}",
|
"component_name": "{}",
|
||||||
"params": {},
|
"params": {},
|
||||||
@ -396,6 +401,8 @@ class ComponentBase(ABC):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def __init__(self, canvas, id, param: ComponentParamBase):
|
def __init__(self, canvas, id, param: ComponentParamBase):
|
||||||
|
from agent.canvas import Canvas # Local import to avoid cyclic dependency
|
||||||
|
assert isinstance(canvas, Canvas), "canvas must be an instance of Canvas"
|
||||||
self._canvas = canvas
|
self._canvas = canvas
|
||||||
self._id = id
|
self._id = id
|
||||||
self._param = param
|
self._param = param
|
||||||
@ -429,7 +436,7 @@ class ComponentBase(ABC):
|
|||||||
if not isinstance(o, partial):
|
if not isinstance(o, partial):
|
||||||
if not isinstance(o, pd.DataFrame):
|
if not isinstance(o, pd.DataFrame):
|
||||||
if isinstance(o, list):
|
if isinstance(o, list):
|
||||||
return self._param.output_var_name, pd.DataFrame(o)
|
return self._param.output_var_name, pd.DataFrame(o).dropna()
|
||||||
if o is None:
|
if o is None:
|
||||||
return self._param.output_var_name, pd.DataFrame()
|
return self._param.output_var_name, pd.DataFrame()
|
||||||
return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
|
return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
|
||||||
@ -437,15 +444,15 @@ class ComponentBase(ABC):
|
|||||||
|
|
||||||
         if allow_partial or not isinstance(o, partial):
             if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
-                return pd.DataFrame(o if isinstance(o, list) else [o])
+                return pd.DataFrame(o if isinstance(o, list) else [o]).dropna()
             return self._param.output_var_name, o

         outs = None
         for oo in o():
             if not isinstance(oo, pd.DataFrame):
-                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
+                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo]).dropna()
             else:
-                outs = oo
+                outs = oo.dropna()
         return self._param.output_var_name, outs

     def reset(self):
@@ -463,6 +470,8 @@ class ComponentBase(ABC):
         if len(self._canvas.path) > 1:
             reversed_cpnts.extend(self._canvas.path[-2])
         reversed_cpnts.extend(self._canvas.path[-1])
+        up_cpns = self.get_upstream()
+        reversed_up_cpnts = [cpn for cpn in reversed_cpnts if cpn in up_cpns]

         if self._param.query:
             self._param.inputs = []
@@ -484,7 +493,7 @@ class ComponentBase(ABC):
                 if q["component_id"].lower().find("answer") == 0:
                     txt = []
                     for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
-                        txt.append(f"{r.upper()}: {c}")
+                        txt.append(f"{r.upper()}:{c}")
                     txt = "\n".join(txt)
                     self._param.inputs.append({"content": txt, "component_id": q["component_id"]})
                     outs.append(pd.DataFrame([{"content": txt}]))
@@ -505,7 +514,7 @@ class ComponentBase(ABC):

         upstream_outs = []

-        for u in reversed_cpnts[::-1]:
+        for u in reversed_up_cpnts[::-1]:
             if self.get_component_name(u) in ["switch", "concentrator"]:
                 continue
             if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
@@ -545,7 +554,7 @@ class ComponentBase(ABC):
         return df

     def get_input_elements(self):
-        assert self._param.query, "Please identify input parameters firstly."
+        assert self._param.query, "Please verify the input parameters first."
         eles = []
         for q in self._param.query:
             if q.get("component_id"):
@@ -555,7 +564,7 @@ class ComponentBase(ABC):
                     eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
                     continue

-                eles.append({"name": self._canvas.get_compnent_name(cpn_id), "key": cpn_id})
+                eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
             else:
                 eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
         return eles
@@ -565,8 +574,10 @@ class ComponentBase(ABC):
         if len(self._canvas.path) > 1:
             reversed_cpnts.extend(self._canvas.path[-2])
         reversed_cpnts.extend(self._canvas.path[-1])
+        up_cpns = self.get_upstream()
+        reversed_up_cpnts = [cpn for cpn in reversed_cpnts if cpn in up_cpns]

-        for u in reversed_cpnts[::-1]:
+        for u in reversed_up_cpnts[::-1]:
             if self.get_component_name(u) in ["switch", "answer"]:
                 continue
             return self._canvas.get_component(u)["obj"].output()[1]
@@ -584,3 +595,7 @@ class ComponentBase(ABC):
     def get_parent(self):
         pid = self._canvas.get_component(self._id)["parent_id"]
         return self._canvas.get_component(pid)["obj"]
+
+    def get_upstream(self):
+        cpn_nms = self._canvas.get_component(self._id)['upstream']
+        return cpn_nms
@@ -50,26 +50,29 @@ class CategorizeParam(GenerateParam):
         for c, desc in self.category_description.items():
             if desc.get("description"):
                 descriptions.append(
-                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))
+                    "\nCategory: {}\nDescription: {}".format(c, desc["description"]))

         self.prompt = """
-        You're a text classifier. You need to categorize the user’s questions into {} categories,
-        namely: {}
-        Here's description of each category:
-        {}
-
-        You could learn from the following examples:
-        {}
-        You could learn from the above examples.
-        Just mention the category names, no need for any additional words.
-
-        ---- Real Data ----
-        {}
+        Role: You're a text classifier.
+        Task: You need to categorize the user’s questions into {} categories, namely: {}
+
+        Here's description of each category:
+        {}
+
+        You could learn from the following examples:
+        {}
+        You could learn from the above examples.
+
+        Requirements:
+        - Just mention the category names, no need for any additional words.
+
+        ---- Real Data ----
+        USER: {}\n
         """.format(
             len(self.category_description.keys()),
             "/".join(list(self.category_description.keys())),
             "\n".join(descriptions),
-            "- ".join(cate_lines),
+            "\n\n- ".join(cate_lines),
             chat_hist
         )
         return self.prompt
@@ -80,18 +83,26 @@ class Categorize(Generate, ABC):

     def _run(self, history, **kwargs):
         input = self.get_input()
+        input = " - ".join(input["content"]) if "content" in input else ""
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
         ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
                             self._param.gen_conf())
         logging.debug(f"input: {input}, answer: {str(ans)}")
+        # Count the number of times each category appears in the answer.
+        category_counts = {}
         for c in self._param.category_description.keys():
-            if ans.lower().find(c.lower()) >= 0:
-                return Categorize.be_output(self._param.category_description[c]["to"])
+            count = ans.lower().count(c.lower())
+            category_counts[c] = count
+
+        # If a category is found, return the category with the highest count.
+        if any(category_counts.values()):
+            max_category = max(category_counts.items(), key=lambda x: x[1])
+            return Categorize.be_output(self._param.category_description[max_category[0]]["to"])

         return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])

     def debug(self, **kwargs):
         df = self._run([], **kwargs)
         cpn_id = df.iloc[0, 0]
-        return Categorize.be_output(self._canvas.get_compnent_name(cpn_id))
+        return Categorize.be_output(self._canvas.get_component_name(cpn_id))
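
Note on the Categorize change above: instead of returning on the first category name found in the answer, the component now counts how often every category name occurs and routes to the most frequent one, falling back to the last configured category when nothing matches. A minimal, self-contained sketch of that selection rule; the `category_description` mapping and component IDs below are invented for illustration:

```python
def pick_category(answer: str, category_description: dict) -> str:
    # Count case-insensitive occurrences of every category name in the LLM answer.
    counts = {c: answer.lower().count(c.lower()) for c in category_description}
    if any(counts.values()):
        best = max(counts.items(), key=lambda x: x[1])[0]
        return category_description[best]["to"]
    # Nothing matched: fall back to the component wired to the last category.
    return list(category_description.values())[-1]["to"]

print(pick_category("Sounds like a refund case; refund it.",
                    {"refund": {"to": "Generate:Refund"}, "other": {"to": "Generate:Other"}}))
# -> Generate:Refund
```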
@@ -82,7 +82,10 @@ class Email(ComponentBase, ABC):
             logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

             context = smtplib.ssl.create_default_context()
-            with smtplib.SMTP_SSL(self._param.smtp_server, self._param.smtp_port, context=context) as server:
+            with smtplib.SMTP(self._param.smtp_server, self._param.smtp_port) as server:
+                server.ehlo()
+                server.starttls(context=context)
+                server.ehlo()
                 # Login
                 logging.info(f"Attempting to login with email: {self._param.email}")
                 server.login(self._param.email, self._param.password)
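
Note on the Email change above: the SMTP connection is now opened in plaintext and upgraded with STARTTLS, the handshake typically used on port 587, instead of the implicit-TLS `SMTP_SSL` socket commonly used on port 465. A minimal sketch of the same pattern; the host, port, and credentials below are placeholders:

```python
import smtplib
import ssl

smtp_server, smtp_port = "smtp.example.com", 587        # placeholder values
sender, password = "user@example.com", "app-password"   # placeholder values

context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, smtp_port) as server:
    server.ehlo()                      # greet the server
    server.starttls(context=context)   # upgrade the plaintext connection to TLS
    server.ehlo()                      # greet again over the encrypted channel
    server.login(sender, password)
    server.sendmail(sender, ["to@example.com"], "Subject: test\r\n\r\nhello")
```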
@@ -52,15 +52,16 @@ class ExeSQLParam(GenerateParam):
         self.check_positive_integer(self.top_n, "Number of records")
         if self.database == "rag_flow":
             if self.host == "ragflow-mysql":
-                raise ValueError("The host is not accessible.")
+                raise ValueError("For the security reason, it dose not support database named rag_flow.")
             if self.password == "infini_rag_flow":
-                raise ValueError("The host is not accessible.")
+                raise ValueError("For the security reason, it dose not support database named rag_flow.")


 class ExeSQL(Generate, ABC):
     component_name = "ExeSQL"

-    def _refactor(self,ans):
+    def _refactor(self, ans):
+        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
         match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
         if match:
             ans = match.group(1)  # Query content
@@ -78,7 +79,6 @@ class ExeSQL(Generate, ABC):
         ans = self.get_input()
         ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
         ans = self._refactor(ans)
-        logging.info("db_type: ",self._param.db_type)
         if self._param.db_type in ["mysql", "mariadb"]:
             db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
@@ -87,11 +87,11 @@ class ExeSQL(Generate, ABC):
                                  port=self._param.port, password=self._param.password)
         elif self._param.db_type == 'mssql':
             conn_str = (
                 r'DRIVER={ODBC Driver 17 for SQL Server};'
                 r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
                 r'DATABASE=' + self._param.database + ';'
                 r'UID=' + self._param.username + ';'
                 r'PWD=' + self._param.password
             )
             db = pyodbc.connect(conn_str)
         try:
@@ -101,51 +101,50 @@ class ExeSQL(Generate, ABC):
         if not hasattr(self, "_loop"):
             setattr(self, "_loop", 0)
         self._loop += 1
-        input_list=re.split(r';', ans.replace(r"\n", " "))
+        input_list = re.split(r';', ans.replace(r"\n", " "))
         sql_res = []
         for i in range(len(input_list)):
-            single_sql=input_list[i]
+            single_sql = input_list[i]
             while self._loop <= self._param.loop:
-                self._loop+=1
+                self._loop += 1
                 if not single_sql:
                     break
                 try:
-                    logging.info("single_sql: ", single_sql)
                     cursor.execute(single_sql)
                     if cursor.rowcount == 0:
                         sql_res.append({"content": "No record in the database!"})
                         break
                     if self._param.db_type == 'mssql':
-                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),columns = [desc[0] for desc in cursor.description])
+                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
+                                                               columns=[desc[0] for desc in cursor.description])
                     else:
                         single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
                         single_res.columns = [i[0] for i in cursor.description]
-                    sql_res.append({"content": single_res.to_markdown()})
+                    sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
                     break
                 except Exception as e:
                     single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
                     single_sql = self._refactor(single_sql)
             if self._loop > self._param.loop:
                 sql_res.append({"content": "Can't query the correct data via SQL statement."})
-                # raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
         db.close()
         if not sql_res:
             return ExeSQL.be_output("")
         return pd.DataFrame(sql_res)

-    def _regenerate_sql(self, failed_sql, error_message,**kwargs):
+    def _regenerate_sql(self, failed_sql, error_message, **kwargs):
         prompt = f'''
         ## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
         ## The original SQL statement is as follows:{failed_sql}.
         ## The contents of the SQL query error report is as follows:{error_message}.
         ## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
         '''
-        self._param.prompt=prompt
+        self._param.prompt = prompt
         kwargs_ = deepcopy(kwargs)
         kwargs_["stream"] = False
         response = Generate._run(self, [], **kwargs_)
         try:
-            regenerated_sql = response.loc[0,"content"]
+            regenerated_sql = response.loc[0, "content"]
             return regenerated_sql
         except Exception as e:
             logging.error(f"Failed to regenerate SQL: {e}")
@@ -13,15 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import re
 from functools import partial
 import pandas as pd
 from api.db import LLMType
 from api.db.services.conversation_service import structure_answer
-from api.db.services.dialog_service import message_fit_in
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from rag.prompts import message_fit_in


 class GenerateParam(ComponentParamBase):
@@ -69,36 +70,35 @@ class Generate(ComponentBase):
     component_name = "Generate"

     def get_dependent_components(self):
-        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
-                     if para.get("component_id") \
-                     and para["component_id"].lower().find("answer") < 0 \
-                     and para["component_id"].lower().find("begin") < 0])
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
         return list(cpnts)

     def set_cite(self, retrieval_res, answer):
-        retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
         if "empty_response" in retrieval_res.columns:
             retrieval_res["empty_response"].fillna("", inplace=True)
+        chunks = json.loads(retrieval_res["chunks"][0])
         answer, idx = settings.retrievaler.insert_citations(answer,
-                                                            [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
-                                                            [ck["vector"] for _, ck in retrieval_res.iterrows()],
+                                                            [ck["content_ltks"] for ck in chunks],
+                                                            [ck["vector"] for ck in chunks],
                                                             LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                                       self._canvas.get_embedding_model()), tkweight=0.7,
                                                             vtweight=0.3)
         doc_ids = set([])
         recall_docs = []
         for i in idx:
-            did = retrieval_res.loc[int(i), "doc_id"]
+            did = chunks[int(i)]["doc_id"]
             if did in doc_ids:
                 continue
             doc_ids.add(did)
-            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})
+            recall_docs.append({"doc_id": did, "doc_name": chunks[int(i)]["docnm_kwd"]})

-        del retrieval_res["vector"]
-        del retrieval_res["content_ltks"]
+        for c in chunks:
+            del c["vector"]
+            del c["content_ltks"]

         reference = {
-            "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
+            "chunks": chunks,
             "doc_aggs": recall_docs
         }

@@ -110,10 +110,26 @@ class Generate(ComponentBase):
         return res

     def get_input_elements(self):
-        if self._param.parameters:
-            return [{"key": "user", "name": "Input your question here:"}, *self._param.parameters]
-
-        return [{"key": "user", "name": "Input your question here:"}]
+        key_set = set([])
+        res = [{"key": "user", "name": "Input your question here:"}]
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res

     def _run(self, history, **kwargs):
         chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
@@ -121,22 +137,20 @@ class Generate(ComponentBase):

         retrieval_res = []
         self._param.inputs = []
-        for para in self._param.parameters:
-            if not para.get("component_id"):
-                continue
-            component_id = para["component_id"].split("@")[0]
-            if para["component_id"].lower().find("@") >= 0:
-                cpn_id, key = para["component_id"].split("@")
+        for para in self.get_input_elements()[1:]:
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
                 for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                     if p["key"] == key:
                         kwargs[para["key"]] = p.get("value", "")
                         self._param.inputs.append(
-                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                            {"component_id": para["key"], "content": kwargs[para["key"]]})
                         break
                 else:
                     assert False, f"Can't find parameter '{key}' for {cpn_id}"
                 continue

+            component_id = para["key"]
             cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
                 hist = self._canvas.get_history(1)
@@ -152,8 +166,8 @@ class Generate(ComponentBase):
             else:
                 if cpn.component_name.lower() == "retrieval":
                     retrieval_res.append(out)
-                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
-                self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
+                self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})

         if retrieval_res:
             retrieval_res = pd.concat(retrieval_res, ignore_index=True)
@@ -175,19 +189,20 @@ class Generate(ComponentBase):
             return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             return pd.DataFrame([res])

         msg = self._canvas.get_history(self._param.message_history_window_size)
         if len(msg) < 1:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
         if len(msg) < 2:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
+        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)

-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+        if self._param.cite and "chunks" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, ans)
             return pd.DataFrame([res])

@@ -196,25 +211,27 @@ class Generate(ComponentBase):
     def stream_output(self, chat_mdl, prompt, retrieval_res):
         res = None
         if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
-            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
-                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
+            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
+            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
             yield res
             self.set_output(res)
             return

         msg = self._canvas.get_history(self._param.message_history_window_size)
+        if msg and msg[0]['role'] == 'assistant':
+            msg.pop(0)
         if len(msg) < 1:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
         if len(msg) < 2:
-            msg.append({"role": "user", "content": ""})
+            msg.append({"role": "user", "content": "Output: "})
         answer = ""
         for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
             res = {"content": ans, "reference": []}
             answer = ans
             yield res

-        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
+        if self._param.cite and "chunks" in retrieval_res.columns:
             res = self.set_cite(retrieval_res, answer)
             yield res

@@ -230,5 +247,6 @@ class Generate(ComponentBase):
         for n, v in kwargs.items():
             prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

-        ans = chat_mdl.chat(prompt, [{"role": "user", "content": kwargs.get("user", "")}], self._param.gen_conf())
+        u = kwargs.get("user")
+        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
         return pd.DataFrame([ans])
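
Note on the Generate changes above: input elements are now discovered by scanning the prompt for `{component_id}` and `{begin@parameter}` placeholders rather than reading a configured parameter list, and citations are built from a JSON-serialized `chunks` column instead of raw DataFrame rows. A small sketch of the placeholder scan alone; the prompt text and component IDs are invented for illustration:

```python
import re

prompt = "Answer with {Retrieval:BraveOwlsWait} for a user living in {begin@city}."

# Same pattern as the diff: IDs like "Retrieval:Xxx" or "begin@key" inside curly braces.
found = []
for m in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", prompt, flags=re.IGNORECASE):
    if m.group(1) not in found:
        found.append(m.group(1))

print(found)  # ['Retrieval:BraveOwlsWait', 'begin@city']
```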
@@ -35,12 +35,14 @@ class InvokeParam(ComponentParamBase):
         self.url = ""
         self.timeout = 60
         self.clean_html = False
+        self.datatype = "json"  # New parameter to determine data posting type

     def check(self):
         self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
         self.check_empty(self.url, "End point URL")
         self.check_positive_integer(self.timeout, "Timeout time in second")
         self.check_boolean(self.clean_html, "Clean HTML")
+        self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata'])  # Check for valid datapost value


 class Invoke(ComponentBase, ABC):
@@ -94,22 +96,36 @@ class Invoke(ComponentBase, ABC):
             return Invoke.be_output(response.text)

         if method == 'put':
-            response = requests.put(url=url,
-                                    data=args,
-                                    headers=headers,
-                                    proxies=proxies,
-                                    timeout=self._param.timeout)
+            if self._param.datatype.lower() == 'json':
+                response = requests.put(url=url,
+                                        json=args,
+                                        headers=headers,
+                                        proxies=proxies,
+                                        timeout=self._param.timeout)
+            else:
+                response = requests.put(url=url,
+                                        data=args,
+                                        headers=headers,
+                                        proxies=proxies,
+                                        timeout=self._param.timeout)
             if self._param.clean_html:
                 sections = HtmlParser()(None, response.content)
                 return Invoke.be_output("\n".join(sections))
             return Invoke.be_output(response.text)

         if method == 'post':
-            response = requests.post(url=url,
-                                     json=args,
-                                     headers=headers,
-                                     proxies=proxies,
-                                     timeout=self._param.timeout)
+            if self._param.datatype.lower() == 'json':
+                response = requests.post(url=url,
+                                         json=args,
+                                         headers=headers,
+                                         proxies=proxies,
+                                         timeout=self._param.timeout)
+            else:
+                response = requests.post(url=url,
+                                         data=args,
+                                         headers=headers,
+                                         proxies=proxies,
+                                         timeout=self._param.timeout)
             if self._param.clean_html:
                 sections = HtmlParser()(None, response.content)
                 return Invoke.be_output("\n".join(sections))
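
Note on the Invoke change above: the new `datatype` parameter decides whether the body of a POST/PUT request is sent as JSON (`json=`) or as URL-encoded form data (`data=`). A minimal sketch of the difference with `requests`; the endpoint and payload are placeholders:

```python
import requests

url = "https://httpbin.org/post"          # placeholder endpoint
args = {"query": "hello", "top_k": "3"}   # placeholder payload

r_json = requests.post(url, json=args, timeout=10)   # datatype == "json"
r_form = requests.post(url, data=args, timeout=10)   # datatype == "formdata"

print(r_json.request.headers["Content-Type"])  # application/json
print(r_form.request.headers["Content-Type"])  # application/x-www-form-urlencoded
```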
@@ -38,6 +38,10 @@ class IterationItem(ComponentBase, ABC):
         ans = parent.get_input()
         ans = parent._param.delimiter.join(ans["content"]) if "content" in ans else ""
         ans = [a.strip() for a in ans.split(parent._param.delimiter)]
+        if not ans:
+            self._idx = -1
+            return pd.DataFrame()
+
         df = pd.DataFrame([{"content": ans[self._idx]}])
         self._idx += 1
         if self._idx >= len(ans):
@@ -57,9 +57,10 @@ class KeywordExtract(Generate, ABC):
         ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
                             self._param.gen_conf())

+        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
         ans = re.sub(r".*keyword:", "", ans).strip()
         logging.debug(f"ans: {ans}")
         return KeywordExtract.be_output(ans)

     def debug(self, **kwargs):
         return self._run([], **kwargs)
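
Note: several components touched in this change set (ExeSQL, Generate, KeywordExtract, and the new deep researcher) now strip the `<think>...</think>` block that reasoning models prepend to their replies, so only the final answer reaches downstream parsing. A minimal sketch of that cleanup:

```python
import re

def strip_think(answer: str) -> str:
    # Drop everything between <think> and </think>, across newlines.
    return re.sub(r"<think>.*</think>", "", answer, flags=re.DOTALL).strip()

raw = "<think>The user wants keywords, let me think...</think>keyword: ragflow, agent"
print(strip_think(raw))  # keyword: ragflow, agent
```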
@@ -13,17 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import logging
 from abc import ABC

 import pandas as pd

 from api.db import LLMType
-from api.db.services.dialog_service import label_question
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from agent.component.base import ComponentBase, ComponentParamBase
+from rag.app.tag import label_question
+from rag.prompts import kb_prompt
+from rag.utils.tavily_conn import Tavily


 class RetrievalParam(ComponentParamBase):
@@ -40,10 +43,12 @@ class RetrievalParam(ComponentParamBase):
         self.kb_ids = []
         self.rerank_id = ""
         self.empty_response = ""
+        self.tavily_api_key = ""
+        self.use_kg = False

     def check(self):
         self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
-        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
+        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
         self.check_positive_number(self.top_n, "[Retrieval] Top N")

@@ -53,7 +58,6 @@ class Retrieval(ComponentBase, ABC):
     def _run(self, history, **kwargs):
         query = self.get_input()
         query = str(query["content"][0]) if "content" in query else ""
-
         kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
         if not kbs:
             return Retrieval.be_output("")
@@ -61,18 +65,38 @@ class Retrieval(ComponentBase, ABC):
         embd_nms = list(set([kb.embd_id for kb in kbs]))
         assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

-        embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
-        self._canvas.set_embedding_model(embd_nms[0])
+        embd_mdl = None
+        if embd_nms:
+            embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
+            self._canvas.set_embedding_model(embd_nms[0])
+
         rerank_mdl = None
         if self._param.rerank_id:
             rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

-        kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
-                                                 1, self._param.top_n,
-                                                 self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
-                                                 aggs=False, rerank_mdl=rerank_mdl,
-                                                 rank_feature=label_question(query, kbs))
+        if kbs:
+            kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
+                                                     1, self._param.top_n,
+                                                     self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
+                                                     aggs=False, rerank_mdl=rerank_mdl,
+                                                     rank_feature=label_question(query, kbs))
+        else:
+            kbinfos = {"chunks": [], "doc_aggs": []}
+
+        if self._param.use_kg and kbs:
+            ck = settings.kg_retrievaler.retrieval(query,
+                                                   [kbs[0].tenant_id],
+                                                   self._param.kb_ids,
+                                                   embd_mdl,
+                                                   LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
+            if ck["content_with_weight"]:
+                kbinfos["chunks"].insert(0, ck)
+
+        if self._param.tavily_api_key:
+            tav = Tavily(self._param.tavily_api_key)
+            tav_res = tav.retrieve_chunks(query)
+            kbinfos["chunks"].extend(tav_res["chunks"])
+            kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
+
         if not kbinfos["chunks"]:
             df = Retrieval.be_output("")
@@ -80,10 +104,8 @@ class Retrieval(ComponentBase, ABC):
             df["empty_response"] = self._param.empty_response
             return df

-        df = pd.DataFrame(kbinfos["chunks"])
-        df["content"] = df["content_with_weight"]
-        del df["content_with_weight"]
+        df = pd.DataFrame({"content": kb_prompt(kbinfos, 200000), "chunks": json.dumps(kbinfos["chunks"])})
         logging.debug("{} {}".format(query, df))
-        return df
+        return df.dropna()

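
Note on the Retrieval change above: the component now returns one DataFrame whose `content` column holds the knowledge-base prompt text and whose `chunks` column holds the raw chunk list serialized as a JSON string, which Generate.set_cite later restores with `json.loads`. A small sketch of that hand-off; the chunk data below is hypothetical:

```python
import json
import pandas as pd

# Hypothetical retrieval result; real chunks come from the retriever, the knowledge graph, or Tavily.
chunks = [{"content_ltks": "ragflow is a rag engine", "vector": [0.1, 0.2],
           "doc_id": "d1", "docnm_kwd": "intro.pdf", "content_with_weight": "RAGFlow is a RAG engine."}]

df = pd.DataFrame({"content": ["Document: intro.pdf\nRAGFlow is a RAG engine."],
                   "chunks": [json.dumps(chunks)]})

restored = json.loads(df["chunks"][0])   # what set_cite() does on the Generate side
print(restored[0]["doc_id"])             # d1
```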
@@ -13,93 +13,82 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import logging
 from abc import ABC
-from api.db import LLMType
-from api.db.services.llm_service import LLMBundle
 from agent.component import GenerateParam, Generate
+from rag.prompts import full_question


 class RewriteQuestionParam(GenerateParam):

     """
     Define the QuestionRewrite component parameters.
     """

     def __init__(self):
         super().__init__()
         self.temperature = 0.9
         self.prompt = ""
+        self.language = ""

     def check(self):
         super().check()

-    def get_prompt(self, conv):
-        self.prompt = """
-        You are an expert at query expansion to generate a paraphrasing of a question.
-        I can't retrieval relevant information from the knowledge base by using user's question directly.
-        You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
-        writing the abbreviation in its entirety, adding some extra descriptions or explanations,
-        changing the way of expression, translating the original question into another language (English/Chinese), etc.
-        And return 5 versions of question and one is from translation.
-        Just list the question. No other words are needed.
-        """
-        return f"""
-Role: A helpful assistant
-Task: Generate a full user question that would follow the conversation.
-Requirements & Restrictions:
-  - Text generated MUST be in the same language of the original user's question.
-  - If the user's latest question is completely, don't do anything, just return the original question.
-  - DON'T generate anything except a refined question.
-
-######################
--Examples-
-######################
-# Example 1
-## Conversation
-USER: What is the name of Donald Trump's father?
-ASSISTANT: Fred Trump.
-USER: And his mother?
-###############
-Output: What's the name of Donald Trump's mother?
-------------
-# Example 2
-## Conversation
-USER: What is the name of Donald Trump's father?
-ASSISTANT: Fred Trump.
-USER: And his mother?
-ASSISTANT: Mary Trump.
-User: What's her full name?
-###############
-Output: What's the full name of Donald Trump's mother Mary Trump?
-######################
-# Real Data
-## Conversation
-{conv}
-###############
-        """
-        return self.prompt
-

 class RewriteQuestion(Generate, ABC):
     component_name = "RewriteQuestion"

     def _run(self, history, **kwargs):
         hist = self._canvas.get_history(self._param.message_history_window_size)
-        conv = []
-        for m in hist:
-            if m["role"] not in ["user", "assistant"]:
-                continue
-            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
-        conv = "\n".join(conv)
-
-        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
-        ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
-                            self._param.gen_conf())
+        query = self.get_input()
+        query = str(query["content"][0]) if "content" in query else ""
+        messages = [h for h in hist if h["role"]!="system"]
+        if messages[-1]["role"] != "user":
+            messages.append({"role": "user", "content": query})
+        ans = full_question(self._canvas.get_tenant_id(), self._param.llm_id, messages, self.gen_lang(self._param.language))
         self._canvas.history.pop()
         self._canvas.history.append(("user", ans))
-
-        logging.debug(ans)
         return RewriteQuestion.be_output(ans)

+    @staticmethod
+    def gen_lang(language):
+        # convert code lang to language word for the prompt
+        language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
+                         'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
+                         'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
+                         'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
+                         'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
+                         'zh-tw': 'Chinese', 'co': 'Corsican',
+                         'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
+                         'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
+                         'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
+                         'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
+                         'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
+                         'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
+                         'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
+                         'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
+                         'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
+                         'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
+                         'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
+                         'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
+                         'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
+                         'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
+                         'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
+                         'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
+                         'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
+                         'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
+                         'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
+                         'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
+                         'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
+                         'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
+                         'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
+                         'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
+                         'su': 'Sundanese',
+                         'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
+                         'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
+                         'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
+                         'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
+                         'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
+                         'yo': 'Yoruba', 'zu': 'Zulu'}
+        if language in language_dict:
+            return language_dict[language]
+        else:
+            return ""
@@ -54,7 +54,7 @@ class Switch(ComponentBase, ABC):
             for item in cond["items"]:
                 if not item["cpn_id"]:
                     continue
-                if item["cpn_id"].find("begin") >= 0:
+                if item["cpn_id"].lower().find("begin") >= 0 or item["cpn_id"].lower().find("answer") >= 0:
                     continue
                 cid = item["cpn_id"].split("@")[0]
                 res.append(cid)
@@ -75,7 +75,7 @@ class Switch(ComponentBase, ABC):
                             res.append(self.process_operator(p.get("value",""), item["operator"], item.get("value", "")))
                             break
                 else:
-                    out = self._canvas.get_component(cid)["obj"].output()[1]
+                    out = self._canvas.get_component(cid)["obj"].output(allow_partial=False)[1]
                     cpn_input = "" if "content" not in out.columns else " ".join([str(s) for s in out["content"]])
                     res.append(self.process_operator(cpn_input, item["operator"], item.get("value", "")))

@@ -38,27 +38,39 @@ class Template(ComponentBase):
     component_name = "Template"

     def get_dependent_components(self):
-        cpnts = set(
-            [
-                para["component_id"].split("@")[0]
-                for para in self._param.parameters
-                if para.get("component_id")
-                and para["component_id"].lower().find("answer") < 0
-                and para["component_id"].lower().find("begin") < 0
-            ]
-        )
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
         return list(cpnts)

+    def get_input_elements(self):
+        key_set = set([])
+        res = []
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res
+
     def _run(self, history, **kwargs):
         content = self._param.content

         self._param.inputs = []
-        for para in self._param.parameters:
-            if not para.get("component_id"):
-                continue
-            component_id = para["component_id"].split("@")[0]
-            if para["component_id"].lower().find("@") >= 0:
-                cpn_id, key = para["component_id"].split("@")
+        for para in self.get_input_elements():
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
                 for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                     if p["key"] == key:
                         value = p.get("value", "")
@@ -68,6 +80,7 @@ class Template(ComponentBase):
                     assert False, f"Can't find parameter '{key}' for {cpn_id}"
                 continue

+            component_id = para["key"]
             cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
                 hist = self._canvas.get_history(1)
@@ -96,16 +109,14 @@ class Template(ComponentBase):
                 pass

         for n, v in kwargs.items():
-            try:
-                v = json.dumps(v, ensure_ascii=False)
-            except Exception:
-                pass
+            if not isinstance(v, str):
+                try:
+                    v = json.dumps(v, ensure_ascii=False)
+                except Exception:
+                    pass
             content = re.sub(
                 r"\{%s\}" % re.escape(n), v, content
             )
-            content = re.sub(
-                r"(\\\"|\")", "", content
-            )
             content = re.sub(
                 r"(#+)", r" \1 ", content
             )
@@ -114,7 +125,7 @@ class Template(ComponentBase):

     def make_kwargs(self, para, kwargs, value):
         self._param.inputs.append(
-            {"component_id": para["component_id"], "content": value}
+            {"component_id": para["key"], "content": value}
         )
         try:
             value = json.loads(value)
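
Note on the Template change above: values are JSON-encoded only when they are not already strings, and the pass that stripped quote characters from the rendered content is gone, so string inputs pass through untouched. A minimal sketch of the substitution step; the keys and values are invented for illustration:

```python
import json
import re

content = "City: {begin@city}\nChunks: {Retrieval:AbleBeesSing}"
values = {"begin@city": "Berlin", "Retrieval:AbleBeesSing": [{"doc": "a.pdf"}]}

for name, v in values.items():
    if not isinstance(v, str):
        v = json.dumps(v, ensure_ascii=False)  # only serialize non-string values
    content = re.sub(r"\{%s\}" % re.escape(name), v, content)

print(content)
# City: Berlin
# Chunks: [{"doc": "a.pdf"}]
```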
Nine file diffs suppressed by the viewer because one or more lines are too long; two file diffs suppressed because they are too large.
agentic_reasoning/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
+from .deep_research import DeepResearcher as DeepResearcher
agentic_reasoning/deep_research.py (new file, 223 lines)
@@ -0,0 +1,223 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import re
+from functools import partial
+from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT, MAX_SEARCH_LIMIT, \
+    END_SEARCH_QUERY, REASON_PROMPT, RELEVANT_EXTRACTION_PROMPT
+from api.db.services.llm_service import LLMBundle
+from rag.nlp import extract_between
+from rag.prompts import kb_prompt
+from rag.utils.tavily_conn import Tavily
+
+
+class DeepResearcher:
+    def __init__(self,
+                 chat_mdl: LLMBundle,
+                 prompt_config: dict,
+                 kb_retrieve: partial = None,
+                 kg_retrieve: partial = None
+                 ):
+        self.chat_mdl = chat_mdl
+        self.prompt_config = prompt_config
+        self._kb_retrieve = kb_retrieve
+        self._kg_retrieve = kg_retrieve
+
+    @staticmethod
+    def _remove_query_tags(text):
+        """Remove query tags from text"""
+        pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
+        return re.sub(pattern, "", text)
+
+    @staticmethod
+    def _remove_result_tags(text):
+        """Remove result tags from text"""
+        pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
+        return re.sub(pattern, "", text)
+
+    def _generate_reasoning(self, msg_history):
+        """Generate reasoning steps"""
+        query_think = ""
+        if msg_history[-1]["role"] != "user":
+            msg_history.append({"role": "user", "content": "Continues reasoning with the new information.\n"})
+        else:
+            msg_history[-1]["content"] += "\n\nContinues reasoning with the new information.\n"
+
+        for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_history, {"temperature": 0.7}):
+            ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
+            if not ans:
+                continue
+            query_think = ans
+            yield query_think
+        return query_think
+
+    def _extract_search_queries(self, query_think, question, step_index):
+        """Extract search queries from thinking"""
+        queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
+        if not queries and step_index == 0:
+            # If this is the first step and no queries are found, use the original question as the query
+            queries = [question]
+        return queries
+
+    def _truncate_previous_reasoning(self, all_reasoning_steps):
+        """Truncate previous reasoning steps to maintain a reasonable length"""
+        truncated_prev_reasoning = ""
+        for i, step in enumerate(all_reasoning_steps):
+            truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"
+
+        prev_steps = truncated_prev_reasoning.split('\n\n')
+        if len(prev_steps) <= 5:
+            truncated_prev_reasoning = '\n\n'.join(prev_steps)
+        else:
+            truncated_prev_reasoning = ''
+            for i, step in enumerate(prev_steps):
+                if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
+                    truncated_prev_reasoning += step + '\n\n'
+                else:
+                    if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
+                        truncated_prev_reasoning += '...\n\n'
+
+        return truncated_prev_reasoning.strip('\n')
+
+    def _retrieve_information(self, search_query):
+        """Retrieve information from different sources"""
+        # 1. Knowledge base retrieval
+        kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}
+
+        # 2. Web retrieval (if Tavily API is configured)
+        if self.prompt_config.get("tavily_api_key"):
+            tav = Tavily(self.prompt_config["tavily_api_key"])
+            tav_res = tav.retrieve_chunks(search_query)
+            kbinfos["chunks"].extend(tav_res["chunks"])
+            kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
+
+        # 3. Knowledge graph retrieval (if configured)
+        if self.prompt_config.get("use_kg") and self._kg_retrieve:
+            ck = self._kg_retrieve(question=search_query)
+            if ck["content_with_weight"]:
+                kbinfos["chunks"].insert(0, ck)
+
+        return kbinfos
+
+    def _update_chunk_info(self, chunk_info, kbinfos):
+        """Update chunk information for citations"""
+        if not chunk_info["chunks"]:
+            # If this is the first retrieval, use the retrieval results directly
+            for k in chunk_info.keys():
+                chunk_info[k] = kbinfos[k]
+        else:
+            # Merge newly retrieved information, avoiding duplicates
+            cids = [c["chunk_id"] for c in chunk_info["chunks"]]
+            for c in kbinfos["chunks"]:
+                if c["chunk_id"] not in cids:
+                    chunk_info["chunks"].append(c)
+
+            dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
+            for d in kbinfos["doc_aggs"]:
+                if d["doc_id"] not in dids:
+                    chunk_info["doc_aggs"].append(d)
+
+    def _extract_relevant_info(self, truncated_prev_reasoning, search_query, kbinfos):
+        """Extract and summarize relevant information"""
+        summary_think = ""
+        for ans in self.chat_mdl.chat_streamly(
+                RELEVANT_EXTRACTION_PROMPT.format(
+                    prev_reasoning=truncated_prev_reasoning,
+                    search_query=search_query,
+                    document="\n".join(kb_prompt(kbinfos, 4096))
+                ),
+                [{"role": "user",
+                  "content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
+                {"temperature": 0.7}):
+            ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
+            if not ans:
+                continue
+            summary_think = ans
+            yield summary_think
+
+        return summary_think
+
+    def thinking(self, chunk_info: dict, question: str):
+        executed_search_queries = []
+        msg_history = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
+        all_reasoning_steps = []
+        think = "<think>"
+
+        for step_index in range(MAX_SEARCH_LIMIT + 1):
+            # Check if the maximum search limit has been reached
+            if step_index == MAX_SEARCH_LIMIT - 1:
+                summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
+                yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
+                all_reasoning_steps.append(summary_think)
+                msg_history.append({"role": "assistant", "content": summary_think})
+                break
+
+            # Step 1: Generate reasoning
+            query_think = ""
+            for ans in self._generate_reasoning(msg_history):
|
||||||
|
query_think = ans
|
||||||
|
yield {"answer": think + self._remove_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}
|
||||||
|
|
||||||
|
think += self._remove_query_tags(query_think)
|
||||||
|
all_reasoning_steps.append(query_think)
|
||||||
|
|
||||||
|
# Step 2: Extract search queries
|
||||||
|
queries = self._extract_search_queries(query_think, question, step_index)
|
||||||
|
if not queries and step_index > 0:
|
||||||
|
# If not the first step and no queries, end the search process
|
||||||
|
break
|
||||||
|
|
||||||
|
# Process each search query
|
||||||
|
for search_query in queries:
|
||||||
|
logging.info(f"[THINK]Query: {step_index}. {search_query}")
|
||||||
|
msg_history.append({"role": "assistant", "content": search_query})
|
||||||
|
think += f"\n\n> {step_index + 1}. {search_query}\n\n"
|
||||||
|
yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}
|
||||||
|
|
||||||
|
# Check if the query has already been executed
|
||||||
|
if search_query in executed_search_queries:
|
||||||
|
summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
|
||||||
|
yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
|
||||||
|
all_reasoning_steps.append(summary_think)
|
||||||
|
msg_history.append({"role": "user", "content": summary_think})
|
||||||
|
think += summary_think
|
||||||
|
continue
|
||||||
|
|
||||||
|
executed_search_queries.append(search_query)
|
||||||
|
|
||||||
|
# Step 3: Truncate previous reasoning steps
|
||||||
|
truncated_prev_reasoning = self._truncate_previous_reasoning(all_reasoning_steps)
|
||||||
|
|
||||||
|
# Step 4: Retrieve information
|
||||||
|
kbinfos = self._retrieve_information(search_query)
|
||||||
|
|
||||||
|
# Step 5: Update chunk information
|
||||||
|
self._update_chunk_info(chunk_info, kbinfos)
|
||||||
|
|
||||||
|
# Step 6: Extract relevant information
|
||||||
|
think += "\n\n"
|
||||||
|
summary_think = ""
|
||||||
|
for ans in self._extract_relevant_info(truncated_prev_reasoning, search_query, kbinfos):
|
||||||
|
summary_think = ans
|
||||||
|
yield {"answer": think + self._remove_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}
|
||||||
|
|
||||||
|
all_reasoning_steps.append(summary_think)
|
||||||
|
msg_history.append(
|
||||||
|
{"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
|
||||||
|
think += self._remove_result_tags(summary_think)
|
||||||
|
logging.info(f"[THINK]Summary: {step_index}. {summary_think}")
|
||||||
|
|
||||||
|
yield think + "</think>"
|
||||||
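A minimal sketch of how this class might be driven from caller code. The chat model bundle, the knowledge-base retriever and its parameters are stand-ins for whatever the embedding application already has; they are assumptions, not part of the module above.

    from functools import partial

    # Hypothetical wiring: chat_mdl is an already-constructed LLMBundle chat model,
    # kb_retriever is any callable that accepts question=... and returns chunks/doc_aggs.
    researcher = DeepResearcher(
        chat_mdl,
        {"tavily_api_key": "", "use_kg": False},
        kb_retrieve=partial(kb_retriever, top_n=6),
    )

    chunk_info = {"chunks": [], "doc_aggs": []}  # filled in by _update_chunk_info for later citation
    for delta in researcher.thinking(chunk_info, "Who directed Jaws, and where is he from?"):
        if isinstance(delta, dict):
            print(delta["answer"])   # streamed reasoning, wrapped in <think>...</think>
        else:
            print(delta)             # final accumulated reasoning string

The generator yields dictionaries while it streams intermediate reasoning and a plain string at the very end, so a caller can forward the dictionaries to an SSE channel and keep the last string as the full trace.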
agentic_reasoning/prompts.py (new file, 113 lines)
@@ -0,0 +1,113 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
MAX_SEARCH_LIMIT = 6

REASON_PROMPT = (
    "You are a reasoning assistant with the ability to perform dataset searches to help "
    "you answer the user's question accurately. You have special tools:\n\n"
    f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
    f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
    f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
    "Once you have all the information you need, continue your reasoning.\n\n"
    "-- Example 1 --\n"  ########################################
    "Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"

    "-- Example 2 --\n"  #########################################
    "Question: \"When was the founder of craigslist born?\"\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information.\n"
    "Assistant:\n"
    f"    {BEGIN_SEARCH_QUERY} When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f"    {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
    "Continues reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"
    "**Remember**:\n"
    f"- You have a dataset to search, so you just provide a proper search query.\n"
    f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
    "- The language of query MUST be as the same as 'Question' or 'search result'.\n"
    "- If no helpful information can be found, rewrite the search query to be less and precise keywords.\n"
    "- When done searching, continue your reasoning.\n\n"
    'Please answer the following question. You should think step by step to solve it.\n\n'
)

RELEVANT_EXTRACTION_PROMPT = """**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
- The language of query **MUST BE** as the same as 'Search Query' or 'Web Pages'.\n"
**Final Information**

[Helpful information]

- **If the web pages do not provide any helpful information for current search query:** Output the following text.

**Final Information**

No helpful information found.

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

"""
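The reasoning loop and these prompts communicate through the four tag constants above: the model wraps a query in the search-query tags, and the system feeds results back inside the search-result tags. A self-contained sketch of how such tagged spans can be pulled out of a model reply; rag.nlp.extract_between is not reproduced here, this just shows the same idea with a plain regex.

    import re

    BEGIN = "<|begin_search_query|>"
    END = "<|end_search_query|>"

    def extract_queries(text: str) -> list:
        # Collect every span wrapped in the search-query tags, analogous to extract_between.
        pattern = re.escape(BEGIN) + r"(.*?)" + re.escape(END)
        return [q.strip() for q in re.findall(pattern, text, flags=re.DOTALL)]

    reply = f"I need more facts. {BEGIN}Who is the director of Jaws?{END}"
    print(extract_queries(reply))  # ['Who is the director of Jaws?']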
@@ -83,7 +83,7 @@ app.errorhandler(Exception)(server_error_response)
 app.config["SESSION_PERMANENT"] = False
 app.config["SESSION_TYPE"] = "filesystem"
 app.config["MAX_CONTENT_LENGTH"] = int(
-    os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024)
+    os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
 )
 
 Session(app)
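The upload cap is read from the MAX_CONTENT_LENGTH environment variable in bytes; with no override it now falls back to 1 GiB instead of 128 MiB. A small sketch of the fallback computation, useful when choosing a value for a deployment (the concrete numbers are illustrative):

    import os

    # 256 MiB override would be MAX_CONTENT_LENGTH=268435456 in the environment;
    # unset, the new default below applies and Flask rejects larger request bodies.
    max_len = int(os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024))
    print(max_len)  # 1073741824 when unset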
@@ -119,8 +119,9 @@ def register_page(page_path):
     sys.modules[module_name] = page
     spec.loader.exec_module(page)
     page_name = getattr(page, "page_name", page_name)
+    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
     url_prefix = (
-        f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
+        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
     )
 
     app.register_blueprint(page.manager, url_prefix=url_prefix)
@@ -25,7 +25,7 @@ from api.db import FileType, LLMType, ParserType, FileSource
 from api.db.db_models import APIToken, Task, File
 from api.db.services import duplicate_name
 from api.db.services.api_service import APITokenService, API4ConversationService
-from api.db.services.dialog_service import DialogService, chat, keyword_extraction, label_question
+from api.db.services.dialog_service import DialogService, chat
 from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService

@@ -38,6 +38,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge
     generate_confirmation_token
 
 from api.utils.file_utils import filename_type, thumbnail
+from rag.app.tag import label_question
+from rag.prompts import keyword_extraction
 from rag.utils.storage_factory import STORAGE_IMPL
 
 from api.db.services.canvas_service import UserCanvasService

@@ -477,7 +479,7 @@ def upload():
         doc = doc.to_dict()
         doc["tenant_id"] = tenant_id
         bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-        queue_tasks(doc, bucket, name)
+        queue_tasks(doc, bucket, name, 0)
     except Exception as e:
         return server_error_response(e)
 
@@ -18,13 +18,16 @@ import traceback
 from flask import request, Response
 from flask_login import login_required, current_user
 from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
+from api.db.services.user_service import TenantService
+from api.db.services.user_canvas_version import UserCanvasVersionService
 from api.settings import RetCode
 from api.utils import get_uuid
 from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
 from agent.canvas import Canvas
 from peewee import MySQLDatabase, PostgresqlDatabase
 from api.db.db_models import APIToken
+import logging
+import time
 
 @manager.route('/templates', methods=['GET'])  # noqa: F821
 @login_required

@@ -61,7 +64,6 @@ def save():
     req["user_id"] = current_user.id
     if not isinstance(req["dsl"], str):
         req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
-
     req["dsl"] = json.loads(req["dsl"])
     if "id" not in req:
         if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):

@@ -75,16 +77,22 @@ def save():
                 data=False, message='Only owner of canvas authorized for this operation.',
                 code=RetCode.OPERATING_ERROR)
         UserCanvasService.update_by_id(req["id"], req)
+        # save version
+        UserCanvasVersionService.insert( user_canvas_id=req["id"], dsl=req["dsl"], title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")))
+        UserCanvasVersionService.delete_all_versions(req["id"])
     return get_json_result(data=req)
 
 
 @manager.route('/get/<canvas_id>', methods=['GET'])  # noqa: F821
 @login_required
 def get(canvas_id):
-    e, c = UserCanvasService.get_by_id(canvas_id)
+    e, c = UserCanvasService.get_by_tenant_id(canvas_id)
+    logging.info(f"get canvas_id: {canvas_id} c: {c}")
     if not e:
         return get_data_error_result(message="canvas not found.")
-    return get_json_result(data=c.to_dict())
+    return get_json_result(data=c)
 
 
 @manager.route('/getsse/<canvas_id>', methods=['GET'])  # type: ignore # noqa: F821
 def getsse(canvas_id):
@@ -283,4 +291,62 @@ def test_db_connect():
         return get_json_result(data="Database Connection Successful!")
     except Exception as e:
         return server_error_response(e)
+
+
+# api get list version dsl of canvas
+@manager.route('/getlistversion/<canvas_id>', methods=['GET'])  # noqa: F821
+@login_required
+def getlistversion(canvas_id):
+    try:
+        list =sorted([c.to_dict() for c in UserCanvasVersionService.list_by_canvas_id(canvas_id)], key=lambda x: x["update_time"]*-1)
+        return get_json_result(data=list)
+    except Exception as e:
+        return get_data_error_result(message=f"Error getting history files: {e}")
+
+
+# api get version dsl of canvas
+@manager.route('/getversion/<version_id>', methods=['GET'])  # noqa: F821
+@login_required
+def getversion( version_id):
+    try:
+        e, version = UserCanvasVersionService.get_by_id(version_id)
+        if version:
+            return get_json_result(data=version.to_dict())
+    except Exception as e:
+        return get_json_result(data=f"Error getting history file: {e}")
+
+
+@manager.route('/listteam', methods=['GET'])  # noqa: F821
+@login_required
+def list_kbs():
+    keywords = request.args.get("keywords", "")
+    page_number = int(request.args.get("page", 1))
+    items_per_page = int(request.args.get("page_size", 150))
+    orderby = request.args.get("orderby", "create_time")
+    desc = request.args.get("desc", True)
+    try:
+        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
+        kbs, total = UserCanvasService.get_by_tenant_ids(
+            [m["tenant_id"] for m in tenants], current_user.id, page_number,
+            items_per_page, orderby, desc, keywords)
+        return get_json_result(data={"kbs": kbs, "total": total})
+    except Exception as e:
+        return server_error_response(e)
+
+
+@manager.route('/setting', methods=['POST'])  # noqa: F821
+@validate_request("id", "title", "permission")
+@login_required
+def setting():
+    req = request.json
+    req["user_id"] = current_user.id
+    e,flow = UserCanvasService.get_by_id(req["id"])
+    if not e:
+        return get_data_error_result(message="canvas not found.")
+    flow = flow.to_dict()
+    flow["title"] = req["title"]
+    if req["description"]:
+        flow["description"] = req["description"]
+    if req["permission"]:
+        flow["permission"] = req["permission"]
+    if req["avatar"]:
+        flow["avatar"] = req["avatar"]
+    if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
+        return get_json_result(
+            data=False, message='Only owner of canvas authorized for this operation.',
+            code=RetCode.OPERATING_ERROR)
+    num= UserCanvasService.update_by_id(req["id"], flow)
+    return get_json_result(data=num)
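A rough sketch of exercising the new version-history endpoints from a client. The base URL, port and the fact that the canvas blueprint is mounted under /v1/canvas are assumptions about the deployment, as is the presence of an already-authenticated session; only the route paths come from the code above.

    import requests

    BASE = "http://localhost:9380/v1/canvas"   # assumed mount point and port
    session = requests.Session()               # assumed to already carry login cookies/token
    canvas_id = "your-canvas-id"               # placeholder

    versions = session.get(f"{BASE}/getlistversion/{canvas_id}").json()["data"]
    latest = versions[0]                       # sorted by update_time, newest first
    snapshot = session.get(f"{BASE}/getversion/{latest['id']}").json()["data"]
    print(snapshot.get("title"))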
@@ -19,9 +19,10 @@ import json
 from flask import request
 from flask_login import login_required, current_user
 
-from api.db.services.dialog_service import keyword_extraction, label_question
 from rag.app.qa import rmPrefix, beAdoc
+from rag.app.tag import label_question
 from rag.nlp import search, rag_tokenizer
+from rag.prompts import keyword_extraction
 from rag.settings import PAGERANK_FLD
 from rag.utils import rmSpace
 from api.db import LLMType, ParserType

@@ -93,12 +94,14 @@ def get():
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
             return get_data_error_result(message="Tenant not found!")
-        tenant_id = tenants[0].tenant_id
-        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
-        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
+        for tenant in tenants:
+            kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
+            chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
+            if chunk:
+                break
         if chunk is None:
             return server_error_response(Exception("Chunk not found"))
 
         k = []
         for n in chunk.keys():
             if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
@@ -17,24 +17,25 @@ import json
 import re
 import traceback
 from copy import deepcopy
-from api.db.db_models import APIToken
-from api.db.services.conversation_service import ConversationService, structure_answer
-from api.db.services.user_service import UserTenantService
-from flask import request, Response
-from flask_login import login_required, current_user
+
+import trio
+from flask import Response, request
+from flask_login import current_user, login_required
 
+from api import settings
 from api.db import LLMType
-from api.db.services.dialog_service import DialogService, chat, ask, label_question
+from api.db.db_models import APIToken
+from api.db.services.conversation_service import ConversationService, structure_answer
+from api.db.services.dialog_service import DialogService, ask, chat
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle, TenantService
-from api import settings
-from api.utils.api_utils import get_json_result
-from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
+from api.db.services.user_service import UserTenantService
+from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request
 from graphrag.general.mind_map_extractor import MindMapExtractor
+from rag.app.tag import label_question
 
 
-@manager.route('/set', methods=['POST'])  # noqa: F821
+@manager.route("/set", methods=["POST"])  # noqa: F821
 @login_required
 def set_conversation():
     req = request.json

@@ -48,8 +49,7 @@ def set_conversation():
                 return get_data_error_result(message="Conversation not found!")
             e, conv = ConversationService.get_by_id(conv_id)
             if not e:
-                return get_data_error_result(
-                    message="Fail to update a conversation!")
+                return get_data_error_result(message="Fail to update a conversation!")
             conv = conv.to_dict()
             return get_json_result(data=conv)
         except Exception as e:

@@ -59,38 +59,30 @@ def set_conversation():
         e, dia = DialogService.get_by_id(req["dialog_id"])
         if not e:
             return get_data_error_result(message="Dialog not found")
-        conv = {
-            "id": conv_id,
-            "dialog_id": req["dialog_id"],
-            "name": req.get("name", "New conversation"),
-            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
-        }
+        conv = {"id": conv_id, "dialog_id": req["dialog_id"], "name": req.get("name", "New conversation"), "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]}
         ConversationService.save(**conv)
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
 
 
-@manager.route('/get', methods=['GET'])  # noqa: F821
+@manager.route("/get", methods=["GET"])  # noqa: F821
 @login_required
 def get():
     conv_id = request.args["conversation_id"]
     try:
         e, conv = ConversationService.get_by_id(conv_id)
         if not e:
             return get_data_error_result(message="Conversation not found!")
         tenants = UserTenantService.query(user_id=current_user.id)
-        avatar =None
+        avatar = None
         for tenant in tenants:
             dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
-            if dialog and len(dialog)>0:
+            if dialog and len(dialog) > 0:
                 avatar = dialog[0].icon
                 break
         else:
-            return get_json_result(
-                data=False, message='Only owner of conversation authorized for this operation.',
-                code=settings.RetCode.OPERATING_ERROR)
+            return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
 
         def get_value(d, k1, k2):
             return d.get(k1, d.get(k2))
@@ -98,26 +90,29 @@ def get():
         for ref in conv.reference:
             if isinstance(ref, list):
                 continue
-            ref["chunks"] = [{
-                "id": get_value(ck, "chunk_id", "id"),
-                "content": get_value(ck, "content", "content_with_weight"),
-                "document_id": get_value(ck, "doc_id", "document_id"),
-                "document_name": get_value(ck, "docnm_kwd", "document_name"),
-                "dataset_id": get_value(ck, "kb_id", "dataset_id"),
-                "image_id": get_value(ck, "image_id", "img_id"),
-                "positions": get_value(ck, "positions", "position_int"),
-            } for ck in ref.get("chunks", [])]
+            ref["chunks"] = [
+                {
+                    "id": get_value(ck, "chunk_id", "id"),
+                    "content": get_value(ck, "content", "content_with_weight"),
+                    "document_id": get_value(ck, "doc_id", "document_id"),
+                    "document_name": get_value(ck, "docnm_kwd", "document_name"),
+                    "dataset_id": get_value(ck, "kb_id", "dataset_id"),
+                    "image_id": get_value(ck, "image_id", "img_id"),
+                    "positions": get_value(ck, "positions", "position_int"),
+                }
+                for ck in ref.get("chunks", [])
+            ]
 
         conv = conv.to_dict()
-        conv["avatar"]=avatar
+        conv["avatar"] = avatar
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
 
-@manager.route('/getsse/<dialog_id>', methods=['GET'])  # type: ignore # noqa: F821
+
+@manager.route("/getsse/<dialog_id>", methods=["GET"])  # type: ignore # noqa: F821
 def getsse(dialog_id):
-    token = request.headers.get('Authorization').split()
+    token = request.headers.get("Authorization").split()
     if len(token) != 2:
         return get_data_error_result(message='Authorization is not valid!"')
     token = token[1]

@@ -129,13 +124,14 @@ def getsse(dialog_id):
         if not e:
             return get_data_error_result(message="Dialog not found!")
         conv = conv.to_dict()
-        conv["avatar"]= conv["icon"]
+        conv["avatar"] = conv["icon"]
         del conv["icon"]
         return get_json_result(data=conv)
     except Exception as e:
         return server_error_response(e)
 
-@manager.route('/rm', methods=['POST'])  # noqa: F821
+
+@manager.route("/rm", methods=["POST"])  # noqa: F821
 @login_required
 def rm():
     conv_ids = request.json["conversation_ids"]

@@ -149,28 +145,21 @@ def rm():
             if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                 break
         else:
-            return get_json_result(
-                data=False, message='Only owner of conversation authorized for this operation.',
-                code=settings.RetCode.OPERATING_ERROR)
+            return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
         ConversationService.delete_by_id(cid)
         return get_json_result(data=True)
     except Exception as e:
         return server_error_response(e)
 
 
-@manager.route('/list', methods=['GET'])  # noqa: F821
+@manager.route("/list", methods=["GET"])  # noqa: F821
 @login_required
 def list_convsersation():
     dialog_id = request.args["dialog_id"]
     try:
         if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
-            return get_json_result(
-                data=False, message='Only owner of dialog authorized for this operation.',
-                code=settings.RetCode.OPERATING_ERROR)
-        convs = ConversationService.query(
-            dialog_id=dialog_id,
-            order_by=ConversationService.model.create_time,
-            reverse=True)
+            return get_json_result(data=False, message="Only owner of dialog authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
+        convs = ConversationService.query(dialog_id=dialog_id, order_by=ConversationService.model.create_time, reverse=True)
 
         convs = [d.to_dict() for d in convs]
         return get_json_result(data=convs)
@@ -178,7 +167,7 @@ def list_convsersation():
         return server_error_response(e)
 
 
-@manager.route('/completion', methods=['POST'])  # noqa: F821
+@manager.route("/completion", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "messages")
 def completion():

@@ -205,25 +194,30 @@ def completion():
     if not conv.reference:
         conv.reference = []
     else:
         def get_value(d, k1, k2):
             return d.get(k1, d.get(k2))
 
         for ref in conv.reference:
             if isinstance(ref, list):
                 continue
-            ref["chunks"] = [{
-                "id": get_value(ck, "chunk_id", "id"),
-                "content": get_value(ck, "content", "content_with_weight"),
-                "document_id": get_value(ck, "doc_id", "document_id"),
-                "document_name": get_value(ck, "docnm_kwd", "document_name"),
-                "dataset_id": get_value(ck, "kb_id", "dataset_id"),
-                "image_id": get_value(ck, "image_id", "img_id"),
-                "positions": get_value(ck, "positions", "position_int"),
-            } for ck in ref.get("chunks", [])]
+            ref["chunks"] = [
+                {
+                    "id": get_value(ck, "chunk_id", "id"),
+                    "content": get_value(ck, "content", "content_with_weight"),
+                    "document_id": get_value(ck, "doc_id", "document_id"),
+                    "document_name": get_value(ck, "docnm_kwd", "document_name"),
+                    "dataset_id": get_value(ck, "kb_id", "dataset_id"),
+                    "image_id": get_value(ck, "image_id", "img_id"),
+                    "positions": get_value(ck, "positions", "position_int"),
+                }
+                for ck in ref.get("chunks", [])
+            ]
 
     if not conv.reference:
         conv.reference = []
     conv.reference.append({"chunks": [], "doc_aggs": []})
 
     def stream():
         nonlocal dia, msg, req, conv
         try:

@@ -233,9 +227,7 @@ def completion():
             ConversationService.update_by_id(conv.id, conv.to_dict())
         except Exception as e:
             traceback.print_exc()
-            yield "data:" + json.dumps({"code": 500, "message": str(e),
-                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                       ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
         yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
     if req.get("stream", True):

@@ -257,7 +249,7 @@ def completion():
         return server_error_response(e)
 
 
-@manager.route('/tts', methods=['POST'])  # noqa: F821
+@manager.route("/tts", methods=["POST"])  # noqa: F821
 @login_required
 def tts():
     req = request.json
@@ -279,9 +271,7 @@ def tts():
             for chunk in tts_mdl.tts(txt):
                 yield chunk
         except Exception as e:
-            yield ("data:" + json.dumps({"code": 500, "message": str(e),
-                                         "data": {"answer": "**ERROR**: " + str(e)}},
-                                        ensure_ascii=False)).encode('utf-8')
+            yield ("data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e)}}, ensure_ascii=False)).encode("utf-8")
 
     resp = Response(stream_audio(), mimetype="audio/mpeg")
     resp.headers.add_header("Cache-Control", "no-cache")

@@ -291,7 +281,7 @@ def tts():
     return resp
 
 
-@manager.route('/delete_msg', methods=['POST'])  # noqa: F821
+@manager.route("/delete_msg", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "message_id")
 def delete_msg():

@@ -314,7 +304,7 @@ def delete_msg():
     return get_json_result(data=conv)
 
 
-@manager.route('/thumbup', methods=['POST'])  # noqa: F821
+@manager.route("/thumbup", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("conversation_id", "message_id")
 def thumbup():

@@ -322,7 +312,7 @@ def thumbup():
     e, conv = ConversationService.get_by_id(req["conversation_id"])
     if not e:
         return get_data_error_result(message="Conversation not found!")
-    up_down = req.get("set")
+    up_down = req.get("thumbup")
     feedback = req.get("feedback", "")
     conv = conv.to_dict()
     for i, msg in enumerate(conv["message"]):

@@ -341,7 +331,7 @@ def thumbup():
     return get_json_result(data=conv)
 
 
-@manager.route('/ask', methods=['POST'])  # noqa: F821
+@manager.route("/ask", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("question", "kb_ids")
 def ask_about():

@@ -354,9 +344,7 @@ def ask_about():
             for ans in ask(req["question"], req["kb_ids"], uid):
                 yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
         except Exception as e:
-            yield "data:" + json.dumps({"code": 500, "message": str(e),
-                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                       ensure_ascii=False) + "\n\n"
+            yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
         yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
 
     resp = Response(stream(), mimetype="text/event-stream")

@@ -367,7 +355,7 @@ def ask_about():
     return resp
 
 
-@manager.route('/mindmap', methods=['POST'])  # noqa: F821
+@manager.route("/mindmap", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("question", "kb_ids")
 def mindmap():
@@ -380,18 +368,16 @@ def mindmap():
     embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
     question = req["question"]
-    ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12,
-                                           0.3, 0.3, aggs=False,
-                                           rank_feature=label_question(question, [kb])
-                                           )
+    ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12, 0.3, 0.3, aggs=False, rank_feature=label_question(question, [kb]))
     mindmap = MindMapExtractor(chat_mdl)
-    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
+    mind_map = trio.run(mindmap, [c["content_with_weight"] for c in ranks["chunks"]])
+    mind_map = mind_map.output
     if "error" in mind_map:
         return server_error_response(Exception(mind_map["error"]))
     return get_json_result(data=mind_map)
 
 
-@manager.route('/related_questions', methods=['POST'])  # noqa: F821
+@manager.route("/related_questions", methods=["POST"])  # noqa: F821
 @login_required
 @validate_request("question")
 def related_questions():
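The mind-map extractor is now invoked through trio.run, i.e. the extractor instance is treated as an async callable and executed on a fresh trio event loop before its .output is read. A minimal, self-contained illustration of that calling pattern; the Summary class below is a stand-in, not MindMapExtractor.

    import trio

    class Summary:
        # Stand-in for an async callable object such as the extractor above.
        async def __call__(self, chunks):
            await trio.sleep(0)              # pretend to await LLM calls
            return {"root": chunks[:1]}

    extractor = Summary()
    result = trio.run(extractor, ["chunk one", "chunk two"])  # trio.run(async_fn, *args)
    print(result)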
@@ -399,31 +385,49 @@ def related_questions():
     question = req["question"]
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
     prompt = """
-Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
-Instructions:
-- Based on the keywords provided by the user, generate 5-10 related search terms.
-- Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
-- Use common, general terms as much as possible, avoiding obscure words or technical jargon.
-- Keep the term length between 2-4 words, concise and clear.
-- DO NOT translate, use the language of the original keywords.
+Role: You are an AI language model assistant tasked with generating 5-10 related questions based on a user’s original query. These questions should help expand the search query scope and improve search relevance.
 
-### Example:
-Keywords: Chinese football
-Related search terms:
-1. Current status of Chinese football
-2. Reform of Chinese football
-3. Youth training of Chinese football
-4. Chinese football in the Asian Cup
-5. Chinese football in the World Cup
+Instructions:
+Input: You are provided with a user’s question.
+Output: Generate 5-10 alternative questions that are related to the original user question. These alternatives should help retrieve a broader range of relevant documents from a vector database.
+Context: Focus on rephrasing the original question in different ways, making sure the alternative questions are diverse but still connected to the topic of the original query. Do not create overly obscure, irrelevant, or unrelated questions.
+Fallback: If you cannot generate any relevant alternatives, do not return any questions.
+Guidance:
+1. Each alternative should be unique but still relevant to the original query.
+2. Keep the phrasing clear, concise, and easy to understand.
+3. Avoid overly technical jargon or specialized terms unless directly relevant.
+4. Ensure that each question contributes towards improving search results by broadening the search angle, not narrowing it.
+
+Example:
+Original Question: What are the benefits of electric vehicles?
+
+Alternative Questions:
+1. How do electric vehicles impact the environment?
+2. What are the advantages of owning an electric car?
+3. What is the cost-effectiveness of electric vehicles?
+4. How do electric vehicles compare to traditional cars in terms of fuel efficiency?
+5. What are the environmental benefits of switching to electric cars?
+6. How do electric vehicles help reduce carbon emissions?
+7. Why are electric vehicles becoming more popular?
+8. What are the long-term savings of using electric vehicles?
+9. How do electric vehicles contribute to sustainability?
+10. What are the key benefits of electric vehicles for consumers?
 
 Reason:
-- When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
-- Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
-- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
+Rephrasing the original query into multiple alternative questions helps the user explore different aspects of their search topic, improving the quality of search results.
+These questions guide the search engine to provide a more comprehensive set of relevant documents.
 
 """
-    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
+    ans = chat_mdl.chat(
+        prompt,
+        [
+            {
+                "role": "user",
+                "content": f"""
 Keywords: {question}
 Related search terms:
-    """}], {"temperature": 0.9})
+    """,
+            }
+        ],
+        {"temperature": 0.9},
+    )
     return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
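The endpoint still returns only the numbered lines of the model's reply, with their "N. " prefixes stripped. A standalone sketch of that post-processing, using made-up model output:

    import re

    ans = """Here are some ideas:
    1. How do electric vehicles impact the environment?
    2. What are the advantages of owning an electric car?
    Closing remark that is ignored."""

    related = [re.sub(r"^[0-9]\. ", "", a.strip()) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a.strip())]
    print(related)  # the two numbered questions, without their prefixes

Note that the pattern `^[0-9]\. ` only matches single-digit prefixes, so a tenth suggestion written as "10. ..." would be dropped by this filter.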
@@ -18,6 +18,7 @@ from flask import request
 from flask_login import login_required, current_user
 from api.db.services.dialog_service import DialogService
 from api.db import StatusEnum
+from api.db.services.llm_service import TenantLLMService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.user_service import TenantService, UserTenantService
 from api import settings

@@ -57,11 +58,6 @@ def set_dialog():
 
     if not prompt_config["system"]:
         prompt_config["system"] = default_prompt["system"]
-    # if len(prompt_config["parameters"]) < 1:
-    #     prompt_config["parameters"] = default_prompt["parameters"]
-    # for p in prompt_config["parameters"]:
-    #     if p["key"] == "knowledge":break
-    # else:    prompt_config["parameters"].append(default_prompt["parameters"][0])
 
     for p in prompt_config["parameters"]:
         if p["optional"]:

@@ -74,22 +70,19 @@ def set_dialog():
     e, tenant = TenantService.get_by_id(current_user.id)
     if not e:
         return get_data_error_result(message="Tenant not found!")
-    kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids"))
-    embd_count = len(set([kb.embd_id for kb in kbs]))
-    if embd_count != 1:
+    kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
+    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+    embd_count = len(set(embd_ids))
+    if embd_count > 1:
         return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}"')
 
     llm_id = req.get("llm_id", tenant.llm_id)
     if not dialog_id:
-        if not req.get("kb_ids"):
-            return get_data_error_result(
-                message="Fail! Please select knowledgebase!")
-
         dia = {
             "id": get_uuid(),
             "tenant_id": current_user.id,
             "name": name,
-            "kb_ids": req["kb_ids"],
+            "kb_ids": req.get("kb_ids", []),
             "description": description,
             "llm_id": llm_id,
             "llm_setting": llm_setting,
@@ -71,11 +71,13 @@ def upload():
     if not e:
         raise LookupError("Can't find this knowledgebase!")
 
-    err, _ = FileService.upload_document(kb, file_objs, current_user.id)
+    err, files = FileService.upload_document(kb, file_objs, current_user.id)
+    files = [f[0] for f in files]  # remove the blob
+
     if err:
         return get_json_result(
-            data=False, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
-    return get_json_result(data=True)
+            data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
+    return get_json_result(data=files)
 
 
 @manager.route('/web_crawl', methods=['POST'])  # noqa: F821

@@ -329,10 +331,10 @@ def rm():
                     message="Database error (Document removal)!")
 
             f2d = File2DocumentService.get_by_document_id(doc_id)
-            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
+            deleted_file_count = FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
             File2DocumentService.delete_by_document_id(doc_id)
-            STORAGE_IMPL.rm(b, n)
+            if deleted_file_count > 0:
+                STORAGE_IMPL.rm(b, n)
         except Exception as e:
             errors += str(e)
 

@@ -345,7 +347,7 @@ def rm():
 @manager.route('/run', methods=['POST'])  # noqa: F821
 @login_required
 @validate_request("doc_ids", "run")
 def run():
     req = request.json
     for doc_id in req["doc_ids"]:
         if not DocumentService.accessible(doc_id, current_user.id):

@@ -378,7 +380,7 @@ def run():
             doc = doc.to_dict()
             doc["tenant_id"] = tenant_id
             bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-            queue_tasks(doc, bucket, name)
+            queue_tasks(doc, bucket, name, 0)
 
         return get_json_result(data=True)
     except Exception as e:
@@ -38,8 +38,12 @@ def convert():
     file2documents = []
 
     try:
+        files = FileService.get_by_ids(file_ids)
+        files_set = dict({file.id: file for file in files})
         for file_id in file_ids:
-            e, file = FileService.get_by_id(file_id)
+            file = files_set[file_id]
+            if not file:
+                return get_data_error_result(message="File not found!")
             file_ids_list = [file_id]
             if file.type == FileType.FOLDER.value:
                 file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])

@@ -86,6 +90,7 @@ def convert():
                 "file_id": id,
                 "document_id": doc.id,
             })
 
             file2documents.append(file2document.to_json())
         return get_json_result(data=file2documents)
     except Exception as e:

@@ -55,20 +55,17 @@ def upload():
             data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
     file_res = []
     try:
+        e, pf_folder = FileService.get_by_id(pf_id)
+        if not e:
+            return get_data_error_result( message="Can't find this folder!")
         for file_obj in file_objs:
-            e, file = FileService.get_by_id(pf_id)
-            if not e:
-                return get_data_error_result(
-                    message="Can't find this folder!")
             MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
             if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
-                return get_data_error_result(
-                    message="Exceed the maximum file number of a free user!")
+                return get_data_error_result( message="Exceed the maximum file number of a free user!")
 
             # split file name path
             if not file_obj.filename:
-                e, file = FileService.get_by_id(pf_id)
-                file_obj_names = [file.name, file_obj.filename]
+                file_obj_names = [pf_folder.name, file_obj.filename]
             else:
                 full_path = '/' + file_obj.filename
                 file_obj_names = full_path.split('/')

@@ -184,7 +181,7 @@ def list_files():
             current_user.id, pf_id, page_number, items_per_page, orderby, desc, keywords)
 
         parent_folder = FileService.get_parent_folder(pf_id)
-        if not FileService.get_parent_folder(pf_id):
+        if not parent_folder:
             return get_json_result(message="File not found!")
 
         return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})

@@ -358,9 +355,14 @@ def move():
     try:
         file_ids = req["src_file_ids"]
         parent_id = req["dest_file_id"]
+        files = FileService.get_by_ids(file_ids)
+        files_dict = {}
+        for file in files:
+            files_dict[file.id] = file
+
         for file_id in file_ids:
-            e, file = FileService.get_by_id(file_id)
-            if not e:
+            file = files_dict[file_id]
+            if not file:
                 return get_data_error_result(message="File or Folder not found!")
             if not file.tenant_id:
                 return get_data_error_result(message="Tenant not found!")

@@ -14,7 +14,6 @@
 # limitations under the License.
 #
 import json
-import logging
 import os
 
 from flask import request

@@ -74,7 +73,7 @@ def create():
 
 @manager.route('/update', methods=['post'])  # noqa: F821
 @login_required
-@validate_request("kb_id", "name", "description", "permission", "parser_id")
+@validate_request("kb_id", "name", "description", "parser_id")
 @not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
 def update():
     req = request.json

@@ -158,25 +157,38 @@ def detail():
         return server_error_response(e)
 
 
-@manager.route('/list', methods=['GET'])  # noqa: F821
+@manager.route('/list', methods=['POST'])  # noqa: F821
 @login_required
 def list_kbs():
     keywords = request.args.get("keywords", "")
-    page_number = int(request.args.get("page", 1))
-    items_per_page = int(request.args.get("page_size", 150))
+    page_number = int(request.args.get("page", 0))
+    items_per_page = int(request.args.get("page_size", 0))
     parser_id = request.args.get("parser_id")
     orderby = request.args.get("orderby", "create_time")
     desc = request.args.get("desc", True)
 
+    req = request.get_json()
+    owner_ids = req.get("owner_ids", [])
     try:
-        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
-        kbs, total = KnowledgebaseService.get_by_tenant_ids(
-            [m["tenant_id"] for m in tenants], current_user.id, page_number,
-            items_per_page, orderby, desc, keywords, parser_id)
+        if not owner_ids:
+            tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
+            tenants = [m["tenant_id"] for m in tenants]
+            kbs, total = KnowledgebaseService.get_by_tenant_ids(
+                tenants, current_user.id, page_number,
+                items_per_page, orderby, desc, keywords, parser_id)
+        else:
+            tenants = owner_ids
+            kbs, total = KnowledgebaseService.get_by_tenant_ids(
+                tenants, current_user.id, 0,
+                0, orderby, desc, keywords, parser_id)
+            kbs = [kb for kb in kbs if kb["tenant_id"] in tenants]
+            if page_number and items_per_page:
+                kbs = kbs[(page_number-1)*items_per_page:page_number*items_per_page]
+            total = len(kbs)
         return get_json_result(data={"kbs": kbs, "total": total})
     except Exception as e:
         return server_error_response(e)
 
 
 @manager.route('/rm', methods=['post'])  # noqa: F821
 @login_required
 @validate_request("kb_id")

@@ -300,11 +312,12 @@ def knowledge_graph(kb_id):
         "kb_id": [kb_id],
         "knowledge_graph_kwd": ["graph"]
     }
 
     obj = {"graph": {}, "mind_map": {}}
-    try:
-        sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
-    except Exception as e:
-        logging.exception(e)
+    if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
+        return get_json_result(data=obj)
+    sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
+    if not len(sres.ids):
         return get_json_result(data=obj)
 
     for id in sres.ids[:1]:

@@ -318,6 +331,22 @@ def knowledge_graph(kb_id):
 
     if "nodes" in obj["graph"]:
         obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
     if "edges" in obj["graph"]:
-        obj["graph"]["edges"] = sorted(obj["graph"]["edges"], key=lambda x: x.get("weight", 0), reverse=True)[:128]
-    return get_json_result(data=obj)
+        node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
+        filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
+        obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
+    return get_json_result(data=obj)
+
+
+@manager.route('/<kb_id>/knowledge_graph', methods=['DELETE'])  # noqa: F821
+@login_required
+def delete_knowledge_graph(kb_id):
+    if not KnowledgebaseService.accessible(kb_id, current_user.id):
+        return get_json_result(
+            data=False,
+            message='No authorization.',
+            code=settings.RetCode.AUTHENTICATION_ERROR
+        )
+    _, kb = KnowledgebaseService.get_by_id(kb_id)
+    settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), kb_id)
+
+    return get_json_result(data=True)

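Note: a minimal standalone sketch of the edge filtering introduced above, for illustration only. The function and argument names here are hypothetical; the committed code applies the same logic in place on obj["graph"] inside knowledge_graph().

# Illustrative sketch (not part of the diff): keep only edges whose endpoints
# survived the node cut, drop self-loops, then rank by weight.
def filter_graph_edges(nodes, edges, max_edges=128):
    node_id_set = {n["id"] for n in nodes}
    kept = [
        e for e in edges
        if e["source"] != e["target"]        # drop self-loops
        and e["source"] in node_id_set       # drop edges pointing at pruned nodes
        and e["target"] in node_id_set
    ]
    return sorted(kept, key=lambda e: e.get("weight", 0), reverse=True)[:max_edges]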
api/apps/langfuse_app.py (new file, 97 lines added)
@@ -0,0 +1,97 @@
+#
+# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from flask import request
+from flask_login import current_user, login_required
+from langfuse import Langfuse
+
+from api.db.db_models import DB
+from api.db.services.langfuse_service import TenantLangfuseService
+from api.utils.api_utils import get_error_data_result, get_json_result, server_error_response, validate_request
+
+
+@manager.route("/api_key", methods=["POST", "PUT"])  # noqa: F821
+@login_required
+@validate_request("secret_key", "public_key", "host")
+def set_api_key():
+    req = request.get_json()
+    secret_key = req.get("secret_key", "")
+    public_key = req.get("public_key", "")
+    host = req.get("host", "")
+    if not all([secret_key, public_key, host]):
+        return get_error_data_result(message="Missing required fields")
+
+    langfuse_keys = dict(
+        tenant_id=current_user.id,
+        secret_key=secret_key,
+        public_key=public_key,
+        host=host,
+    )
+
+    langfuse = Langfuse(public_key=langfuse_keys["public_key"], secret_key=langfuse_keys["secret_key"], host=langfuse_keys["host"])
+    if not langfuse.auth_check():
+        return get_error_data_result(message="Invalid Langfuse keys")
+
+    langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
+    with DB.atomic():
+        try:
+            if not langfuse_entry:
+                TenantLangfuseService.save(**langfuse_keys)
+            else:
+                TenantLangfuseService.update_by_tenant(tenant_id=current_user.id, langfuse_keys=langfuse_keys)
+            return get_json_result(data=langfuse_keys)
+        except Exception as e:
+            server_error_response(e)
+
+
+@manager.route("/api_key", methods=["GET"])  # noqa: F821
+@login_required
+@validate_request()
+def get_api_key():
+    langfuse_entry = TenantLangfuseService.filter_by_tenant_with_info(tenant_id=current_user.id)
+    if not langfuse_entry:
+        return get_json_result(message="Have not record any Langfuse keys.")
+
+    langfuse = Langfuse(public_key=langfuse_entry["public_key"], secret_key=langfuse_entry["secret_key"], host=langfuse_entry["host"])
+    try:
+        if not langfuse.auth_check():
+            return get_error_data_result(message="Invalid Langfuse keys loaded")
+    except langfuse.api.core.api_error.ApiError as api_err:
+        return get_json_result(message=f"Error from Langfuse: {api_err}")
+    except Exception as e:
+        server_error_response(e)
+
+    langfuse_entry["project_id"] = langfuse.api.projects.get().dict()["data"][0]["id"]
+    langfuse_entry["project_name"] = langfuse.api.projects.get().dict()["data"][0]["name"]
+
+    return get_json_result(data=langfuse_entry)
+
+
+@manager.route("/api_key", methods=["DELETE"])  # noqa: F821
+@login_required
+@validate_request()
+def delete_api_key():
+    langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
+    if not langfuse_entry:
+        return get_json_result(message="Have not record any Langfuse keys.")
+
+    with DB.atomic():
+        try:
+            TenantLangfuseService.delete_model(langfuse_entry)
+            return get_json_result(data=True)
+        except Exception as e:
+            server_error_response(e)

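Note: a small sketch of the key-validation step the new endpoints rely on, using only the Langfuse SDK calls that appear in the file above (Langfuse(...) and auth_check()). The helper name is illustrative, not part of the commit.

# Illustrative sketch: validate a Langfuse key pair before persisting it,
# mirroring what set_api_key() does above.
from langfuse import Langfuse

def langfuse_keys_are_valid(public_key: str, secret_key: str, host: str) -> bool:
    client = Langfuse(public_key=public_key, secret_key=secret_key, host=host)
    return client.auth_check()  # False when the keys or host are wrong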
@@ -61,6 +61,7 @@ def set_api_key():
     msg = ""
     for llm in LLMService.query(fid=factory):
         if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
+            assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
             mdl = EmbeddingModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:

@@ -71,6 +72,7 @@ def set_api_key():
             except Exception as e:
                 msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
         elif not chat_passed and llm.model_type == LLMType.CHAT.value:
+            assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
             mdl = ChatModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:

@@ -83,6 +85,7 @@ def set_api_key():
                 msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(
                     e)
         elif not rerank_passed and llm.model_type == LLMType.RERANK:
+            assert factory in RerankModel, f"Re-rank model from {factory} is not supported yet."
             mdl = RerankModel[factory](
                 req["api_key"], llm.llm_name, base_url=req.get("base_url"))
             try:

@@ -135,6 +138,8 @@ def set_api_key():
 def add_llm():
     req = request.json
     factory = req["llm_factory"]
+    api_key = req.get("api_key", "x")
+    llm_name = req.get("llm_name")
 
     def apikey_json(keys):
         nonlocal req

@@ -143,7 +148,6 @@ def add_llm():
     if factory == "VolcEngine":
         # For VolcEngine, due to its special authentication method
         # Assemble ark_api_key endpoint_id into api_key
-        llm_name = req["llm_name"]
         api_key = apikey_json(["ark_api_key", "endpoint_id"])
 
     elif factory == "Tencent Hunyuan":

@@ -152,52 +156,43 @@ def add_llm():
 
     elif factory == "Tencent Cloud":
         req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
+        return set_api_key()
 
     elif factory == "Bedrock":
         # For Bedrock, due to its special authentication method
         # Assemble bedrock_ak, bedrock_sk, bedrock_region
-        llm_name = req["llm_name"]
         api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])
 
     elif factory == "LocalAI":
-        llm_name = req["llm_name"] + "___LocalAI"
-        api_key = "xxxxxxxxxxxxxxx"
+        llm_name += "___LocalAI"
 
     elif factory == "HuggingFace":
-        llm_name = req["llm_name"] + "___HuggingFace"
-        api_key = "xxxxxxxxxxxxxxx"
+        llm_name += "___HuggingFace"
 
     elif factory == "OpenAI-API-Compatible":
-        llm_name = req["llm_name"] + "___OpenAI-API"
-        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
+        llm_name += "___OpenAI-API"
+
+    elif factory == "VLLM":
+        llm_name += "___VLLM"
 
     elif factory == "XunFei Spark":
-        llm_name = req["llm_name"]
         if req["model_type"] == "chat":
-            api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
+            api_key = req.get("spark_api_password", "")
         elif req["model_type"] == "tts":
             api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])
 
     elif factory == "BaiduYiyan":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["yiyan_ak", "yiyan_sk"])
 
     elif factory == "Fish Audio":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["fish_audio_ak", "fish_audio_refid"])
 
     elif factory == "Google Cloud":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])
 
     elif factory == "Azure-OpenAI":
-        llm_name = req["llm_name"]
         api_key = apikey_json(["api_key", "api_version"])
 
-    else:
-        llm_name = req["llm_name"]
-        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
 
     llm = {
         "tenant_id": current_user.id,
         "llm_factory": factory,

@@ -209,66 +204,74 @@ def add_llm():
     }
 
     msg = ""
+    mdl_nm = llm["llm_name"].split("___")[0]
     if llm["model_type"] == LLMType.EMBEDDING.value:
+        assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
         mdl = EmbeddingModel[factory](
             key=llm['api_key'],
-            model_name=llm["llm_name"],
+            model_name=mdl_nm,
             base_url=llm["api_base"])
         try:
             arr, tc = mdl.encode(["Test if the api key is available"])
             if len(arr[0]) == 0:
                 raise Exception("Fail")
         except Exception as e:
-            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
+            msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
     elif llm["model_type"] == LLMType.CHAT.value:
+        assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
         mdl = ChatModel[factory](
             key=llm['api_key'],
-            model_name=llm["llm_name"],
+            model_name=mdl_nm,
             base_url=llm["api_base"]
         )
         try:
             m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                 "temperature": 0.9})
-            if not tc:
+            if not tc and m.find("**ERROR**:") >= 0:
                 raise Exception(m)
         except Exception as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(
+            msg += f"\nFail to access model({mdl_nm})." + str(
                 e)
     elif llm["model_type"] == LLMType.RERANK:
-        mdl = RerankModel[factory](
-            key=llm["api_key"],
-            model_name=llm["llm_name"],
-            base_url=llm["api_base"]
-        )
+        assert factory in RerankModel, f"RE-rank model from {factory} is not supported yet."
         try:
+            mdl = RerankModel[factory](
+                key=llm["api_key"],
+                model_name=mdl_nm,
+                base_url=llm["api_base"]
+            )
             arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
             if len(arr) == 0:
                 raise Exception("Not known.")
+        except KeyError:
+            msg += f"{factory} dose not support this model({mdl_nm})"
         except Exception as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(
+            msg += f"\nFail to access model({mdl_nm})." + str(
                 e)
     elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
+        assert factory in CvModel, f"Image to text model from {factory} is not supported yet."
         mdl = CvModel[factory](
             key=llm["api_key"],
-            model_name=llm["llm_name"],
+            model_name=mdl_nm,
             base_url=llm["api_base"]
         )
         try:
             with open(os.path.join(get_project_base_directory(), "web/src/assets/yay.jpg"), "rb") as f:
                 m, tc = mdl.describe(f.read())
-                if not tc:
+                if not m and not tc:
                     raise Exception(m)
         except Exception as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
+            msg += f"\nFail to access model({mdl_nm})." + str(e)
     elif llm["model_type"] == LLMType.TTS:
+        assert factory in TTSModel, f"TTS model from {factory} is not supported yet."
         mdl = TTSModel[factory](
-            key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
+            key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
         )
         try:
             for resp in mdl.tts("Hello~ Ragflower!"):
                 pass
         except RuntimeError as e:
-            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
+            msg += f"\nFail to access model({mdl_nm})." + str(e)
     else:
         # TODO: check other type of models
         pass

@@ -343,8 +346,6 @@ def list_app():
 
     llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
     for o in objs:
-        if not o.api_key:
-            continue
         if o.llm_name + "@" + o.llm_factory in llm_set:
             continue
         llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

@@ -359,4 +360,4 @@ def list_app():
 
         return get_json_result(data=res)
     except Exception as e:
         return server_error_response(e)

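Note: the hunks above store added models under names like "my-model___VLLM" and then strip the factory suffix with split("___")[0] before instantiating the client. A tiny illustrative sketch of that round-trip (helper name is hypothetical):

# Illustrative sketch: strip the "___<factory>" suffix the way add_llm() now does.
def strip_factory_suffix(llm_name: str) -> str:
    return llm_name.split("___")[0]

assert strip_factory_suffix("qwen2.5-7b___VLLM") == "qwen2.5-7b"
assert strip_factory_suffix("gpt-4o") == "gpt-4o"  # names without a suffix pass through unchanged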
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
+
 from flask import request
 from api import settings
 from api.db import StatusEnum

@@ -21,17 +23,15 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import TenantLLMService
 from api.db.services.user_service import TenantService
 from api.utils import get_uuid
-from api.utils.api_utils import get_error_data_result, token_required
-from api.utils.api_utils import get_result
+from api.utils.api_utils import get_error_data_result, token_required, get_result, check_duplicate_ids
 
 
 @manager.route('/chats', methods=['POST'])  # noqa: F821
 @token_required
 def create(tenant_id):
     req = request.json
-    ids = req.get("dataset_ids")
-    if not ids:
-        return get_error_data_result(message="`dataset_ids` is required")
+    ids = [i for i in req.get("dataset_ids", []) if i]
     for kb_id in ids:
         kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
         if not kbs:

@@ -40,9 +40,11 @@ def create(tenant_id):
         kb = kbs[0]
         if kb.chunk_num == 0:
             return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
-    kbs = KnowledgebaseService.get_by_ids(ids)
-    embd_count = list(set([kb.embd_id for kb in kbs]))
-    if len(embd_count) != 1:
+
+    kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
+    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+    embd_count = list(set(embd_ids))
+    if len(embd_count) > 1:
         return get_result(message='Datasets use different embedding models."',
                           code=settings.RetCode.AUTHENTICATION_ERROR)
     req["kb_ids"] = ids

@@ -175,8 +177,10 @@ def update(tenant_id, chat_id):
         kb = kbs[0]
         if kb.chunk_num == 0:
             return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
 
     kbs = KnowledgebaseService.get_by_ids(ids)
-    embd_count = list(set([kb.embd_id for kb in kbs]))
+    embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
+    embd_count = list(set(embd_ids))
     if len(embd_count) != 1:
         return get_result(
             message='Datasets use different embedding models."',

@@ -219,11 +223,11 @@ def update(tenant_id, chat_id):
         return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
     if "name" in req:
         if not req.get("name"):
-            return get_error_data_result(message="`name` is not empty.")
+            return get_error_data_result(message="`name` cannot be empty.")
         if req["name"].lower() != res["name"].lower() \
                 and len(
             DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
-            return get_error_data_result(message="Duplicated chat name in updating dataset.")
+            return get_error_data_result(message="Duplicated chat name in updating chat.")
     if "prompt_config" in req:
         res["prompt_config"].update(req["prompt_config"])
         for p in res["prompt_config"]["parameters"]:

@@ -248,6 +252,8 @@ def update(tenant_id, chat_id):
 @manager.route('/chats', methods=['DELETE'])  # noqa: F821
 @token_required
 def delete(tenant_id):
+    errors = []
+    success_count = 0
     req = request.json
     if not req:
         ids = None

@@ -260,14 +266,39 @@ def delete(tenant_id):
             id_list.append(dia.id)
     else:
         id_list = ids
-    for id in id_list:
+
+    unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "assistant")
+
+    for id in unique_id_list:
         if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
-            return get_error_data_result(message=f"You don't own the chat {id}")
+            errors.append(f"Assistant({id}) not found.")
+            continue
         temp_dict = {"status": StatusEnum.INVALID.value}
         DialogService.update_by_id(id, temp_dict)
+        success_count += 1
+
+    if errors:
+        if success_count > 0:
+            return get_result(
+                data={"success_count": success_count, "errors": errors},
+                message=f"Partially deleted {success_count} chats with {len(errors)} errors"
+            )
+        else:
+            return get_error_data_result(message="; ".join(errors))
+
+    if duplicate_messages:
+        if success_count > 0:
+            return get_result(
+                message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors",
+                data={"success_count": success_count, "errors": duplicate_messages}
+            )
+        else:
+            return get_error_data_result(message=";".join(duplicate_messages))
+
     return get_result()
 
 
 @manager.route('/chats', methods=['GET'])  # noqa: F821
 @token_required
 def list_chat(tenant_id):

@@ -316,7 +347,8 @@ def list_chat(tenant_id):
         for kb_id in res["kb_ids"]:
             kb = KnowledgebaseService.query(id=kb_id)
             if not kb:
-                return get_error_data_result(message=f"Don't exist the kb {kb_id}")
+                logging.warning(f"The kb {kb_id} does not exist.")
+                continue
             kb_list.append(kb[0].to_json())
         del res["kb_ids"]
         res["datasets"] = kb_list

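Note: check_duplicate_ids is imported from api.utils.api_utils but its body is not part of this diff. A possible shape, consistent only with how it is called above (ids plus a label in, unique ids plus human-readable messages out); the message wording here is an assumption:

# Hypothetical sketch of a check_duplicate_ids-style helper (not the committed implementation).
def check_duplicate_ids(ids, label="item"):
    seen, unique, messages = set(), [], []
    for i in ids:
        if i in seen:
            messages.append(f"Duplicate {label} ids: {i}")
        else:
            seen.add(i)
            unique.append(i)
    return unique, messages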
@@ -30,7 +30,7 @@ from api.utils.api_utils import (
     token_required,
     get_error_data_result,
     valid,
-    get_parser_config,
+    get_parser_config, valid_parser_config, dataset_readonly_fields,check_duplicate_ids
 )
 
 
@@ -66,14 +66,10 @@ def create(tenant_id):
             type: string
             enum: ['me', 'team']
             description: Dataset permission.
-          language:
-            type: string
-            enum: ['Chinese', 'English']
-            description: Language of the dataset.
           chunk_method:
             type: string
             enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
-                   "presentation", "picture", "one", "knowledge_graph", "email", "tag"
+                   "presentation", "picture", "one", "email", "tag"
                    ]
             description: Chunking method.
           parser_config:

@@ -89,13 +85,15 @@ def create(tenant_id):
             type: object
     """
     req = request.json
+    for k in req.keys():
+        if dataset_readonly_fields(k):
+            return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
     e, t = TenantService.get_by_id(tenant_id)
     permission = req.get("permission")
-    language = req.get("language")
     chunk_method = req.get("chunk_method")
     parser_config = req.get("parser_config")
+    valid_parser_config(parser_config)
     valid_permission = ["me", "team"]
-    valid_language = ["Chinese", "English"]
     valid_chunk_method = [
         "naive",
         "manual",

@@ -107,15 +105,12 @@ def create(tenant_id):
         "presentation",
         "picture",
         "one",
-        "knowledge_graph",
         "email",
         "tag"
     ]
     check_validation = valid(
         permission,
         valid_permission,
-        language,
-        valid_language,
         chunk_method,
         valid_chunk_method,
     )

@@ -134,28 +129,23 @@ def create(tenant_id):
     req["name"] = req["name"].strip()
     if req["name"] == "":
         return get_error_data_result(message="`name` is not empty string!")
+    if len(req["name"]) >= 128:
+        return get_error_data_result(
+            message="Dataset name should not be longer than 128 characters."
+        )
     if KnowledgebaseService.query(
         name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
     ):
         return get_error_data_result(
             message="Duplicated dataset name in creating dataset."
         )
-    req["tenant_id"] = req["created_by"] = tenant_id
+    req["tenant_id"] = tenant_id
+    req["created_by"] = tenant_id
     if not req.get("embedding_model"):
         req["embedding_model"] = t.embd_id
     else:
         valid_embedding_models = [
             "BAAI/bge-large-zh-v1.5",
-            "BAAI/bge-base-en-v1.5",
-            "BAAI/bge-large-en-v1.5",
-            "BAAI/bge-small-en-v1.5",
-            "BAAI/bge-small-zh-v1.5",
-            "jinaai/jina-embeddings-v2-base-en",
-            "jinaai/jina-embeddings-v2-small-en",
-            "nomic-ai/nomic-embed-text-v1.5",
-            "sentence-transformers/all-MiniLM-L6-v2",
-            "text-embedding-v2",
-            "text-embedding-v3",
             "maidalun1020/bce-embedding-base_v1",
         ]
         embd_model = LLMService.query(

@@ -182,6 +172,10 @@ def create(tenant_id):
         if old_key in req
     }
     req.update(mapped_keys)
+    flds = list(req.keys())
+    for f in flds:
+        if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
+            del req[f]
     if not KnowledgebaseService.save(**req):
         return get_error_data_result(message="Create dataset error.(Database error)")
     renamed_data = {}

@@ -226,6 +220,8 @@ def delete(tenant_id):
           schema:
             type: object
     """
+    errors = []
+    success_count = 0
     req = request.json
     if not req:
         ids = None

@@ -238,15 +234,18 @@ def delete(tenant_id):
             id_list.append(kb.id)
     else:
         id_list = ids
+    unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "dataset")
+    id_list = unique_id_list
 
     for id in id_list:
         kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
         if not kbs:
-            return get_error_data_result(message=f"You don't own the dataset {id}")
+            errors.append(f"You don't own the dataset {id}")
+            continue
         for doc in DocumentService.query(kb_id=id):
             if not DocumentService.remove_document(doc, tenant_id):
-                return get_error_data_result(
-                    message="Remove document error.(Database error)"
-                )
+                errors.append(f"Remove document error for dataset {id}")
+                continue
             f2d = File2DocumentService.get_by_document_id(doc.id)
             FileService.filter_delete(
                 [

@@ -258,11 +257,26 @@ def delete(tenant_id):
         FileService.filter_delete(
             [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
         if not KnowledgebaseService.delete_by_id(id):
-            return get_error_data_result(message="Delete dataset error.(Database error)")
+            errors.append(f"Delete dataset error for {id}")
+            continue
+        success_count += 1
+    if errors:
+        if success_count > 0:
+            return get_result(
+                data={"success_count": success_count, "errors": errors},
+                message=f"Partially deleted {success_count} datasets with {len(errors)} errors"
+            )
+        else:
+            return get_error_data_result(message="; ".join(errors))
+    if duplicate_messages:
+        if success_count > 0:
+            return get_result(message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
+        else:
+            return get_error_data_result(message=";".join(duplicate_messages))
     return get_result(code=settings.RetCode.SUCCESS)
 
 
 @manager.route("/datasets/<dataset_id>", methods=["PUT"])  # noqa: F821
 @token_required
 def update(tenant_id, dataset_id):
     """

@@ -297,14 +311,10 @@ def update(tenant_id, dataset_id):
             type: string
             enum: ['me', 'team']
             description: Updated permission.
-          language:
-            type: string
-            enum: ['Chinese', 'English']
-            description: Updated language.
           chunk_method:
             type: string
             enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
-                   "presentation", "picture", "one", "knowledge_graph", "email", "tag"
+                   "presentation", "picture", "one", "email", "tag"
                    ]
             description: Updated chunking method.
           parser_config:

@@ -319,16 +329,18 @@ def update(tenant_id, dataset_id):
     if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
         return get_error_data_result(message="You don't own the dataset")
     req = request.json
+    for k in req.keys():
+        if dataset_readonly_fields(k):
+            return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
     e, t = TenantService.get_by_id(tenant_id)
-    invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
+    invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id", "create_date", "create_time", "created_by", "status","token_num","update_date","update_time"}
     if any(key in req for key in invalid_keys):
         return get_error_data_result(message="The input parameters are invalid.")
     permission = req.get("permission")
-    language = req.get("language")
     chunk_method = req.get("chunk_method")
     parser_config = req.get("parser_config")
+    valid_parser_config(parser_config)
     valid_permission = ["me", "team"]
-    valid_language = ["Chinese", "English"]
     valid_chunk_method = [
         "naive",
         "manual",

@@ -340,15 +352,12 @@ def update(tenant_id, dataset_id):
         "presentation",
         "picture",
         "one",
-        "knowledge_graph",
         "email",
         "tag"
     ]
     check_validation = valid(
         permission,
         valid_permission,
-        language,
-        valid_language,
         chunk_method,
         valid_chunk_method,
     )

@@ -370,7 +379,7 @@ def update(tenant_id, dataset_id):
         if req["document_count"] != kb.doc_num:
             return get_error_data_result(message="Can't change `document_count`.")
         req.pop("document_count")
-    if "chunk_method" in req:
+    if req.get("chunk_method"):
         if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
             return get_error_data_result(
                 message="If `chunk_count` is not 0, `chunk_method` is not changeable."

@@ -416,6 +425,10 @@ def update(tenant_id, dataset_id):
         req["embd_id"] = req.pop("embedding_model")
     if "name" in req:
         req["name"] = req["name"].strip()
+        if len(req["name"]) >= 128:
+            return get_error_data_result(
+                message="Dataset name should not be longer than 128 characters."
+            )
         if (
             req["name"].lower() != kb.name.lower()
             and len(

@@ -428,6 +441,10 @@ def update(tenant_id, dataset_id):
             return get_error_data_result(
                 message="Duplicated dataset name in updating dataset."
             )
+    flds = list(req.keys())
+    for f in flds:
+        if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
+            del req[f]
     if not KnowledgebaseService.update_by_id(kb.id, req):
         return get_error_data_result(message="Update dataset error.(Database error)")
     return get_result(code=settings.RetCode.SUCCESS)

@@ -435,7 +452,7 @@ def update(tenant_id, dataset_id):
 
 @manager.route("/datasets", methods=["GET"])  # noqa: F821
 @token_required
-def list(tenant_id):
+def list_datasets(tenant_id):
     """
     List datasets.
     ---

@@ -504,7 +521,9 @@ def list(tenant_id):
     page_number = int(request.args.get("page", 1))
     items_per_page = int(request.args.get("page_size", 30))
     orderby = request.args.get("orderby", "create_time")
-    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
+    if request.args.get("desc", "false").lower() not in ["true", "false"]:
+        return get_error_data_result("desc should be true or false")
+    if request.args.get("desc", "true").lower() == "false":
         desc = False
     else:
         desc = True

@@ -16,11 +16,11 @@
 from flask import request, jsonify
 
 from api.db import LLMType
-from api.db.services.dialog_service import label_question
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
 from api import settings
 from api.utils.api_utils import validate_request, build_error_result, apikey_required
+from rag.app.tag import label_question
 
 
 @manager.route('/dify/retrieval', methods=['POST'])  # noqa: F821

@ -16,7 +16,6 @@
|
|||||||
import pathlib
|
import pathlib
|
||||||
import datetime
|
import datetime
|
||||||
|
|
||||||
from api.db.services.dialog_service import keyword_extraction, label_question
|
|
||||||
from rag.app.qa import rmPrefix, beAdoc
|
from rag.app.qa import rmPrefix, beAdoc
|
||||||
from rag.nlp import rag_tokenizer
|
from rag.nlp import rag_tokenizer
|
||||||
from api.db import LLMType, ParserType
|
from api.db import LLMType, ParserType
|
||||||
@ -37,8 +36,10 @@ from api.db.services.document_service import DocumentService
|
|||||||
from api.db.services.file2document_service import File2DocumentService
|
from api.db.services.file2document_service import File2DocumentService
|
||||||
from api.db.services.file_service import FileService
|
from api.db.services.file_service import FileService
|
||||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||||
from api.utils.api_utils import construct_json_result, get_parser_config
|
from api.utils.api_utils import construct_json_result, get_parser_config, check_duplicate_ids
|
||||||
from rag.nlp import search
|
from rag.nlp import search
|
||||||
|
from rag.prompts import keyword_extraction
|
||||||
|
from rag.app.tag import label_question
|
||||||
from rag.utils import rmSpace
|
from rag.utils import rmSpace
|
||||||
from rag.utils.storage_factory import STORAGE_IMPL
|
from rag.utils.storage_factory import STORAGE_IMPL
|
||||||
|
|
||||||
@ -66,6 +67,7 @@ class Chunk(BaseModel):
|
|||||||
raise ValueError("Each sublist in positions must have a length of 5")
|
raise ValueError("Each sublist in positions must have a length of 5")
|
||||||
return value
|
return value
|
||||||
|
|
||||||
|
|
||||||
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"]) # noqa: F821
|
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def upload(dataset_id, tenant_id):
|
def upload(dataset_id, tenant_id):
|
||||||
@ -135,6 +137,10 @@ def upload(dataset_id, tenant_id):
|
|||||||
return get_result(
|
return get_result(
|
||||||
message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR
|
message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR
|
||||||
)
|
)
|
||||||
|
if len(file_obj.filename.encode("utf-8")) >= 128:
|
||||||
|
return get_result(
|
||||||
|
message="File name should be less than 128 bytes.", code=settings.RetCode.ARGUMENT_ERROR
|
||||||
|
)
|
||||||
'''
|
'''
|
||||||
# total size
|
# total size
|
||||||
total_size = 0
|
total_size = 0
|
||||||
@ -239,7 +245,17 @@ def update_doc(tenant_id, dataset_id, document_id):
|
|||||||
if req["progress"] != doc.progress:
|
if req["progress"] != doc.progress:
|
||||||
return get_error_data_result(message="Can't change `progress`.")
|
return get_error_data_result(message="Can't change `progress`.")
|
||||||
|
|
||||||
|
if "meta_fields" in req:
|
||||||
|
if not isinstance(req["meta_fields"], dict):
|
||||||
|
return get_error_data_result(message="meta_fields must be a dictionary")
|
||||||
|
DocumentService.update_meta_fields(document_id, req["meta_fields"])
|
||||||
|
|
||||||
if "name" in req and req["name"] != doc.name:
|
if "name" in req and req["name"] != doc.name:
|
||||||
|
if len(req["name"].encode("utf-8")) >= 128:
|
||||||
|
return get_result(
|
||||||
|
message="The name should be less than 128 bytes.",
|
||||||
|
code=settings.RetCode.ARGUMENT_ERROR,
|
||||||
|
)
|
||||||
if (
|
if (
|
||||||
pathlib.Path(req["name"].lower()).suffix
|
pathlib.Path(req["name"].lower()).suffix
|
||||||
!= pathlib.Path(doc.name.lower()).suffix
|
!= pathlib.Path(doc.name.lower()).suffix
|
||||||
@ -260,6 +276,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
|||||||
if informs:
|
if informs:
|
||||||
e, file = FileService.get_by_id(informs[0].file_id)
|
e, file = FileService.get_by_id(informs[0].file_id)
|
||||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
FileService.update_by_id(file.id, {"name": req["name"]})
|
||||||
|
|
||||||
if "parser_config" in req:
|
if "parser_config" in req:
|
||||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||||
if "chunk_method" in req:
|
if "chunk_method" in req:
|
||||||
@ -356,6 +373,10 @@ def download(tenant_id, dataset_id, document_id):
|
|||||||
schema:
|
schema:
|
||||||
type: object
|
type: object
|
||||||
"""
|
"""
|
||||||
|
if not document_id:
|
||||||
|
return get_error_data_result(
|
||||||
|
message="Specify document_id please."
|
||||||
|
)
|
||||||
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
|
||||||
return get_error_data_result(message=f"You do not own the dataset {dataset_id}.")
|
return get_error_data_result(message=f"You do not own the dataset {dataset_id}.")
|
||||||
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
|
doc = DocumentService.query(kb_id=dataset_id, id=document_id)
|
||||||
@@ -472,10 +493,12 @@ def list_docs(dataset_id, tenant_id):
 return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
 id = request.args.get("id")
 name = request.args.get("name")
-if not DocumentService.query(id=id, kb_id=dataset_id):
+if id and not DocumentService.query(id=id, kb_id=dataset_id):
 return get_error_data_result(message=f"You don't own the document {id}.")
-if not DocumentService.query(name=name, kb_id=dataset_id):
+if name and not DocumentService.query(name=name, kb_id=dataset_id):
 return get_error_data_result(message=f"You don't own the document {name}.")

 page = int(request.args.get("page", 1))
 keywords = request.args.get("keywords", "")
 page_size = int(request.args.get("page_size", 30))
@@ -569,15 +592,22 @@ def delete(tenant_id, dataset_id):
 doc_list.append(doc.id)
 else:
 doc_list = doc_ids

+unique_doc_ids, duplicate_messages = check_duplicate_ids(doc_list, "document")
+doc_list = unique_doc_ids

 root_folder = FileService.get_root_folder(tenant_id)
 pf_id = root_folder["id"]
 FileService.init_knowledgebase_docs(pf_id, tenant_id)
 errors = ""
+not_found = []
+success_count = 0
 for doc_id in doc_list:
 try:
 e, doc = DocumentService.get_by_id(doc_id)
 if not e:
-return get_error_data_result(message="Document not found!")
+not_found.append(doc_id)
+continue
 tenant_id = DocumentService.get_tenant_id(doc_id)
 if not tenant_id:
 return get_error_data_result(message="Tenant not found!")
@@ -599,12 +629,22 @@ def delete(tenant_id, dataset_id):
 File2DocumentService.delete_by_document_id(doc_id)

 STORAGE_IMPL.rm(b, n)
+success_count += 1
 except Exception as e:
 errors += str(e)

+if not_found:
+return get_result(message=f"Documents not found: {not_found}", code=settings.RetCode.DATA_ERROR)

 if errors:
 return get_result(message=errors, code=settings.RetCode.SERVER_ERROR)

+if duplicate_messages:
+if success_count > 0:
+return get_result(message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
+else:
+return get_error_data_result(message=";".join(duplicate_messages))

 return get_result()

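The deletion path above, like several endpoints in this patch, relies on check_duplicate_ids from api.utils.api_utils, which is not shown in this hunk. A plausible sketch of its contract, assuming it returns the de-duplicated ids plus one message per duplicate (the real implementation may differ):

def check_duplicate_ids(ids, label="item"):
    seen, unique, messages = set(), [], []
    for item_id in ids:
        if item_id in seen:
            messages.append(f"Duplicate {label} ids: {item_id}")
        else:
            seen.add(item_id)
            unique.append(item_id)
    return unique, messages

# check_duplicate_ids(["a", "b", "a"], "document")
# -> (["a", "b"], ["Duplicate document ids: a"])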
@@ -652,18 +692,24 @@ def parse(tenant_id, dataset_id):
 req = request.json
 if not req.get("document_ids"):
 return get_error_data_result("`document_ids` is required")
-for id in req["document_ids"]:
+doc_list = req.get("document_ids")
+unique_doc_ids, duplicate_messages = check_duplicate_ids(doc_list, "document")
+doc_list = unique_doc_ids

+not_found = []
+success_count = 0
+for id in doc_list:
 doc = DocumentService.query(id=id, kb_id=dataset_id)
+if not doc:
+not_found.append(id)
+continue
 if not doc:
 return get_error_data_result(message=f"You don't own the document {id}.")
-if doc[0].progress != 0.0:
+if 0.0 < doc[0].progress < 1.0:
 return get_error_data_result(
-"Can't stop parsing document with progress at 0 or 100"
+"Can't parse document that is currently being processed"
 )
-info = {"run": "1", "progress": 0}
-info["progress_msg"] = ""
-info["chunk_num"] = 0
-info["token_num"] = 0
+info = {"run": "1", "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
 DocumentService.update_by_id(id, info)
 settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), dataset_id)
 TaskService.filter_delete([Task.doc_id == id])
@@ -671,7 +717,16 @@ def parse(tenant_id, dataset_id):
 doc = doc.to_dict()
 doc["tenant_id"] = tenant_id
 bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
-queue_tasks(doc, bucket, name)
+queue_tasks(doc, bucket, name, 0)
+success_count += 1
+if not_found:
+return get_result(message=f"Documents not found: {not_found}", code=settings.RetCode.DATA_ERROR)
+if duplicate_messages:
+if success_count > 0:
+return get_result(message=f"Partially parsed {success_count} documents with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
+else:
+return get_error_data_result(message=";".join(duplicate_messages))

 return get_result()

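With this change, triggering parsing tolerates duplicate and unknown document ids instead of aborting on the first problem. A hedged client-side sketch; the host, key and the /datasets/<dataset_id>/chunks path are assumptions based on the documents API this module exposes:

import requests

resp = requests.post(
    "http://ragflow_address/api/v1/datasets/<dataset_id>/chunks",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"document_ids": ["doc_1", "doc_1", "doc_2"]},
)
# A repeated id now produces a partial-success payload such as
# "Partially parsed 2 documents with 1 errors" rather than an error response.
print(resp.json().get("message"))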
@ -717,9 +772,15 @@ def stop_parsing(tenant_id, dataset_id):
|
|||||||
if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
|
if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
|
||||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
|
return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
|
||||||
req = request.json
|
req = request.json
|
||||||
|
|
||||||
if not req.get("document_ids"):
|
if not req.get("document_ids"):
|
||||||
return get_error_data_result("`document_ids` is required")
|
return get_error_data_result("`document_ids` is required")
|
||||||
for id in req["document_ids"]:
|
doc_list = req.get("document_ids")
|
||||||
|
unique_doc_ids, duplicate_messages = check_duplicate_ids(doc_list, "document")
|
||||||
|
doc_list = unique_doc_ids
|
||||||
|
|
||||||
|
success_count = 0
|
||||||
|
for id in doc_list:
|
||||||
doc = DocumentService.query(id=id, kb_id=dataset_id)
|
doc = DocumentService.query(id=id, kb_id=dataset_id)
|
||||||
if not doc:
|
if not doc:
|
||||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||||
@ -729,7 +790,13 @@ def stop_parsing(tenant_id, dataset_id):
|
|||||||
)
|
)
|
||||||
info = {"run": "2", "progress": 0, "chunk_num": 0}
|
info = {"run": "2", "progress": 0, "chunk_num": 0}
|
||||||
DocumentService.update_by_id(id, info)
|
DocumentService.update_by_id(id, info)
|
||||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
|
settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
|
||||||
|
success_count += 1
|
||||||
|
if duplicate_messages:
|
||||||
|
if success_count > 0:
|
||||||
|
return get_result(message=f"Partially stopped {success_count} documents with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages},)
|
||||||
|
else:
|
||||||
|
return get_error_data_result(message=";".join(duplicate_messages))
|
||||||
return get_result()
|
return get_result()
|
||||||
|
|
||||||
|
|
||||||
@@ -850,6 +917,8 @@ def list_chunks(tenant_id, dataset_id, document_id):
 res = {"total": 0, "chunks": [], "doc": renamed_doc}
 if req.get("id"):
 chunk = settings.docStoreConn.get(req.get("id"), search.index_name(tenant_id), [dataset_id])
+if not chunk:
+return get_result(message=f"Chunk not found: {dataset_id}/{req.get('id')}", code=settings.RetCode.NOT_FOUND)
 k = []
 for n in chunk.keys():
 if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
@@ -867,7 +936,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
 "important_keywords":chunk.get("important_kwd",[]),
 "questions":chunk.get("question_kwd",[]),
 "dataset_id":chunk.get("kb_id",chunk.get("dataset_id")),
-"image_id":chunk["img_id"],
+"image_id":chunk.get("img_id", ""),
 "available":bool(chunk.get("available_int",1)),
 "positions":chunk.get("position_int",[]),
 }
@@ -892,7 +961,7 @@ def list_chunks(tenant_id, dataset_id, document_id):
 "questions": sres.field[id].get("question_kwd", []),
 "dataset_id": sres.field[id].get("kb_id", sres.field[id].get("dataset_id")),
 "image_id": sres.field[id].get("img_id", ""),
-"available": bool(sres.field[id].get("available_int", 1)),
+"available": bool(int(sres.field[id].get("available_int", "1"))),
 "positions": sres.field[id].get("position_int",[]),
 }
 res["chunks"].append(d)
@ -977,7 +1046,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
|
|||||||
)
|
)
|
||||||
doc = doc[0]
|
doc = doc[0]
|
||||||
req = request.json
|
req = request.json
|
||||||
if not req.get("content"):
|
if not str(req.get("content", "")).strip():
|
||||||
return get_error_data_result(message="`content` is required")
|
return get_error_data_result(message="`content` is required")
|
||||||
if "important_keywords" in req:
|
if "important_keywords" in req:
|
||||||
if not isinstance(req["important_keywords"], list):
|
if not isinstance(req["important_keywords"], list):
|
||||||
@ -1000,7 +1069,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
|
|||||||
d["important_tks"] = rag_tokenizer.tokenize(
|
d["important_tks"] = rag_tokenizer.tokenize(
|
||||||
" ".join(req.get("important_keywords", []))
|
" ".join(req.get("important_keywords", []))
|
||||||
)
|
)
|
||||||
d["question_kwd"] = req.get("questions", [])
|
d["question_kwd"] = [str(q).strip() for q in req.get("questions", []) if str(q).strip()]
|
||||||
d["question_tks"] = rag_tokenizer.tokenize(
|
d["question_tks"] = rag_tokenizer.tokenize(
|
||||||
"\n".join(req.get("questions", []))
|
"\n".join(req.get("questions", []))
|
||||||
)
|
)
|
||||||
@@ -1089,15 +1158,23 @@ def rm_chunk(tenant_id, dataset_id, document_id):
 """
 if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
 return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
+docs = DocumentService.get_by_ids([document_id])
+if not docs:
+raise LookupError(f"Can't find the document with ID {document_id}!")
 req = request.json
 condition = {"doc_id": document_id}
 if "chunk_ids" in req:
-condition["id"] = req["chunk_ids"]
+unique_chunk_ids, duplicate_messages = check_duplicate_ids(req["chunk_ids"], "chunk")
+condition["id"] = unique_chunk_ids
 chunk_number = settings.docStoreConn.delete(condition, search.index_name(tenant_id), dataset_id)
 if chunk_number != 0:
 DocumentService.decrement_chunk_num(document_id, dataset_id, 1, chunk_number, 0)
-if "chunk_ids" in req and chunk_number != len(req["chunk_ids"]):
-return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(req['chunk_ids'])}")
+if "chunk_ids" in req and chunk_number != len(unique_chunk_ids):
+if len(unique_chunk_ids) == 0:
+return get_result(message=f"deleted {chunk_number} chunks")
+return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(unique_chunk_ids)}")
+if duplicate_messages:
+return get_result(message=f"Partially deleted {chunk_number} chunks with {len(duplicate_messages)} errors", data={"success_count": chunk_number, "errors": duplicate_messages},)
 return get_result(message=f"deleted {chunk_number} chunks")

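A short sketch of how the duplicate handling in rm_chunk surfaces to a caller; the URL is an assumption modelled on the dataset/document routes above, and the ids are placeholders:

import requests

resp = requests.delete(
    "http://ragflow_address/api/v1/datasets/<dataset_id>/documents/<document_id>/chunks",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"chunk_ids": ["c1", "c1", "c2"]},
)
# Duplicates are filtered before deletion, so two chunks are removed and the
# response reports a partial result instead of a count-mismatch error.
print(resp.json().get("message"))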
@ -1185,7 +1262,7 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
|
|||||||
if "questions" in req:
|
if "questions" in req:
|
||||||
if not isinstance(req["questions"], list):
|
if not isinstance(req["questions"], list):
|
||||||
return get_error_data_result("`questions` should be a list")
|
return get_error_data_result("`questions` should be a list")
|
||||||
d["question_kwd"] = req.get("questions")
|
d["question_kwd"] = [str(q).strip() for q in req.get("questions", []) if str(q).strip()]
|
||||||
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["questions"]))
|
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["questions"]))
|
||||||
if "available" in req:
|
if "available" in req:
|
||||||
d["available_int"] = int(req["available"])
|
d["available_int"] = int(req["available"])
|
||||||
@@ -1301,7 +1378,7 @@ def retrieval_test(tenant_id):
 if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
 return get_error_data_result(f"You don't own the dataset {id}.")
 kbs = KnowledgebaseService.get_by_ids(kb_ids)
-embd_nms = list(set([kb.embd_id for kb in kbs]))
+embd_nms = list(set([TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs])) # remove vendor suffix for comparison
 if len(embd_nms) != 1:
 return get_result(
 message='Datasets use different embedding models."',
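The embedding-model check now strips the factory suffix before comparing datasets. A minimal illustration of the idea, assuming embd_id values of the form "model@factory"; the real TenantLLMService.split_model_name_and_factory may handle more cases:

def split_model_name_and_factory(embd_id: str):
    if "@" not in embd_id:
        return embd_id, None
    name, _, factory = embd_id.rpartition("@")
    return name, factory

ids = ["bge-m3@BAAI", "bge-m3@LocalAI"]
# The same model served by two vendors no longer counts as two different embeddings.
assert len({split_model_name_and_factory(i)[0] for i in ids}) == 1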

@@ -13,30 +13,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import re
 import json
-from api.db import LLMType
+import re
-from flask import request, Response
+import time

+import tiktoken
+from flask import Response, jsonify, request
 from api.db.services.conversation_service import ConversationService, iframe_completion
 from api.db.services.conversation_service import completion as rag_completion
-from api.db.services.canvas_service import completion as agent_completion
+from api.db.services.canvas_service import completion as agent_completion, completionOpenAI
-from api.db.services.dialog_service import ask
 from agent.canvas import Canvas
-from api.db import StatusEnum
+from api.db import LLMType, StatusEnum
 from api.db.db_models import APIToken
 from api.db.services.api_service import API4ConversationService
 from api.db.services.canvas_service import UserCanvasService
-from api.db.services.dialog_service import DialogService
+from api.db.services.dialog_service import DialogService, ask, chat
+from api.db.services.file_service import FileService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.utils import get_uuid
-from api.utils.api_utils import get_error_data_result
-from api.utils.api_utils import get_result, token_required
+from api.utils.api_utils import get_result, token_required, get_data_openai, get_error_data_result, validate_request, check_duplicate_ids
 from api.db.services.llm_service import LLMBundle


@manager.route('/chats/<chat_id>/sessions', methods=['POST']) # noqa: F821
|
@manager.route("/chats/<chat_id>/sessions", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def create(tenant_id, chat_id):
|
def create(tenant_id, chat_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
@ -49,7 +49,7 @@ def create(tenant_id, chat_id):
|
|||||||
"dialog_id": req["dialog_id"],
|
"dialog_id": req["dialog_id"],
|
||||||
"name": req.get("name", "New session"),
|
"name": req.get("name", "New session"),
|
||||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
|
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
|
||||||
"user_id": req.get("user_id", "")
|
"user_id": req.get("user_id", ""),
|
||||||
}
|
}
|
||||||
if not conv.get("name"):
|
if not conv.get("name"):
|
||||||
return get_error_data_result(message="`name` can not be empty.")
|
return get_error_data_result(message="`name` can not be empty.")
|
||||||
@ -58,23 +58,25 @@ def create(tenant_id, chat_id):
|
|||||||
if not e:
|
if not e:
|
||||||
return get_error_data_result(message="Fail to create a session!")
|
return get_error_data_result(message="Fail to create a session!")
|
||||||
conv = conv.to_dict()
|
conv = conv.to_dict()
|
||||||
conv['messages'] = conv.pop("message")
|
conv["messages"] = conv.pop("message")
|
||||||
conv["chat_id"] = conv.pop("dialog_id")
|
conv["chat_id"] = conv.pop("dialog_id")
|
||||||
del conv["reference"]
|
del conv["reference"]
|
||||||
return get_result(data=conv)
|
return get_result(data=conv)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/agents/<agent_id>/sessions', methods=['POST']) # noqa: F821
|
@manager.route("/agents/<agent_id>/sessions", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def create_agent_session(tenant_id, agent_id):
|
def create_agent_session(tenant_id, agent_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
|
if not request.is_json:
|
||||||
|
req = request.form
|
||||||
|
files = request.files
|
||||||
|
user_id = request.args.get("user_id", "")
|
||||||
e, cvs = UserCanvasService.get_by_id(agent_id)
|
e, cvs = UserCanvasService.get_by_id(agent_id)
|
||||||
if not e:
|
if not e:
|
||||||
return get_error_data_result("Agent not found.")
|
return get_error_data_result("Agent not found.")
|
||||||
|
|
||||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||||
return get_error_data_result("You cannot access the agent.")
|
return get_error_data_result("You cannot access the agent.")
|
||||||
|
|
||||||
if not isinstance(cvs.dsl, str):
|
if not isinstance(cvs.dsl, str):
|
||||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||||
|
|
||||||
@ -84,33 +86,45 @@ def create_agent_session(tenant_id, agent_id):
|
|||||||
if query:
|
if query:
|
||||||
for ele in query:
|
for ele in query:
|
||||||
if not ele["optional"]:
|
if not ele["optional"]:
|
||||||
if not req.get(ele["key"]):
|
if ele["type"] == "file":
|
||||||
return get_error_data_result(f"`{ele['key']}` is required")
|
if files is None or not files.get(ele["key"]):
|
||||||
ele["value"] = req[ele["key"]]
|
return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
|
||||||
if ele["optional"]:
|
upload_file = files.get(ele["key"])
|
||||||
if req.get(ele["key"]):
|
file_content = FileService.parse_docs([upload_file], user_id)
|
||||||
ele["value"] = req[ele['key']]
|
file_name = upload_file.filename
|
||||||
|
ele["value"] = file_name + "\n" + file_content
|
||||||
else:
|
else:
|
||||||
if "value" in ele:
|
if req is None or not req.get(ele["key"]):
|
||||||
ele.pop("value")
|
return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
|
||||||
else:
|
ele["value"] = req[ele["key"]]
|
||||||
for ans in canvas.run(stream=False):
|
else:
|
||||||
pass
|
if ele["type"] == "file":
|
||||||
|
if files is not None and files.get(ele["key"]):
|
||||||
|
upload_file = files.get(ele["key"])
|
||||||
|
file_content = FileService.parse_docs([upload_file], user_id)
|
||||||
|
file_name = upload_file.filename
|
||||||
|
ele["value"] = file_name + "\n" + file_content
|
||||||
|
else:
|
||||||
|
if "value" in ele:
|
||||||
|
ele.pop("value")
|
||||||
|
else:
|
||||||
|
if req is not None and req.get(ele["key"]):
|
||||||
|
ele["value"] = req[ele["key"]]
|
||||||
|
else:
|
||||||
|
if "value" in ele:
|
||||||
|
ele.pop("value")
|
||||||
|
|
||||||
|
for ans in canvas.run(stream=False):
|
||||||
|
pass
|
||||||
|
|
||||||
cvs.dsl = json.loads(str(canvas))
|
cvs.dsl = json.loads(str(canvas))
|
||||||
conv = {
|
conv = {"id": get_uuid(), "dialog_id": cvs.id, "user_id": user_id, "message": [{"role": "assistant", "content": canvas.get_prologue()}], "source": "agent", "dsl": cvs.dsl}
|
||||||
"id": get_uuid(),
|
|
||||||
"dialog_id": cvs.id,
|
|
||||||
"user_id": req.get("user_id", "") if isinstance(req, dict) else "",
|
|
||||||
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
|
|
||||||
"source": "agent",
|
|
||||||
"dsl": cvs.dsl
|
|
||||||
}
|
|
||||||
API4ConversationService.save(**conv)
|
API4ConversationService.save(**conv)
|
||||||
conv["agent_id"] = conv.pop("dialog_id")
|
conv["agent_id"] = conv.pop("dialog_id")
|
||||||
return get_result(data=conv)
|
return get_result(data=conv)
|
||||||
|
|
||||||
|
|
||||||
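Because create_agent_session now falls back to request.form and request.files when the body is not JSON, an agent whose begin parameters include a file input can be started with a multipart upload. A hedged sketch; the agent id, key and parameter names are placeholders:

import requests

with open("report.pdf", "rb") as fh:
    resp = requests.post(
        "http://ragflow_address/api/v1/agents/<agent_id>/sessions?user_id=u1",
        headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
        data={"customer_name": "ACME"},          # plain form fields populate req
        files={"contract": ("report.pdf", fh)},  # file fields populate request.files
    )
print(resp.json())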
@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT']) # noqa: F821
|
@manager.route("/chats/<chat_id>/sessions/<session_id>", methods=["PUT"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def update(tenant_id, chat_id, session_id):
|
def update(tenant_id, chat_id, session_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
@ -132,12 +146,14 @@ def update(tenant_id, chat_id, session_id):
|
|||||||
return get_result()
|
return get_result()
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/chats/<chat_id>/completions', methods=['POST']) # noqa: F821
|
@manager.route("/chats/<chat_id>/completions", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def chat_completion(tenant_id, chat_id):
|
def chat_completion(tenant_id, chat_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
if not req or not req.get("session_id"):
|
if not req:
|
||||||
req = {"question": ""}
|
req = {"question": ""}
|
||||||
|
if not req.get("session_id"):
|
||||||
|
req["question"] = ""
|
||||||
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||||
return get_error_data_result(f"You don't own the chat {chat_id}")
|
return get_error_data_result(f"You don't own the chat {chat_id}")
|
||||||
if req.get("session_id"):
|
if req.get("session_id"):
|
||||||
@ -159,7 +175,227 @@ def chat_completion(tenant_id, chat_id):
|
|||||||
return get_result(data=answer)
|
return get_result(data=answer)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/agents/<agent_id>/completions', methods=['POST']) # noqa: F821
|
@manager.route("/chats_openai/<chat_id>/chat/completions", methods=["POST"]) # noqa: F821
|
||||||
|
@validate_request("model", "messages") # noqa: F821
|
||||||
|
@token_required
|
||||||
|
def chat_completion_openai_like(tenant_id, chat_id):
|
||||||
|
"""
|
||||||
|
OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.
|
||||||
|
|
||||||
|
This function allows users to interact with a model and receive responses based on a series of historical messages.
|
||||||
|
If `stream` is set to True (by default), the response will be streamed in chunks, mimicking the OpenAI-style API.
|
||||||
|
If `stream` is explicitly set to False, the response is returned as a single complete answer.
|
||||||
|
Example usage:
|
||||||
|
|
||||||
|
curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-H "Authorization: Bearer $RAGFLOW_API_KEY" \
|
||||||
|
-d '{
|
||||||
|
"model": "model",
|
||||||
|
"messages": [{"role": "user", "content": "Say this is a test!"}],
|
||||||
|
"stream": true
|
||||||
|
}'
|
||||||
|
|
||||||
|
Alternatively, you can use Python's `OpenAI` client:
|
||||||
|
|
||||||
|
from openai import OpenAI
|
||||||
|
|
||||||
|
model = "model"
|
||||||
|
client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")
|
||||||
|
|
||||||
|
completion = client.chat.completions.create(
|
||||||
|
model=model,
|
||||||
|
messages=[
|
||||||
|
{"role": "system", "content": "You are a helpful assistant."},
|
||||||
|
{"role": "user", "content": "Who are you?"},
|
||||||
|
{"role": "assistant", "content": "I am an AI assistant named..."},
|
||||||
|
{"role": "user", "content": "Can you tell me how to install neovim"},
|
||||||
|
],
|
||||||
|
stream=True
|
||||||
|
)
|
||||||
|
|
||||||
|
stream = True
|
||||||
|
if stream:
|
||||||
|
for chunk in completion:
|
||||||
|
print(chunk)
|
||||||
|
else:
|
||||||
|
print(completion.choices[0].message.content)
|
||||||
|
"""
|
||||||
|
req = request.json
|
||||||
|
|
||||||
|
messages = req.get("messages", [])
|
||||||
|
# To prevent empty [] input
|
||||||
|
if len(messages) < 1:
|
||||||
|
return get_error_data_result("You have to provide messages.")
|
||||||
|
if messages[-1]["role"] != "user":
|
||||||
|
return get_error_data_result("The last content of this conversation is not from user.")
|
||||||
|
|
||||||
|
prompt = messages[-1]["content"]
|
||||||
|
# Treat context tokens as reasoning tokens
|
||||||
|
context_token_used = sum(len(message["content"]) for message in messages)
|
||||||
|
|
||||||
|
dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
|
||||||
|
if not dia:
|
||||||
|
return get_error_data_result(f"You don't own the chat {chat_id}")
|
||||||
|
dia = dia[0]
|
||||||
|
|
||||||
|
# Filter out system messages and assistant messages that precede the first user turn
|
||||||
|
msg = []
|
||||||
|
for m in messages:
|
||||||
|
if m["role"] == "system":
|
||||||
|
continue
|
||||||
|
if m["role"] == "assistant" and not msg:
|
||||||
|
continue
|
||||||
|
msg.append(m)
|
||||||
|
|
||||||
|
# tools = get_tools()
|
||||||
|
# toolcall_session = SimpleFunctionCallServer()
|
||||||
|
tools = None
|
||||||
|
toolcall_session = None
|
||||||
|
|
||||||
|
if req.get("stream", True):
|
||||||
|
# The value for the usage field on all chunks except for the last one will be null.
|
||||||
|
# The usage field on the last chunk contains token usage statistics for the entire request.
|
||||||
|
# The choices field on the last chunk will always be an empty array [].
|
||||||
|
def streamed_response_generator(chat_id, dia, msg):
|
||||||
|
token_used = 0
|
||||||
|
answer_cache = ""
|
||||||
|
reasoning_cache = ""
|
||||||
|
response = {
|
||||||
|
"id": f"chatcmpl-{chat_id}",
|
||||||
|
"choices": [{"delta": {"content": "", "role": "assistant", "function_call": None, "tool_calls": None, "reasoning_content": ""}, "finish_reason": None, "index": 0, "logprobs": None}],
|
||||||
|
"created": int(time.time()),
|
||||||
|
"model": "model",
|
||||||
|
"object": "chat.completion.chunk",
|
||||||
|
"system_fingerprint": "",
|
||||||
|
"usage": None,
|
||||||
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
|
for ans in chat(dia, msg, True, toolcall_session=toolcall_session, tools=tools):
|
||||||
|
answer = ans["answer"]
|
||||||
|
|
||||||
|
reasoning_match = re.search(r"<think>(.*?)</think>", answer, flags=re.DOTALL)
|
||||||
|
if reasoning_match:
|
||||||
|
reasoning_part = reasoning_match.group(1)
|
||||||
|
content_part = answer[reasoning_match.end() :]
|
||||||
|
else:
|
||||||
|
reasoning_part = ""
|
||||||
|
content_part = answer
|
||||||
|
|
||||||
|
reasoning_incremental = ""
|
||||||
|
if reasoning_part:
|
||||||
|
if reasoning_part.startswith(reasoning_cache):
|
||||||
|
reasoning_incremental = reasoning_part.replace(reasoning_cache, "", 1)
|
||||||
|
else:
|
||||||
|
reasoning_incremental = reasoning_part
|
||||||
|
reasoning_cache = reasoning_part
|
||||||
|
|
||||||
|
content_incremental = ""
|
||||||
|
if content_part:
|
||||||
|
if content_part.startswith(answer_cache):
|
||||||
|
content_incremental = content_part.replace(answer_cache, "", 1)
|
||||||
|
else:
|
||||||
|
content_incremental = content_part
|
||||||
|
answer_cache = content_part
|
||||||
|
|
||||||
|
token_used += len(reasoning_incremental) + len(content_incremental)
|
||||||
|
|
||||||
|
if not any([reasoning_incremental, content_incremental]):
|
||||||
|
continue
|
||||||
|
|
||||||
|
if reasoning_incremental:
|
||||||
|
response["choices"][0]["delta"]["reasoning_content"] = reasoning_incremental
|
||||||
|
else:
|
||||||
|
response["choices"][0]["delta"]["reasoning_content"] = None
|
||||||
|
|
||||||
|
if content_incremental:
|
||||||
|
response["choices"][0]["delta"]["content"] = content_incremental
|
||||||
|
else:
|
||||||
|
response["choices"][0]["delta"]["content"] = None
|
||||||
|
|
||||||
|
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
|
||||||
|
except Exception as e:
|
||||||
|
response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
|
||||||
|
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
|
||||||
|
|
||||||
|
# The last chunk
|
||||||
|
response["choices"][0]["delta"]["content"] = None
|
||||||
|
response["choices"][0]["delta"]["reasoning_content"] = None
|
||||||
|
response["choices"][0]["finish_reason"] = "stop"
|
||||||
|
response["usage"] = {"prompt_tokens": len(prompt), "completion_tokens": token_used, "total_tokens": len(prompt) + token_used}
|
||||||
|
yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
|
||||||
|
yield "data:[DONE]\n\n"
|
||||||
|
|
||||||
|
resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
|
||||||
|
resp.headers.add_header("Cache-control", "no-cache")
|
||||||
|
resp.headers.add_header("Connection", "keep-alive")
|
||||||
|
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||||
|
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||||
|
return resp
|
||||||
|
else:
|
||||||
|
answer = None
|
||||||
|
for ans in chat(dia, msg, False, toolcall_session=toolcall_session, tools=tools):
|
||||||
|
# focus answer content only
|
||||||
|
answer = ans
|
||||||
|
break
|
||||||
|
content = answer["answer"]
|
||||||
|
|
||||||
|
response = {
|
||||||
|
"id": f"chatcmpl-{chat_id}",
|
||||||
|
"object": "chat.completion",
|
||||||
|
"created": int(time.time()),
|
||||||
|
"model": req.get("model", ""),
|
||||||
|
"usage": {
|
||||||
|
"prompt_tokens": len(prompt),
|
||||||
|
"completion_tokens": len(content),
|
||||||
|
"total_tokens": len(prompt) + len(content),
|
||||||
|
"completion_tokens_details": {
|
||||||
|
"reasoning_tokens": context_token_used,
|
||||||
|
"accepted_prediction_tokens": len(content),
|
||||||
|
"rejected_prediction_tokens": 0, # 0 for simplicity
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"choices": [{"message": {"role": "assistant", "content": content}, "logprobs": None, "finish_reason": "stop", "index": 0}],
|
||||||
|
}
|
||||||
|
return jsonify(response)
|
||||||
|
|
||||||
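Besides the OpenAI client shown in the docstring, the streaming branch can be consumed as plain server-sent events, since every chunk is emitted as a "data:" line and the stream ends with "data:[DONE]". A hedged sketch with requests (host and key are placeholders):

import json
import requests

resp = requests.post(
    "http://ragflow_address/api/v1/chats_openai/<chat_id>/chat/completions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={"model": "model", "messages": [{"role": "user", "content": "Say this is a test!"}], "stream": True},
    stream=True,
)
for raw in resp.iter_lines(decode_unicode=True):
    if not raw or not raw.startswith("data:"):
        continue
    payload = raw[len("data:"):]
    if payload == "[DONE]":
        break
    delta = json.loads(payload)["choices"][0]["delta"]
    print(delta.get("content") or "", end="", flush=True)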
|
@manager.route('/agents_openai/<agent_id>/chat/completions', methods=['POST']) # noqa: F821
|
||||||
|
@validate_request("model", "messages") # noqa: F821
|
||||||
|
@token_required
|
||||||
|
def agents_completion_openai_compatibility(tenant_id, agent_id):
|
||||||
|
req = request.json
|
||||||
|
tiktokenenc = tiktoken.get_encoding("cl100k_base")
|
||||||
|
messages = req.get("messages", [])
|
||||||
|
if not messages:
|
||||||
|
return get_error_data_result("You must provide at least one message.")
|
||||||
|
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||||
|
return get_error_data_result(f"You don't own the agent {agent_id}")
|
||||||
|
|
||||||
|
filtered_messages = [m for m in messages if m["role"] in ["user", "assistant"]]
|
||||||
|
prompt_tokens = sum(len(tiktokenenc.encode(m["content"])) for m in filtered_messages)
|
||||||
|
if not filtered_messages:
|
||||||
|
return jsonify(get_data_openai(
|
||||||
|
id=agent_id,
|
||||||
|
content="No valid messages found (user or assistant).",
|
||||||
|
finish_reason="stop",
|
||||||
|
model=req.get("model", ""),
|
||||||
|
completion_tokens=len(tiktokenenc.encode("No valid messages found (user or assistant).")),
|
||||||
|
prompt_tokens=prompt_tokens,
|
||||||
|
))
|
||||||
|
|
||||||
|
# Get the last user message as the question
|
||||||
|
question = next((m["content"] for m in reversed(messages) if m["role"] == "user"), "")
|
||||||
|
|
||||||
|
if req.get("stream", True):
|
||||||
|
return Response(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", ""), stream=True), mimetype="text/event-stream")
|
||||||
|
else:
|
||||||
|
# For non-streaming, just return the response directly
|
||||||
|
response = next(completionOpenAI(tenant_id, agent_id, question, session_id=req.get("id", ""), stream=False))
|
||||||
|
return jsonify(response)
|
||||||
|
|
||||||
|
|
||||||
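The agents_openai route mirrors the chats_openai shape, so the stock OpenAI client can drive an agent as well. A hedged sketch (base_url, agent id and key are placeholders):

from openai import OpenAI

client = OpenAI(
    api_key="<RAGFLOW_API_KEY>",
    base_url="http://ragflow_address/api/v1/agents_openai/<agent_id>",
)
completion = client.chat.completions.create(
    model="model",
    messages=[{"role": "user", "content": "What can this agent do?"}],
    stream=False,
)
print(completion.choices[0].message.content)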
|
@manager.route("/agents/<agent_id>/completions", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def agent_completions(tenant_id, agent_id):
|
def agent_completions(tenant_id, agent_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
@ -170,12 +406,20 @@ def agent_completions(tenant_id, agent_id):
|
|||||||
dsl = cvs[0].dsl
|
dsl = cvs[0].dsl
|
||||||
if not isinstance(dsl, str):
|
if not isinstance(dsl, str):
|
||||||
dsl = json.dumps(dsl)
|
dsl = json.dumps(dsl)
|
||||||
#canvas = Canvas(dsl, tenant_id)
|
|
||||||
#if canvas.get_preset_param():
|
|
||||||
# req["question"] = ""
|
|
||||||
conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
|
conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
|
||||||
if not conv:
|
if not conv:
|
||||||
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
return get_error_data_result(f"You don't own the session {req['session_id']}")
|
||||||
|
# If an update to UserCanvas is detected, update the API4Conversation.dsl
|
||||||
|
sync_dsl = req.get("sync_dsl", False)
|
||||||
|
if sync_dsl is True and cvs[0].update_time > conv[0].update_time:
|
||||||
|
current_dsl = conv[0].dsl
|
||||||
|
new_dsl = json.loads(dsl)
|
||||||
|
state_fields = ["history", "messages", "path", "reference"]
|
||||||
|
states = {field: current_dsl.get(field, []) for field in state_fields}
|
||||||
|
current_dsl.update(new_dsl)
|
||||||
|
current_dsl.update(states)
|
||||||
|
API4ConversationService.update_by_id(req["session_id"], {"dsl": current_dsl})
|
||||||
else:
|
else:
|
||||||
req["question"] = ""
|
req["question"] = ""
|
||||||
if req.get("stream", True):
|
if req.get("stream", True):
|
||||||
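sync_dsl defaults to False, so existing callers keep the old behaviour; only when it is set and the canvas was edited after the session was created does the stored conversation pick up the new DSL while keeping its history, messages, path and reference. A hedged request sketch (host, ids and key are placeholders):

import requests

resp = requests.post(
    "http://ragflow_address/api/v1/agents/<agent_id>/completions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    json={
        "session_id": "<session_id>",
        "question": "Summarize the latest upload.",
        "sync_dsl": True,   # pull in canvas edits made since this session was created
        "stream": False,
    },
)
print(resp.json())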
@ -192,7 +436,7 @@ def agent_completions(tenant_id, agent_id):
|
|||||||
return get_error_data_result(str(e))
|
return get_error_data_result(str(e))
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/chats/<chat_id>/sessions', methods=['GET']) # noqa: F821
|
@manager.route("/chats/<chat_id>/sessions", methods=["GET"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def list_session(tenant_id, chat_id):
|
def list_session(tenant_id, chat_id):
|
||||||
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||||
@ -211,7 +455,7 @@ def list_session(tenant_id, chat_id):
|
|||||||
if not convs:
|
if not convs:
|
||||||
return get_result(data=[])
|
return get_result(data=[])
|
||||||
for conv in convs:
|
for conv in convs:
|
||||||
conv['messages'] = conv.pop("message")
|
conv["messages"] = conv.pop("message")
|
||||||
infos = conv["messages"]
|
infos = conv["messages"]
|
||||||
for info in infos:
|
for info in infos:
|
||||||
if "prompt" in info:
|
if "prompt" in info:
|
||||||
@ -245,7 +489,7 @@ def list_session(tenant_id, chat_id):
|
|||||||
return get_result(data=convs)
|
return get_result(data=convs)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/agents/<agent_id>/sessions', methods=['GET']) # noqa: F821
|
@manager.route("/agents/<agent_id>/sessions", methods=["GET"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def list_agent_session(tenant_id, agent_id):
|
def list_agent_session(tenant_id, agent_id):
|
||||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||||
@ -259,11 +503,13 @@ def list_agent_session(tenant_id, agent_id):
|
|||||||
desc = False
|
desc = False
|
||||||
else:
|
else:
|
||||||
desc = True
|
desc = True
|
||||||
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id, user_id)
|
# dsl defaults to True in all cases except for False and false
|
||||||
|
include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
|
||||||
|
convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id, user_id, include_dsl)
|
||||||
if not convs:
|
if not convs:
|
||||||
return get_result(data=[])
|
return get_result(data=[])
|
||||||
for conv in convs:
|
for conv in convs:
|
||||||
conv['messages'] = conv.pop("message")
|
conv["messages"] = conv.pop("message")
|
||||||
infos = conv["messages"]
|
infos = conv["messages"]
|
||||||
for info in infos:
|
for info in infos:
|
||||||
if "prompt" in info:
|
if "prompt" in info:
|
||||||
@ -296,11 +542,14 @@ def list_agent_session(tenant_id, agent_id):
|
|||||||
return get_result(data=convs)
|
return get_result(data=convs)
|
||||||
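Listing agent sessions can now skip the heavy dsl column; any value of the dsl query parameter other than "False"/"false" keeps the old payload. A hedged sketch (host, agent id and key are placeholders):

import requests

resp = requests.get(
    "http://ragflow_address/api/v1/agents/<agent_id>/sessions",
    headers={"Authorization": "Bearer <RAGFLOW_API_KEY>"},
    params={"page": 1, "page_size": 30, "dsl": "false"},  # omit dsl to receive the full canvas DSL
)
for session in resp.json().get("data", []):
    print(session["id"], len(session.get("messages", [])))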
|
|
||||||
|
|
||||||
@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"]) # noqa: F821
|
@manager.route("/chats/<chat_id>/sessions", methods=["DELETE"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def delete(tenant_id, chat_id):
|
def delete(tenant_id, chat_id):
|
||||||
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||||
return get_error_data_result(message="You don't own the chat")
|
return get_error_data_result(message="You don't own the chat")
|
||||||
|
|
||||||
|
errors = []
|
||||||
|
success_count = 0
|
||||||
req = request.json
|
req = request.json
|
||||||
convs = ConversationService.query(dialog_id=chat_id)
|
convs = ConversationService.query(dialog_id=chat_id)
|
||||||
if not req:
|
if not req:
|
||||||
@ -314,15 +563,98 @@ def delete(tenant_id, chat_id):
|
|||||||
conv_list.append(conv.id)
|
conv_list.append(conv.id)
|
||||||
else:
|
else:
|
||||||
conv_list = ids
|
conv_list = ids
|
||||||
|
|
||||||
|
unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
|
||||||
|
conv_list = unique_conv_ids
|
||||||
|
|
||||||
for id in conv_list:
|
for id in conv_list:
|
||||||
conv = ConversationService.query(id=id, dialog_id=chat_id)
|
conv = ConversationService.query(id=id, dialog_id=chat_id)
|
||||||
if not conv:
|
if not conv:
|
||||||
return get_error_data_result(message="The chat doesn't own the session")
|
errors.append(f"The chat doesn't own the session {id}")
|
||||||
|
continue
|
||||||
ConversationService.delete_by_id(id)
|
ConversationService.delete_by_id(id)
|
||||||
|
success_count += 1
|
||||||
|
|
||||||
|
if errors:
|
||||||
|
if success_count > 0:
|
||||||
|
return get_result(
|
||||||
|
data={"success_count": success_count, "errors": errors},
|
||||||
|
message=f"Partially deleted {success_count} sessions with {len(errors)} errors"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return get_error_data_result(message="; ".join(errors))
|
||||||
|
|
||||||
|
if duplicate_messages:
|
||||||
|
if success_count > 0:
|
||||||
|
return get_result(
|
||||||
|
message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors",
|
||||||
|
data={"success_count": success_count, "errors": duplicate_messages}
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return get_error_data_result(message=";".join(duplicate_messages))
|
||||||
|
|
||||||
return get_result()
|
return get_result()
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/sessions/ask', methods=['POST']) # noqa: F821
|
@manager.route("/agents/<agent_id>/sessions", methods=["DELETE"]) # noqa: F821
|
||||||
|
@token_required
|
||||||
|
def delete_agent_session(tenant_id, agent_id):
|
||||||
|
errors = []
|
||||||
|
success_count = 0
|
||||||
|
req = request.json
|
||||||
|
cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
|
||||||
|
if not cvs:
|
||||||
|
return get_error_data_result(f"You don't own the agent {agent_id}")
|
||||||
|
|
||||||
|
convs = API4ConversationService.query(dialog_id=agent_id)
|
||||||
|
if not convs:
|
||||||
|
return get_error_data_result(f"Agent {agent_id} has no sessions")
|
||||||
|
|
||||||
|
if not req:
|
||||||
|
ids = None
|
||||||
|
else:
|
||||||
|
ids = req.get("ids")
|
||||||
|
|
||||||
|
if not ids:
|
||||||
|
conv_list = []
|
||||||
|
for conv in convs:
|
||||||
|
conv_list.append(conv.id)
|
||||||
|
else:
|
||||||
|
conv_list = ids
|
||||||
|
|
||||||
|
unique_conv_ids, duplicate_messages = check_duplicate_ids(conv_list, "session")
|
||||||
|
conv_list = unique_conv_ids
|
||||||
|
|
||||||
|
for session_id in conv_list:
|
||||||
|
conv = API4ConversationService.query(id=session_id, dialog_id=agent_id)
|
||||||
|
if not conv:
|
||||||
|
errors.append(f"The agent doesn't own the session {session_id}")
|
||||||
|
continue
|
||||||
|
API4ConversationService.delete_by_id(session_id)
|
||||||
|
success_count += 1
|
||||||
|
|
||||||
|
if errors:
|
||||||
|
if success_count > 0:
|
||||||
|
return get_result(
|
||||||
|
data={"success_count": success_count, "errors": errors},
|
||||||
|
message=f"Partially deleted {success_count} sessions with {len(errors)} errors"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return get_error_data_result(message="; ".join(errors))
|
||||||
|
|
||||||
|
if duplicate_messages:
|
||||||
|
if success_count > 0:
|
||||||
|
return get_result(
|
||||||
|
message=f"Partially deleted {success_count} sessions with {len(duplicate_messages)} errors",
|
||||||
|
data={"success_count": success_count, "errors": duplicate_messages}
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return get_error_data_result(message=";".join(duplicate_messages))
|
||||||
|
|
||||||
|
return get_result()
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route("/sessions/ask", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def ask_about(tenant_id):
|
def ask_about(tenant_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
@ -348,9 +680,7 @@ def ask_about(tenant_id):
|
|||||||
for ans in ask(req["question"], req["kb_ids"], uid):
|
for ans in ask(req["question"], req["kb_ids"], uid):
|
||||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
|
||||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
|
||||||
ensure_ascii=False) + "\n\n"
|
|
||||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||||
|
|
||||||
resp = Response(stream(), mimetype="text/event-stream")
|
resp = Response(stream(), mimetype="text/event-stream")
|
||||||
@ -361,7 +691,7 @@ def ask_about(tenant_id):
|
|||||||
return resp
|
return resp
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/sessions/related_questions', methods=['POST']) # noqa: F821
|
@manager.route("/sessions/related_questions", methods=["POST"]) # noqa: F821
|
||||||
@token_required
|
@token_required
|
||||||
def related_questions(tenant_id):
|
def related_questions(tenant_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
@ -393,18 +723,27 @@ Reason:
|
|||||||
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
|
- At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
|
ans = chat_mdl.chat(
|
||||||
|
prompt,
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": f"""
|
||||||
Keywords: {question}
|
Keywords: {question}
|
||||||
Related search terms:
|
Related search terms:
|
||||||
"""}], {"temperature": 0.9})
|
""",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
{"temperature": 0.9},
|
||||||
|
)
|
||||||
return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
|
return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/chatbots/<dialog_id>/completions', methods=['POST']) # noqa: F821
|
@manager.route("/chatbots/<dialog_id>/completions", methods=["POST"]) # noqa: F821
|
||||||
def chatbot_completions(dialog_id):
|
def chatbot_completions(dialog_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
|
|
||||||
token = request.headers.get('Authorization').split()
|
token = request.headers.get("Authorization").split()
|
||||||
if len(token) != 2:
|
if len(token) != 2:
|
||||||
return get_error_data_result(message='Authorization is not valid!"')
|
return get_error_data_result(message='Authorization is not valid!"')
|
||||||
token = token[1]
|
token = token[1]
|
||||||
@ -427,11 +766,11 @@ def chatbot_completions(dialog_id):
|
|||||||
return get_result(data=answer)
|
return get_result(data=answer)
|
||||||
|
|
||||||
|
|
||||||
@manager.route('/agentbots/<agent_id>/completions', methods=['POST']) # noqa: F821
|
@manager.route("/agentbots/<agent_id>/completions", methods=["POST"]) # noqa: F821
|
||||||
def agent_bot_completions(agent_id):
|
def agent_bot_completions(agent_id):
|
||||||
req = request.json
|
req = request.json
|
||||||
|
|
||||||
token = request.headers.get('Authorization').split()
|
token = request.headers.get("Authorization").split()
|
||||||
if len(token) != 2:
|
if len(token) != 2:
|
||||||
return get_error_data_result(message='Authorization is not valid!"')
|
return get_error_data_result(message='Authorization is not valid!"')
|
||||||
token = token[1]
|
token = token[1]
|
||||||
|
|||||||
@ -37,7 +37,6 @@ from timeit import default_timer as timer
|
|||||||
|
|
||||||
from rag.utils.redis_conn import REDIS_CONN
|
from rag.utils.redis_conn import REDIS_CONN
|
||||||
|
|
||||||
|
|
||||||
@manager.route("/version", methods=["GET"]) # noqa: F821
|
@manager.route("/version", methods=["GET"]) # noqa: F821
|
||||||
@login_required
|
@login_required
|
||||||
def version():
|
def version():
|
||||||
@ -201,7 +200,7 @@ def new_token():
|
|||||||
if not tenants:
|
if not tenants:
|
||||||
return get_data_error_result(message="Tenant not found!")
|
return get_data_error_result(message="Tenant not found!")
|
||||||
|
|
||||||
tenant_id = tenants[0].tenant_id
|
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
|
||||||
obj = {
|
obj = {
|
||||||
"tenant_id": tenant_id,
|
"tenant_id": tenant_id,
|
||||||
"token": generate_confirmation_token(tenant_id),
|
"token": generate_confirmation_token(tenant_id),
|
||||||
@ -256,7 +255,7 @@ def token_list():
|
|||||||
if not tenants:
|
if not tenants:
|
||||||
return get_data_error_result(message="Tenant not found!")
|
return get_data_error_result(message="Tenant not found!")
|
||||||
|
|
||||||
tenant_id = tenants[0].tenant_id
|
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
|
||||||
objs = APITokenService.query(tenant_id=tenant_id)
|
objs = APITokenService.query(tenant_id=tenant_id)
|
||||||
objs = [o.to_dict() for o in objs]
|
objs = [o.to_dict() for o in objs]
|
||||||
for o in objs:
|
for o in objs:
|
||||||
@ -298,3 +297,25 @@ def rm(token):
|
|||||||
[APIToken.tenant_id == current_user.id, APIToken.token == token]
|
[APIToken.tenant_id == current_user.id, APIToken.token == token]
|
||||||
)
|
)
|
||||||
return get_json_result(data=True)
|
return get_json_result(data=True)
|
||||||
|
|
||||||
|
|
||||||
|
@manager.route('/config', methods=['GET']) # noqa: F821
|
||||||
|
def get_config():
|
||||||
|
"""
|
||||||
|
Get system configuration.
|
||||||
|
---
|
||||||
|
tags:
|
||||||
|
- System
|
||||||
|
responses:
|
||||||
|
200:
|
||||||
|
description: Return system configuration
|
||||||
|
schema:
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
registerEnabled:
|
||||||
|
type: integer
|
||||||
|
description: Whether user registration is enabled (0 means disabled, 1 means enabled)
|
||||||
|
"""
|
||||||
|
return get_json_result(data={
|
||||||
|
"registerEnabled": settings.REGISTER_ENABLED
|
||||||
|
})
|
||||||
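The /config route needs no login, so a front end can probe it before rendering the sign-up form. A hedged sketch; the /v1/system prefix is an assumption based on where the sibling routes in this file are usually mounted:

import requests

cfg = requests.get("http://ragflow_address/v1/system/config").json()
if cfg["data"]["registerEnabled"]:
    print("registration is open")
else:
    print("registration is disabled")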
|
|||||||
@ -562,11 +562,19 @@ def user_add():
|
|||||||
schema:
|
schema:
|
||||||
type: object
|
type: object
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
if not settings.REGISTER_ENABLED:
|
||||||
|
return get_json_result(
|
||||||
|
data=False,
|
||||||
|
message="User registration is disabled!",
|
||||||
|
code=settings.RetCode.OPERATING_ERROR,
|
||||||
|
)
|
||||||
|
|
||||||
req = request.json
|
req = request.json
|
||||||
email_address = req["email"]
|
email_address = req["email"]
|
||||||
|
|
||||||
# Validate the email address
|
# Validate the email address
|
||||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$", email_address):
|
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", email_address):
|
||||||
return get_json_result(
|
return get_json_result(
|
||||||
data=False,
|
data=False,
|
||||||
message=f"Invalid email address: {email_address}!",
|
message=f"Invalid email address: {email_address}!",
|
||||||
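The relaxed pattern accepts top-level domains longer than five characters (for example .technology), which the old {2,5} quantifier rejected. A quick check of both patterns:

import re

OLD = r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$"
NEW = r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$"

addr = "alice@example.technology"
assert re.match(OLD, addr) is None      # rejected before the change
assert re.match(NEW, addr) is not None  # accepted after the change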
|
|||||||
File diff suppressed because it is too large
@ -103,16 +103,12 @@ def init_llm_factory():
|
|||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
factory_llm_infos = json.load(
|
factory_llm_infos = settings.FACTORY_LLM_INFOS
|
||||||
open(
|
for factory_llm_info in factory_llm_infos:
|
||||||
os.path.join(get_project_base_directory(), "conf", "llm_factories.json"),
|
info = deepcopy(factory_llm_info)
|
||||||
"r",
|
llm_infos = info.pop("llm")
|
||||||
)
|
|
||||||
)
|
|
||||||
for factory_llm_info in factory_llm_infos["factory_llm_infos"]:
|
|
||||||
llm_infos = factory_llm_info.pop("llm")
|
|
||||||
try:
|
try:
|
||||||
LLMFactoriesService.save(**factory_llm_info)
|
LLMFactoriesService.save(**info)
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
|
LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
|
||||||
@ -152,7 +148,7 @@ def init_llm_factory():
|
|||||||
pass
|
pass
|
||||||
break
|
break
|
||||||
for kb_id in KnowledgebaseService.get_all_ids():
|
for kb_id in KnowledgebaseService.get_all_ids():
|
||||||
KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
|
KnowledgebaseService.update_document_number_in_init(kb_id=kb_id, doc_num=DocumentService.get_kb_doc_count(kb_id))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -160,7 +156,7 @@ def add_graph_templates():
|
|||||||
dir = os.path.join(get_project_base_directory(), "agent", "templates")
|
dir = os.path.join(get_project_base_directory(), "agent", "templates")
|
||||||
for fnm in os.listdir(dir):
|
for fnm in os.listdir(dir):
|
||||||
try:
|
try:
|
||||||
cnvs = json.load(open(os.path.join(dir, fnm), "r"))
|
cnvs = json.load(open(os.path.join(dir, fnm), "r",encoding="utf-8"))
|
||||||
try:
|
try:
|
||||||
CanvasTemplateService.save(**cnvs)
|
CanvasTemplateService.save(**cnvs)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
|||||||
@ -43,8 +43,12 @@ class API4ConversationService(CommonService):
|
|||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_list(cls, dialog_id, tenant_id,
|
def get_list(cls, dialog_id, tenant_id,
|
||||||
page_number, items_per_page,
|
page_number, items_per_page,
|
||||||
orderby, desc, id, user_id=None):
|
orderby, desc, id, user_id=None, include_dsl=True):
|
||||||
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
|
if include_dsl:
|
||||||
|
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
|
||||||
|
else:
|
||||||
|
fields = [field for field in cls.model._meta.fields.values() if field.name != 'dsl']
|
||||||
|
sessions = cls.model.select(*fields).where(cls.model.dialog_id == dialog_id)
|
||||||
if id:
|
if id:
|
||||||
sessions = sessions.where(cls.model.id == id)
|
sessions = sessions.where(cls.model.id == id)
|
||||||
if user_id:
|
if user_id:
|
||||||
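Excluding the dsl column is done by enumerating the model's other fields instead of calling select() with no arguments. A condensed sketch of the same peewee pattern on its own; the model and field names here are illustrative, not the project's:

from peewee import CharField, Model, SqliteDatabase, TextField

db = SqliteDatabase(":memory:")

class Conversation(Model):
    dialog_id = CharField()
    dsl = TextField(null=True)

    class Meta:
        database = db

fields = [f for f in Conversation._meta.fields.values() if f.name != "dsl"]
query = Conversation.select(*fields).where(Conversation.dialog_id == "abc")
# The generated SQL lists only the kept columns, so large dsl blobs are never fetched.
print(query.sql()[0])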
|
|||||||
@@ -18,13 +18,15 @@ import time
 import traceback
 from uuid import uuid4
 from agent.canvas import Canvas
-from api.db.db_models import DB, CanvasTemplate, UserCanvas, API4Conversation
+from api.db import TenantPermission
+from api.db.db_models import DB, CanvasTemplate, User, UserCanvas, API4Conversation
 from api.db.services.api_service import API4ConversationService
 from api.db.services.common_service import CommonService
 from api.db.services.conversation_service import structure_answer
 from api.utils import get_uuid
+from api.utils.api_utils import get_data_openai
+import tiktoken
+from peewee import fn
 class CanvasTemplateService(CommonService):
     model = CanvasTemplate

@@ -50,7 +52,74 @@ class UserCanvasService(CommonService):
         agents = agents.paginate(page_number, items_per_page)

         return list(agents.dicts())

+    @classmethod
+    @DB.connection_context()
+    def get_by_tenant_id(cls, pid):
+        try:
+            fields = [
+                cls.model.id,
+                cls.model.avatar,
+                cls.model.title,
+                cls.model.dsl,
+                cls.model.description,
+                cls.model.permission,
+                cls.model.update_time,
+                cls.model.user_id,
+                cls.model.create_time,
+                cls.model.create_date,
+                cls.model.update_date,
+                User.nickname,
+                User.avatar.alias('tenant_avatar'),
+            ]
+            angents = cls.model.select(*fields) \
+                .join(User, on=(cls.model.user_id == User.id)) \
+                .where(cls.model.id == pid)
+            # obj = cls.model.query(id=pid)[0]
+            return True, angents.dicts()[0]
+        except Exception as e:
+            print(e)
+            return False, None
+
+    @classmethod
+    @DB.connection_context()
+    def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
+                          page_number, items_per_page,
+                          orderby, desc, keywords,
+                          ):
+        fields = [
+            cls.model.id,
+            cls.model.avatar,
+            cls.model.title,
+            cls.model.dsl,
+            cls.model.description,
+            cls.model.permission,
+            User.nickname,
+            User.avatar.alias('tenant_avatar'),
+            cls.model.update_time
+        ]
+        if keywords:
+            angents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
+                ((cls.model.user_id.in_(joined_tenant_ids) & (cls.model.permission ==
+                                                              TenantPermission.TEAM.value)) | (
+                        cls.model.user_id == user_id)),
+                (fn.LOWER(cls.model.title).contains(keywords.lower()))
+            )
+        else:
+            angents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
+                ((cls.model.user_id.in_(joined_tenant_ids) & (cls.model.permission ==
+                                                              TenantPermission.TEAM.value)) | (
+                        cls.model.user_id == user_id))
+            )
+        if desc:
+            angents = angents.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            angents = angents.order_by(cls.model.getter_by(orderby).asc())
+        count = angents.count()
+        angents = angents.paginate(page_number, items_per_page)
+        return list(angents.dicts()), count
+
+
 def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
     e, cvs = UserCanvasService.get_by_id(agent_id)
@@ -86,21 +155,7 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
             "dsl": cvs.dsl
         }
         API4ConversationService.save(**conv)
-        if query:
-            yield "data:" + json.dumps({"code": 0,
-                                        "message": "",
-                                        "data": {
-                                            "session_id": session_id,
-                                            "answer": canvas.get_prologue(),
-                                            "reference": [],
-                                            "param": canvas.get_preset_param()
-                                        }
-                                        },
-                                       ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
-            return
-        else:
-            conv = API4Conversation(**conv)
+        conv = API4Conversation(**conv)
     else:
         e, conv = API4ConversationService.get_by_id(session_id)
         assert e, "Session not found!"
@@ -130,7 +185,7 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
                 continue
             for k in ans.keys():
                 final_ans[k] = ans[k]
-            ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
+            ans = {"answer": ans["content"], "reference": ans.get("reference", []), "param": canvas.get_preset_param()}
             ans = structure_answer(conv, ans, message_id, session_id)
             yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                        ensure_ascii=False) + "\n\n"
@@ -160,8 +215,211 @@ def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kw
                 canvas.reference.append(final_ans["reference"])
             conv.dsl = json.loads(str(canvas))

-            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
+            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", []), "param": canvas.get_preset_param()}
             result = structure_answer(conv, result, message_id, session_id)
             API4ConversationService.append_message(conv.id, conv.to_dict())
             yield result
             break

+def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
+    """Main function for OpenAI-compatible completions, structured similarly to the completion function."""
+    tiktokenenc = tiktoken.get_encoding("cl100k_base")
+    e, cvs = UserCanvasService.get_by_id(agent_id)
+
+    if not e:
+        yield get_data_openai(
+            id=session_id,
+            model=agent_id,
+            content="**ERROR**: Agent not found."
+        )
+        return
+
+    if cvs.user_id != tenant_id:
+        yield get_data_openai(
+            id=session_id,
+            model=agent_id,
+            content="**ERROR**: You do not own the agent"
+        )
+        return
+
+    if not isinstance(cvs.dsl, str):
+        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
+
+    canvas = Canvas(cvs.dsl, tenant_id)
+    canvas.reset()
+    message_id = str(uuid4())
+
+    # Handle new session creation
+    if not session_id:
+        query = canvas.get_preset_param()
+        if query:
+            for ele in query:
+                if not ele["optional"]:
+                    if not kwargs.get(ele["key"]):
+                        yield get_data_openai(
+                            id=None,
+                            model=agent_id,
+                            content=f"`{ele['key']}` is required",
+                            completion_tokens=len(tiktokenenc.encode(f"`{ele['key']}` is required")),
+                            prompt_tokens=len(tiktokenenc.encode(question if question else ""))
+                        )
+                        return
+                    ele["value"] = kwargs[ele["key"]]
+                if ele["optional"]:
+                    if kwargs.get(ele["key"]):
+                        ele["value"] = kwargs[ele['key']]
+                    else:
+                        if "value" in ele:
+                            ele.pop("value")
+
+        cvs.dsl = json.loads(str(canvas))
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": cvs.id,
+            "user_id": kwargs.get("user_id", "") if isinstance(kwargs, dict) else "",
+            "message": [{"role": "assistant", "content": canvas.get_prologue(), "created_at": time.time()}],
+            "source": "agent",
+            "dsl": cvs.dsl
+        }
+        API4ConversationService.save(**conv)
+        conv = API4Conversation(**conv)
+
+    # Handle existing session
+    else:
+        e, conv = API4ConversationService.get_by_id(session_id)
+        if not e:
+            yield get_data_openai(
+                id=session_id,
+                model=agent_id,
+                content="**ERROR**: Session not found!"
+            )
+            return
+
+        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
+        canvas.messages.append({"role": "user", "content": question, "id": message_id})
+        canvas.add_user_input(question)
+
+        if not conv.message:
+            conv.message = []
+        conv.message.append({
+            "role": "user",
+            "content": question,
+            "id": message_id
+        })
+
+        if not conv.reference:
+            conv.reference = []
+        conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    # Process request based on stream mode
+    final_ans = {"reference": [], "content": ""}
+    prompt_tokens = len(tiktokenenc.encode(str(question)))
+
+    if stream:
+        try:
+            completion_tokens = 0
+            for ans in canvas.run(stream=True):
+                if ans.get("running_status"):
+                    completion_tokens += len(tiktokenenc.encode(ans.get("content", "")))
+                    yield "data: " + json.dumps(
+                        get_data_openai(
+                            id=session_id,
+                            model=agent_id,
+                            content=ans["content"],
+                            object="chat.completion.chunk",
+                            completion_tokens=completion_tokens,
+                            prompt_tokens=prompt_tokens
+                        ),
+                        ensure_ascii=False
+                    ) + "\n\n"
+                    continue
+
+                for k in ans.keys():
+                    final_ans[k] = ans[k]
+
+            completion_tokens += len(tiktokenenc.encode(final_ans.get("content", "")))
+            yield "data: " + json.dumps(
+                get_data_openai(
+                    id=session_id,
+                    model=agent_id,
+                    content=final_ans["content"],
+                    object="chat.completion.chunk",
+                    finish_reason="stop",
+                    completion_tokens=completion_tokens,
+                    prompt_tokens=prompt_tokens
+                ),
+                ensure_ascii=False
+            ) + "\n\n"
+
+            # Update conversation
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "created_at": time.time(), "id": message_id})
+            canvas.history.append(("assistant", final_ans["content"]))
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+
+            yield "data: [DONE]\n\n"
+
+        except Exception as e:
+            traceback.print_exc()
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield "data: " + json.dumps(
+                get_data_openai(
+                    id=session_id,
+                    model=agent_id,
+                    content="**ERROR**: " + str(e),
+                    finish_reason="stop",
+                    completion_tokens=len(tiktokenenc.encode("**ERROR**: " + str(e))),
+                    prompt_tokens=prompt_tokens
+                ),
+                ensure_ascii=False
+            ) + "\n\n"
+            yield "data: [DONE]\n\n"
+
+    else:  # Non-streaming mode
+        try:
+            all_answer_content = ""
+            for answer in canvas.run(stream=False):
+                if answer.get("running_status"):
+                    continue
+
+                final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+                final_ans["reference"] = answer.get("reference", [])
+                all_answer_content += final_ans["content"]
+
+            final_ans["content"] = all_answer_content
+
+            # Update conversation
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "created_at": time.time(), "id": message_id})
+            canvas.history.append(("assistant", final_ans["content"]))
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+
+            # Return the response in OpenAI format
+            yield get_data_openai(
+                id=session_id,
+                model=agent_id,
+                content=final_ans["content"],
+                finish_reason="stop",
+                completion_tokens=len(tiktokenenc.encode(final_ans["content"])),
+                prompt_tokens=prompt_tokens,
+                param=canvas.get_preset_param()  # Added param info like in completion
+            )
+
+        except Exception as e:
+            traceback.print_exc()
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield get_data_openai(
+                id=session_id,
+                model=agent_id,
+                content="**ERROR**: " + str(e),
+                finish_reason="stop",
+                completion_tokens=len(tiktokenenc.encode("**ERROR**: " + str(e))),
+                prompt_tokens=prompt_tokens
+            )
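In streaming mode completionOpenAI yields Server-Sent-Events chunks ("data: {...}\n\n", closed with "data: [DONE]"); otherwise it yields a single get_data_openai payload. A rough sketch of how an HTTP handler might wrap the generator (the route wrapper, request shape, and Flask usage are assumptions, not part of the diff):

    from flask import Response

    def agent_completions(tenant_id, agent_id, req):
        # Hypothetical wrapper around the generator added above.
        if req.get("stream", True):
            return Response(
                completionOpenAI(tenant_id, agent_id, req["question"],
                                 session_id=req.get("id"), stream=True),
                mimetype="text/event-stream",
            )
        # Non-streaming: the generator yields exactly one OpenAI-style dict.
        return next(completionOpenAI(tenant_id, agent_id, req["question"],
                                     session_id=req.get("id"), stream=False))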
@@ -22,17 +22,56 @@ from api.utils import datetime_format, current_timestamp, get_uuid


 class CommonService:
+    """Base service class that provides common database operations.
+
+    This class serves as a foundation for all service classes in the application,
+    implementing standard CRUD operations and common database query patterns.
+    It uses the Peewee ORM for database interactions and provides a consistent
+    interface for database operations across all derived service classes.
+
+    Attributes:
+        model: The Peewee model class that this service operates on. Must be set by subclasses.
+    """
     model = None

     @classmethod
     @DB.connection_context()
     def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
+        """Execute a database query with optional column selection and ordering.
+
+        This method provides a flexible way to query the database with various filters
+        and sorting options. It supports column selection, sort order control, and
+        additional filter conditions.
+
+        Args:
+            cols (list, optional): List of column names to select. If None, selects all columns.
+            reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
+            order_by (str, optional): Column name to sort results by.
+            **kwargs: Additional filter conditions passed as keyword arguments.
+
+        Returns:
+            peewee.ModelSelect: A query result containing matching records.
+        """
         return cls.model.query(cols=cols, reverse=reverse,
                                order_by=order_by, **kwargs)

     @classmethod
     @DB.connection_context()
     def get_all(cls, cols=None, reverse=None, order_by=None):
+        """Retrieve all records from the database with optional column selection and ordering.
+
+        This method fetches all records from the model's table with support for
+        column selection and result ordering. If no order_by is specified and reverse
+        is True, it defaults to ordering by create_time.
+
+        Args:
+            cols (list, optional): List of column names to select. If None, selects all columns.
+            reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
+            order_by (str, optional): Column name to sort results by. Defaults to 'create_time' if reverse is specified.
+
+        Returns:
+            peewee.ModelSelect: A query containing all matching records.
+        """
         if cols:
             query_records = cls.model.select(*cols)
         else:

@@ -51,11 +90,36 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def get(cls, **kwargs):
+        """Get a single record matching the given criteria.
+
+        This method retrieves a single record from the database that matches
+        the specified filter conditions.
+
+        Args:
+            **kwargs: Filter conditions as keyword arguments.
+
+        Returns:
+            Model instance: Single matching record.
+
+        Raises:
+            peewee.DoesNotExist: If no matching record is found.
+        """
         return cls.model.get(**kwargs)

     @classmethod
     @DB.connection_context()
     def get_or_none(cls, **kwargs):
+        """Get a single record or None if not found.
+
+        This method attempts to retrieve a single record matching the given criteria,
+        returning None if no match is found instead of raising an exception.
+
+        Args:
+            **kwargs: Filter conditions as keyword arguments.
+
+        Returns:
+            Model instance or None: Matching record if found, None otherwise.
+        """
         try:
             return cls.model.get(**kwargs)
         except peewee.DoesNotExist:

@@ -64,14 +128,34 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def save(cls, **kwargs):
-        # if "id" not in kwargs:
-        #     kwargs["id"] = get_uuid()
+        """Save a new record to database.
+
+        This method creates a new record in the database with the provided field values,
+        forcing an insert operation rather than an update.
+
+        Args:
+            **kwargs: Record field values as keyword arguments.
+
+        Returns:
+            Model instance: The created record object.
+        """
         sample_obj = cls.model(**kwargs).save(force_insert=True)
         return sample_obj

     @classmethod
     @DB.connection_context()
     def insert(cls, **kwargs):
+        """Insert a new record with automatic ID and timestamps.
+
+        This method creates a new record with automatically generated ID and timestamp fields.
+        It handles the creation of create_time, create_date, update_time, and update_date fields.
+
+        Args:
+            **kwargs: Record field values as keyword arguments.
+
+        Returns:
+            Model instance: The newly created record object.
+        """
         if "id" not in kwargs:
             kwargs["id"] = get_uuid()
         kwargs["create_time"] = current_timestamp()

@@ -84,6 +168,15 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def insert_many(cls, data_list, batch_size=100):
+        """Insert multiple records in batches.
+
+        This method efficiently inserts multiple records into the database using batch processing.
+        It automatically sets creation timestamps for all records.
+
+        Args:
+            data_list (list): List of dictionaries containing record data to insert.
+            batch_size (int, optional): Number of records to insert in each batch. Defaults to 100.
+        """
         with DB.atomic():
             for d in data_list:
                 d["create_time"] = current_timestamp()

@@ -94,6 +187,15 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def update_many_by_id(cls, data_list):
+        """Update multiple records by their IDs.
+
+        This method updates multiple records in the database, identified by their IDs.
+        It automatically updates the update_time and update_date fields for each record.
+
+        Args:
+            data_list (list): List of dictionaries containing record data to update.
+                Each dictionary must include an 'id' field.
+        """
         with DB.atomic():
             for data in data_list:
                 data["update_time"] = current_timestamp()

@@ -104,6 +206,12 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def update_by_id(cls, pid, data):
+        # Update a single record by ID
+        # Args:
+        #     pid: Record ID
+        #     data: Updated field values
+        # Returns:
+        #     Number of records updated
         data["update_time"] = current_timestamp()
         data["update_date"] = datetime_format(datetime.now())
         num = cls.model.update(data).where(cls.model.id == pid).execute()

@@ -112,15 +220,28 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def get_by_id(cls, pid):
+        # Get a record by ID
+        # Args:
+        #     pid: Record ID
+        # Returns:
+        #     Tuple of (success, record)
         try:
-            obj = cls.model.query(id=pid)[0]
-            return True, obj
+            obj = cls.model.get_or_none(cls.model.id == pid)
+            if obj:
+                return True, obj
         except Exception:
-            return False, None
+            pass
+        return False, None

     @classmethod
     @DB.connection_context()
     def get_by_ids(cls, pids, cols=None):
+        # Get multiple records by their IDs
+        # Args:
+        #     pids: List of record IDs
+        #     cols: List of columns to select
+        # Returns:
+        #     Query of matching records
         if cols:
             objs = cls.model.select(*cols)
         else:

@@ -130,11 +251,21 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def delete_by_id(cls, pid):
+        # Delete a record by ID
+        # Args:
+        #     pid: Record ID
+        # Returns:
+        #     Number of records deleted
         return cls.model.delete().where(cls.model.id == pid).execute()

     @classmethod
     @DB.connection_context()
     def filter_delete(cls, filters):
+        # Delete records matching given filters
+        # Args:
+        #     filters: List of filter conditions
+        # Returns:
+        #     Number of records deleted
         with DB.atomic():
             num = cls.model.delete().where(*filters).execute()
             return num

@@ -142,11 +273,23 @@ class CommonService:
     @classmethod
     @DB.connection_context()
     def filter_update(cls, filters, update_data):
+        # Update records matching given filters
+        # Args:
+        #     filters: List of filter conditions
+        #     update_data: Updated field values
+        # Returns:
+        #     Number of records updated
         with DB.atomic():
             return cls.model.update(update_data).where(*filters).execute()

     @staticmethod
     def cut_list(tar_list, n):
+        # Split a list into chunks of size n
+        # Args:
+        #     tar_list: List to split
+        #     n: Chunk size
+        # Returns:
+        #     List of tuples containing chunks
         length = len(tar_list)
         arr = range(length)
         result = [tuple(tar_list[x:(x + n)]) for x in arr[::n]]

@@ -156,6 +299,14 @@ class CommonService:
     @DB.connection_context()
     def filter_scope_list(cls, in_key, in_filters_list,
                           filters=None, cols=None):
+        # Get records matching IN clause filters with optional column selection
+        # Args:
+        #     in_key: Field name for IN clause
+        #     in_filters_list: List of values for IN clause
+        #     filters: Additional filter conditions
+        #     cols: List of columns to select
+        # Returns:
+        #     List of matching records
         in_filters_tuple_list = cls.cut_list(in_filters_list, 20)
         if not filters:
             filters = []
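Since every service class inherits these helpers, the new docstrings describe one uniform CRUD surface. A small illustrative sketch of using a subclass (the Item model and field names are hypothetical, not from the diff):

    class ItemService(CommonService):
        model = Item  # any Peewee model with id / create_time / update_time fields

    e, obj = ItemService.get_by_id("abc123")   # -> (True, record) or (False, None)
    ItemService.insert(name="demo")            # fills id plus create/update timestamps
    ItemService.filter_update([Item.name == "demo"], {"name": "renamed"})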
@@ -23,6 +23,8 @@ from api.db.services.dialog_service import DialogService, chat
 from api.utils import get_uuid
 import json

+from rag.prompts import chunks_format
+

 class ConversationService(CommonService):
     model = Conversation

@@ -53,18 +55,7 @@ def structure_answer(conv, ans, message_id, session_id):
         reference = {}
     ans["reference"] = {}

-    def get_value(d, k1, k2):
-        return d.get(k1, d.get(k2))
-
-    chunk_list = [{
-        "id": get_value(chunk, "chunk_id", "id"),
-        "content": get_value(chunk, "content", "content_with_weight"),
-        "document_id": get_value(chunk, "doc_id", "document_id"),
-        "document_name": get_value(chunk, "docnm_kwd", "document_name"),
-        "dataset_id": get_value(chunk, "kb_id", "dataset_id"),
-        "image_id": get_value(chunk, "image_id", "img_id"),
-        "positions": get_value(chunk, "positions", "position_int"),
-    } for chunk in reference.get("chunks", [])]
-
+    chunk_list = chunks_format(reference)
     reference["chunks"] = chunk_list
     ans["id"] = message_id
@@ -13,48 +13,79 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import logging
 import binascii
-import os
-import json
-import json_repair
+from datetime import datetime
+import logging
 import re
-from collections import defaultdict
+import time
 from copy import deepcopy
+from functools import partial
 from timeit import default_timer as timer
-import datetime
-from datetime import timedelta
-from api.db import LLMType, ParserType, StatusEnum
-from api.db.db_models import Dialog, DB
-from api.db.services.common_service import CommonService
-from api.db.services.document_service import DocumentService
-from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
+from langfuse import Langfuse
+from agentic_reasoning import DeepResearcher
 from api import settings
-from graphrag.utils import get_tags_from_cache, set_tags_to_cache
+from api.db import LLMType, ParserType, StatusEnum
+from api.db.db_models import DB, Dialog
+from api.db.services.common_service import CommonService
+from api.db.services.knowledgebase_service import KnowledgebaseService
+from api.db.services.langfuse_service import TenantLangfuseService
+from api.db.services.llm_service import LLMBundle, TenantLLMService
+from api.utils import current_timestamp, datetime_format
 from rag.app.resume import forbidden_select_fields4resume
+from rag.app.tag import label_question
 from rag.nlp.search import index_name
-from rag.settings import TAG_FLD
-from rag.utils import rmSpace, num_tokens_from_string, encoder
-from api.utils.file_utils import get_project_base_directory
+from rag.prompts import chunks_format, citation_prompt, full_question, kb_prompt, keyword_extraction, llm_id2llm_type, message_fit_in
+from rag.utils import num_tokens_from_string, rmSpace
+from rag.utils.tavily_conn import Tavily


 class DialogService(CommonService):
     model = Dialog

+    @classmethod
+    def save(cls, **kwargs):
+        """Save a new record to database.
+
+        This method creates a new record in the database with the provided field values,
+        forcing an insert operation rather than an update.
+
+        Args:
+            **kwargs: Record field values as keyword arguments.
+
+        Returns:
+            Model instance: The created record object.
+        """
+        sample_obj = cls.model(**kwargs).save(force_insert=True)
+        return sample_obj
+
+    @classmethod
+    def update_many_by_id(cls, data_list):
+        """Update multiple records by their IDs.
+
+        This method updates multiple records in the database, identified by their IDs.
+        It automatically updates the update_time and update_date fields for each record.
+
+        Args:
+            data_list (list): List of dictionaries containing record data to update.
+                Each dictionary must include an 'id' field.
+        """
+        with DB.atomic():
+            for data in data_list:
+                data["update_time"] = current_timestamp()
+                data["update_date"] = datetime_format(datetime.now())
+                cls.model.update(data).where(cls.model.id == data["id"]).execute()
+
     @classmethod
     @DB.connection_context()
-    def get_list(cls, tenant_id,
-                 page_number, items_per_page, orderby, desc, id, name):
+    def get_list(cls, tenant_id, page_number, items_per_page, orderby, desc, id, name):
         chats = cls.model.select()
         if id:
             chats = chats.where(cls.model.id == id)
         if name:
             chats = chats.where(cls.model.name == name)
-        chats = chats.where(
-            (cls.model.tenant_id == tenant_id)
-            & (cls.model.status == StatusEnum.VALID.value)
-        )
+        chats = chats.where((cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value))
         if desc:
             chats = chats.order_by(cls.model.getter_by(orderby).desc())
         else:
@@ -65,131 +96,63 @@ class DialogService(CommonService):
         return list(chats.dicts())


-def message_fit_in(msg, max_length=4000):
-    def count():
-        nonlocal msg
-        tks_cnts = []
-        for m in msg:
-            tks_cnts.append(
-                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
-        total = 0
-        for m in tks_cnts:
-            total += m["count"]
-        return total
-
-    c = count()
-    if c < max_length:
-        return c, msg
-
-    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
-    if len(msg) > 1:
-        msg_.append(msg[-1])
-    msg = msg_
-    c = count()
-    if c < max_length:
-        return c, msg
-
-    ll = num_tokens_from_string(msg_[0]["content"])
-    ll2 = num_tokens_from_string(msg_[-1]["content"])
-    if ll / (ll + ll2) > 0.8:
-        m = msg_[0]["content"]
-        m = encoder.decode(encoder.encode(m)[:max_length - ll2])
-        msg[0]["content"] = m
-        return max_length, msg
-
-    m = msg_[1]["content"]
-    m = encoder.decode(encoder.encode(m)[:max_length - ll2])
-    msg[1]["content"] = m
-    return max_length, msg
-
-
-def llm_id2llm_type(llm_id):
-    llm_id, _ = TenantLLMService.split_model_name_and_factory(llm_id)
-    fnm = os.path.join(get_project_base_directory(), "conf")
-    llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
-    for llm_factory in llm_factories["factory_llm_infos"]:
-        for llm in llm_factory["llm"]:
-            if llm_id == llm["llm_name"]:
-                return llm["model_type"].strip(",")[-1]
-
-
-def kb_prompt(kbinfos, max_tokens):
-    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
-    used_token_count = 0
-    chunks_num = 0
-    for i, c in enumerate(knowledges):
-        used_token_count += num_tokens_from_string(c)
-        chunks_num += 1
-        if max_tokens * 0.97 < used_token_count:
-            knowledges = knowledges[:i]
-            break
-
-    docs = DocumentService.get_by_ids([ck["doc_id"] for ck in kbinfos["chunks"][:chunks_num]])
-    docs = {d.id: d.meta_fields for d in docs}
-
-    doc2chunks = defaultdict(lambda: {"chunks": [], "meta": []})
-    for ck in kbinfos["chunks"][:chunks_num]:
-        doc2chunks[ck["docnm_kwd"]]["chunks"].append(ck["content_with_weight"])
-        doc2chunks[ck["docnm_kwd"]]["meta"] = docs.get(ck["doc_id"], {})
-
-    knowledges = []
-    for nm, cks_meta in doc2chunks.items():
-        txt = f"Document: {nm} \n"
-        for k,v in cks_meta["meta"].items():
-            txt += f"{k}: {v}\n"
-        txt += "Relevant fragments as following:\n"
-        for i, chunk in enumerate(cks_meta["chunks"], 1):
-            txt += f"{i}. {chunk}\n"
-        knowledges.append(txt)
-    return knowledges
-
-
-def label_question(question, kbs):
-    tags = None
-    tag_kb_ids = []
-    for kb in kbs:
-        if kb.parser_config.get("tag_kb_ids"):
-            tag_kb_ids.extend(kb.parser_config["tag_kb_ids"])
-    if tag_kb_ids:
-        all_tags = get_tags_from_cache(tag_kb_ids)
-        if not all_tags:
-            all_tags = settings.retrievaler.all_tags_in_portion(kb.tenant_id, tag_kb_ids)
-            set_tags_to_cache(all_tags, tag_kb_ids)
-        else:
-            all_tags = json.loads(all_tags)
-        tag_kbs = KnowledgebaseService.get_by_ids(tag_kb_ids)
-        tags = settings.retrievaler.tag_query(question,
-                                              list(set([kb.tenant_id for kb in tag_kbs])),
-                                              tag_kb_ids,
-                                              all_tags,
-                                              kb.parser_config.get("topn_tags", 3)
-                                              )
-    return tags
+def chat_solo(dialog, messages, stream=True):
+    if llm_id2llm_type(dialog.llm_id) == "image2text":
+        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
+    else:
+        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
+
+    prompt_config = dialog.prompt_config
+    tts_mdl = None
+    if prompt_config.get("tts"):
+        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
+    msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"]
+    if stream:
+        last_ans = ""
+        for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
+            answer = ans
+            delta_ans = ans[len(last_ans) :]
+            if num_tokens_from_string(delta_ans) < 16:
+                continue
+            last_ans = answer
+            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
+        if delta_ans:
+            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
+    else:
+        answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
+        user_content = msg[-1].get("content", "[content not available]")
+        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
+        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}


 def chat(dialog, messages, stream=True, **kwargs):
     assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
+    if not dialog.kb_ids:
+        for ans in chat_solo(dialog, messages, stream):
+            yield ans
+        return
+
     chat_start_ts = timer()

-    # Get llm model name and model provider name
-    llm_id, model_provider = TenantLLMService.split_model_name_and_factory(dialog.llm_id)
-
-    # Get llm model instance by model and provide name
-    llm = LLMService.query(llm_name=llm_id) if not model_provider else LLMService.query(llm_name=llm_id, fid=model_provider)
-
-    if not llm:
-        # Model name is provided by tenant, but not system built-in
-        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not model_provider else \
-            TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=model_provider)
-        if not llm:
-            raise LookupError("LLM(%s) not found" % dialog.llm_id)
-        max_tokens = 8192
+    if llm_id2llm_type(dialog.llm_id) == "image2text":
+        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
     else:
-        max_tokens = llm[0].max_tokens
+        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
+
+    max_tokens = llm_model_config.get("max_tokens", 8192)

     check_llm_ts = timer()

+    langfuse_tracer = None
+    langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=dialog.tenant_id)
+    if langfuse_keys:
+        langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
+        if langfuse.auth_check():
+            langfuse_tracer = langfuse
+            langfuse.trace = langfuse_tracer.trace(name=f"{dialog.name}-{llm_model_config['llm_name']}")
+
+    check_langfuse_tracer_ts = timer()
+
     kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
     embedding_list = list(set([kb.embd_id for kb in kbs]))
     if len(embedding_list) != 1:
@@ -204,9 +167,6 @@ def chat(dialog, messages, stream=True, **kwargs):
     attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
     if "doc_ids" in messages[-1]:
         attachments = messages[-1]["doc_ids"]
-        for m in messages[:-1]:
-            if "doc_ids" in m:
-                attachments.extend(m["doc_ids"])

     create_retriever_ts = timer()

@@ -220,6 +180,9 @@ def chat(dialog, messages, stream=True, **kwargs):
         chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
     else:
         chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
+    toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
+    if toolcall_session and tools:
+        chat_mdl.bind_tools(toolcall_session, tools)

     bind_llm_ts = timer()

@@ -242,8 +205,7 @@ def chat(dialog, messages, stream=True, **kwargs):
         if p["key"] not in kwargs and not p["optional"]:
             raise KeyError("Miss parameter: " + p["key"])
         if p["key"] not in kwargs:
-            prompt_config["system"] = prompt_config["system"].replace(
-                "{%s}" % p["key"], " ")
+            prompt_config["system"] = prompt_config["system"].replace("{%s}" % p["key"], " ")

     if len(questions) > 1 and prompt_config.get("refine_multiturn"):
         questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]

@@ -258,9 +220,11 @@ def chat(dialog, messages, stream=True, **kwargs):

     bind_reranker_ts = timer()
     generate_keyword_ts = bind_reranker_ts
+    thought = ""
+    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}

     if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
-        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
+        knowledges = []
     else:
         if prompt_config.get("keyword", False):
             questions[-1] += keyword_extraction(chat_mdl, questions[-1])
@@ -268,67 +232,111 @@ def chat(dialog, messages, stream=True, **kwargs):

         tenant_ids = list(set([kb.tenant_id for kb in kbs]))

-        kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
-                                      dialog.similarity_threshold,
-                                      dialog.vector_similarity_weight,
-                                      doc_ids=attachments,
-                                      top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl,
-                                      rank_feature=label_question(" ".join(questions), kbs)
-                                      )
-        if prompt_config.get("use_kg"):
-            ck = settings.kg_retrievaler.retrieval(" ".join(questions),
-                                                   tenant_ids,
-                                                   dialog.kb_ids,
-                                                   embd_mdl,
-                                                   LLMBundle(dialog.tenant_id, LLMType.CHAT))
-            if ck["content_with_weight"]:
-                kbinfos["chunks"].insert(0, ck)
+        knowledges = []
+        if prompt_config.get("reasoning", False):
+            reasoner = DeepResearcher(
+                chat_mdl,
+                prompt_config,
+                partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3),
+            )
+
+            for think in reasoner.thinking(kbinfos, " ".join(questions)):
+                if isinstance(think, str):
+                    thought = think
+                    knowledges = [t for t in think.split("\n") if t]
+                elif stream:
+                    yield think
+        else:
+            kbinfos = retriever.retrieval(
+                " ".join(questions),
+                embd_mdl,
+                tenant_ids,
+                dialog.kb_ids,
+                1,
+                dialog.top_n,
+                dialog.similarity_threshold,
+                dialog.vector_similarity_weight,
+                doc_ids=attachments,
+                top=dialog.top_k,
+                aggs=False,
+                rerank_mdl=rerank_mdl,
+                rank_feature=label_question(" ".join(questions), kbs),
+            )
+            if prompt_config.get("tavily_api_key"):
+                tav = Tavily(prompt_config["tavily_api_key"])
+                tav_res = tav.retrieve_chunks(" ".join(questions))
+                kbinfos["chunks"].extend(tav_res["chunks"])
+                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
+            if prompt_config.get("use_kg"):
+                ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl, LLMBundle(dialog.tenant_id, LLMType.CHAT))
+                if ck["content_with_weight"]:
+                    kbinfos["chunks"].insert(0, ck)
+
+            knowledges = kb_prompt(kbinfos, max_tokens)
+
+        logging.debug("{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

     retrieval_ts = timer()

-    knowledges = kb_prompt(kbinfos, max_tokens)
-    logging.debug(
-        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
-
     if not knowledges and prompt_config.get("empty_response"):
         empty_res = prompt_config["empty_response"]
-        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
+        yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)}
         return {"answer": prompt_config["empty_response"], "reference": kbinfos}

     kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
     gen_conf = dialog.llm_setting

     msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
-    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
-                for m in messages if m["role"] != "system"])
-    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
+    prompt4citation = ""
+    if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
+        prompt4citation = citation_prompt()
+    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"])
+    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.95))
     assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
     prompt = msg[0]["content"]
-    prompt += "\n\n### Query:\n%s" % " ".join(questions)

     if "max_tokens" in gen_conf:
-        gen_conf["max_tokens"] = min(
-            gen_conf["max_tokens"],
-            max_tokens - used_token_count)
+        gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)

     def decorate_answer(answer):
-        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts
+        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer

         finish_chat_ts = timer()

         refs = []
+        ans = answer.split("</think>")
+        think = ""
+        if len(ans) == 2:
+            think = ans[0] + "</think>"
+            answer = ans[1]
+
         if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
-            answer, idx = retriever.insert_citations(answer,
-                                                     [ck["content_ltks"]
-                                                      for ck in kbinfos["chunks"]],
-                                                     [ck["vector"]
-                                                      for ck in kbinfos["chunks"]],
-                                                     embd_mdl,
-                                                     tkweight=1 - dialog.vector_similarity_weight,
-                                                     vtweight=dialog.vector_similarity_weight)
+            answer = re.sub(r"##[ij]\$\$", "", answer, flags=re.DOTALL)
+            idx = set([])
+            if not re.search(r"##[0-9]+\$\$", answer):
+                answer, idx = retriever.insert_citations(
+                    answer,
+                    [ck["content_ltks"] for ck in kbinfos["chunks"]],
+                    [ck["vector"] for ck in kbinfos["chunks"]],
+                    embd_mdl,
+                    tkweight=1 - dialog.vector_similarity_weight,
+                    vtweight=dialog.vector_similarity_weight,
+                )
+            else:
+                for match in re.finditer(r"##([0-9]+)\$\$", answer):
+                    i = int(match.group(1))
+                    if i < len(kbinfos["chunks"]):
+                        idx.add(i)
+
+                # handle (ID: 1), ID: 2 etc.
+                for match in re.finditer(r"\(\s*ID:\s*(\d+)\s*\)|ID[: ]+\s*(\d+)", answer):
+                    full_match = match.group(0)
+                    id = match.group(1) or match.group(2)
+                    if id:
+                        i = int(id)
+                        if i < len(kbinfos["chunks"]):
+                            idx.add(i)
+                            answer = answer.replace(full_match, f"##{i}$$")
+
             idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
-            recall_docs = [
-                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
+            recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
             if not recall_docs:
                 recall_docs = kbinfos["doc_aggs"]
             kbinfos["doc_aggs"] = recall_docs
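The added branch normalizes model-emitted citations such as "(ID: 1)" or "ID: 2" into the internal ##N$$ marker before document references are resolved. A standalone sketch of that normalization, using the same regexes as the hunk above (the helper name is illustrative):

    import re

    def normalize_citations(answer, chunk_count):
        # Map "(ID: 1)" / "ID: 2" style citations to ##i$$ markers, as in the diff above.
        idx = set()
        for match in re.finditer(r"\(\s*ID:\s*(\d+)\s*\)|ID[: ]+\s*(\d+)", answer):
            i = int(match.group(1) or match.group(2))
            if i < chunk_count:
                idx.add(i)
                answer = answer.replace(match.group(0), f"##{i}$$")
        return answer, idx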
@@ -344,7 +352,8 @@ def chat(dialog, messages, stream=True, **kwargs):

         total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
         check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
-        create_retriever_time_cost = (create_retriever_ts - check_llm_ts) * 1000
+        check_langfuse_tracer_cost = (check_langfuse_tracer_ts - check_llm_ts) * 1000
+        create_retriever_time_cost = (create_retriever_ts - check_langfuse_tracer_ts) * 1000
         bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
         bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
         refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
@ -353,27 +362,59 @@ def chat(dialog, messages, stream=True, **kwargs):
|
|||||||
retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
|
retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
|
||||||
generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000
|
generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000
|
||||||
|
|
||||||
prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
|
tk_num = num_tokens_from_string(think + answer)
|
||||||
return {"answer": answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt)}
|
prompt += "\n\n### Query:\n%s" % " ".join(questions)
|
||||||
|
prompt = (
|
||||||
|
f"{prompt}\n\n"
|
||||||
|
"## Time elapsed:\n"
|
||||||
|
f" - Total: {total_time_cost:.1f}ms\n"
|
||||||
|
f" - Check LLM: {check_llm_time_cost:.1f}ms\n"
|
||||||
|
f" - Check Langfuse tracer: {check_langfuse_tracer_cost:.1f}ms\n"
|
||||||
|
f" - Create retriever: {create_retriever_time_cost:.1f}ms\n"
|
||||||
|
f" - Bind embedding: {bind_embedding_time_cost:.1f}ms\n"
|
||||||
|
f" - Bind LLM: {bind_llm_time_cost:.1f}ms\n"
|
||||||
|
f" - Multi-turn optimization: {refine_question_time_cost:.1f}ms\n"
|
||||||
|
f" - Bind reranker: {bind_reranker_time_cost:.1f}ms\n"
|
||||||
|
f" - Generate keyword: {generate_keyword_time_cost:.1f}ms\n"
|
||||||
|
f" - Retrieval: {retrieval_time_cost:.1f}ms\n"
|
||||||
|
f" - Generate answer: {generate_result_time_cost:.1f}ms\n\n"
|
||||||
|
"## Token usage:\n"
|
||||||
|
f" - Generated tokens(approximately): {tk_num}\n"
|
||||||
|
f" - Token speed: {int(tk_num / (generate_result_time_cost / 1000.0))}/s"
|
||||||
|
)
|
||||||
|
|
||||||
|
langfuse_output = "\n" + re.sub(r"^.*?(### Query:.*)", r"\1", prompt, flags=re.DOTALL)
|
||||||
|
langfuse_output = {"time_elapsed:": re.sub(r"\n", " \n", langfuse_output), "created_at": time.time()}
|
||||||
|
|
||||||
|
# Add a condition check to call the end method only if langfuse_tracer exists
|
||||||
|
if langfuse_tracer and "langfuse_generation" in locals():
|
||||||
|
langfuse_generation.end(output=langfuse_output)
|
||||||
|
|
||||||
|
return {"answer": think + answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}
|
||||||
|
|
||||||
|
if langfuse_tracer:
|
||||||
|
langfuse_generation = langfuse_tracer.trace.generation(name="chat", model=llm_model_config["llm_name"], input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg})
|
||||||
|
|
||||||
 if stream:
     last_ans = ""
     answer = ""
-    for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
+    for ans in chat_mdl.chat_streamly(prompt + prompt4citation, msg[1:], gen_conf):
+        if thought:
+            ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
         answer = ans
-        delta_ans = ans[len(last_ans):]
+        delta_ans = ans[len(last_ans) :]
         if num_tokens_from_string(delta_ans) < 16:
             continue
         last_ans = answer
-        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
+        yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
-    delta_ans = answer[len(last_ans):]
+    delta_ans = answer[len(last_ans) :]
     if delta_ans:
-        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
+        yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
-    yield decorate_answer(answer)
+    yield decorate_answer(thought + answer)
 else:
-    answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
+    answer = chat_mdl.chat(prompt + prompt4citation, msg[1:], gen_conf)
-    logging.debug("User: {}|Assistant: {}".format(
-        msg[-1]["content"], answer))
+    user_content = msg[-1].get("content", "[content not available]")
+    logging.debug("User: {}|Assistant: {}".format(user_content, answer))
     res = decorate_answer(answer)
     res["audio_binary"] = tts(tts_mdl, answer)
     yield res
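The streaming branch yields the cumulative answer but only after the unsent delta grows past roughly 16 tokens, so downstream consumers (SSE clients, TTS) are not flooded with tiny updates. A minimal sketch of that buffering idea, assuming cumulative chunks and a stand-in token counter rather than the project's own num_tokens_from_string:

def stream_with_min_delta(chunks, min_tokens=16, count_tokens=lambda s: len(s.split())):
    """Yield cumulative answers only when the unsent tail is at least min_tokens long."""
    last_sent = ""
    answer = ""
    for answer in chunks:                      # each chunk is the cumulative answer so far
        delta = answer[len(last_sent):]
        if count_tokens(delta) < min_tokens:
            continue                           # keep buffering small deltas
        last_sent = answer
        yield answer
    # flush whatever is still unsent once the stream ends
    if answer[len(last_sent):]:
        yield answer

# usage
parts = ["Hello", "Hello there, this is", "Hello there, this is a much longer cumulative answer with enough extra words to cross the threshold"]
for out in stream_with_min_delta(parts, min_tokens=8):
    print(out)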
@@ -389,26 +430,22 @@ Table of database fields are as follows:
 Question are as follows:
 {}
 Please write the SQL, only SQL, without any other explanations or text.
-""".format(
-    index_name(tenant_id),
-    "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
-    question
-)
+""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question)
 tried_times = 0

 def get_table():
     nonlocal sys_prompt, user_prompt, question, tried_times
-    sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
-        "temperature": 0.06})
+    sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.06})
+    sql = re.sub(r"<think>.*</think>", "", sql, flags=re.DOTALL)
     logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
     sql = re.sub(r"[\r\n]+", " ", sql.lower())
     sql = re.sub(r".*select ", "select ", sql.lower())
     sql = re.sub(r" +", " ", sql)
     sql = re.sub(r"([;;]|```).*", "", sql)
-    if sql[:len("select ")] != "select ":
+    if sql[: len("select ")] != "select ":
         return None, None
     if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
-        if sql[:len("select *")] != "select *":
+        if sql[: len("select *")] != "select *":
             sql = "select doc_id,docnm_kwd," + sql[6:]
         else:
             flds = []
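get_table() sanitizes the raw LLM reply with the regex chain shown above: strip <think> blocks, lower-case and flatten newlines, drop everything before the select keyword, collapse runs of spaces, and truncate at a semicolon or code fence. A quick illustration of the same chain on a made-up reply:

import re

raw = """<think>Let me reason about the schema first.</think>
Sure, here is the query:
```sql
SELECT   doc_id, docnm_kwd
FROM ragflow_1234abcd;
``` Hope this helps."""

sql = re.sub(r"<think>.*</think>", "", raw, flags=re.DOTALL)   # drop the reasoning block
sql = re.sub(r"[\r\n]+", " ", sql.lower())                     # flatten to one lower-cased line
sql = re.sub(r".*select ", "select ", sql.lower())             # cut chatter before the SELECT
sql = re.sub(r" +", " ", sql)                                  # collapse whitespace
sql = re.sub(r"([;;]|```).*", "", sql)                         # stop at ; or a code fence
print(sql)  # -> "select doc_id, docnm_kwd from ragflow_1234abcd"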
@ -432,11 +469,11 @@ Please write the SQL, only SQL, without any other explanations or text.
|
|||||||
Table name: {};
|
Table name: {};
|
||||||
Table of database fields are as follows:
|
Table of database fields are as follows:
|
||||||
{}
|
{}
|
||||||
|
|
||||||
Question are as follows:
|
Question are as follows:
|
||||||
{}
|
{}
|
||||||
Please write the SQL, only SQL, without any other explanations or text.
|
Please write the SQL, only SQL, without any other explanations or text.
|
||||||
|
|
||||||
|
|
||||||
The SQL error you provided last time is as follows:
|
The SQL error you provided last time is as follows:
|
||||||
{}
|
{}
|
||||||
@ -445,11 +482,7 @@ Please write the SQL, only SQL, without any other explanations or text.
|
|||||||
{}
|
{}
|
||||||
|
|
||||||
Please correct the error and write SQL again, only SQL, without any other explanations or text.
|
Please correct the error and write SQL again, only SQL, without any other explanations or text.
|
||||||
""".format(
|
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question, sql, tbl["error"])
|
||||||
index_name(tenant_id),
|
|
||||||
"\n".join([f"{k}: {v}" for k, v in field_map.items()]),
|
|
||||||
question, sql, tbl["error"]
|
|
||||||
)
|
|
||||||
tbl, sql = get_table()
|
tbl, sql = get_table()
|
||||||
logging.debug("TRY it again: {}".format(sql))
|
logging.debug("TRY it again: {}".format(sql))
|
||||||
|
|
||||||
@@ -457,24 +490,18 @@ Please write the SQL, only SQL, without any other explanations or text.
 if tbl.get("error") or len(tbl["rows"]) == 0:
     return None

-docid_idx = set([ii for ii, c in enumerate(
-    tbl["columns"]) if c["name"] == "doc_id"])
-doc_name_idx = set([ii for ii, c in enumerate(
-    tbl["columns"]) if c["name"] == "docnm_kwd"])
-column_idx = [ii for ii in range(
-    len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
+docid_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "doc_id"])
+doc_name_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
+column_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]

 # compose Markdown table
-columns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
-    tbl["columns"][i]["name"])) for i in
-    column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
+columns = (
+    "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
+)

-line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + \
-    ("|------|" if docid_idx and docid_idx else "")
+line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and docid_idx else "")

-rows = ["|" +
-    "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") +
-    "|" for r in tbl["rows"]]
+rows = ["|" + "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") + "|" for r in tbl["rows"]]
 rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
 if quota:
     rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])

@@ -484,11 +511,7 @@ Please write the SQL, only SQL, without any other explanations or text.
 if not docid_idx or not doc_name_idx:
     logging.warning("SQL missing field: " + sql)
-    return {
-        "answer": "\n".join([columns, line, rows]),
-        "reference": {"chunks": [], "doc_aggs": []},
-        "prompt": sys_prompt
-    }
+    return {"answer": "\n".join([columns, line, rows]), "reference": {"chunks": [], "doc_aggs": []}, "prompt": sys_prompt}

 docid_idx = list(docid_idx)[0]
 doc_name_idx = list(doc_name_idx)[0]

@@ -499,179 +522,14 @@ Please write the SQL, only SQL, without any other explanations or text.
     doc_aggs[r[docid_idx]]["count"] += 1
 return {
     "answer": "\n".join([columns, line, rows]),
-    "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
-        "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
-            doc_aggs.items()]},
-    "prompt": sys_prompt
+    "reference": {
+        "chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
+        "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()],
+    },
+    "prompt": sys_prompt,
 }
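The table-composition block joins the selected columns into a Markdown header, separator, and data rows, appending a Source column whenever doc_id/docnm_kwd are present. A standalone illustration of the same composition, with invented field names and without the project's rmSpace/field_map helpers:

columns_meta = [{"name": "doc_id"}, {"name": "docnm_kwd"}, {"name": "amount"}, {"name": "year"}]
rows_data = [["d1", "report.pdf", 12.5, 2023], ["d2", "summary.docx", None, 2024]]

# Separate the document-identity columns from the displayable ones.
docid_idx = {i for i, c in enumerate(columns_meta) if c["name"] == "doc_id"}
doc_name_idx = {i for i, c in enumerate(columns_meta) if c["name"] == "docnm_kwd"}
column_idx = [i for i in range(len(columns_meta)) if i not in (docid_idx | doc_name_idx)]

header = "|" + "|".join(columns_meta[i]["name"] for i in column_idx) + ("|Source|" if docid_idx else "|")
line = "|" + "|".join("------" for _ in column_idx) + ("|------|" if docid_idx else "")
rows = ["|" + "|".join(str(r[i]) for i in column_idx).replace("None", " ") + "|" for r in rows_data]
print("\n".join([header, line] + rows))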
|
|
||||||
def relevant(tenant_id, llm_id, question, contents: list):
|
|
||||||
if llm_id2llm_type(llm_id) == "image2text":
|
|
||||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
|
||||||
else:
|
|
||||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
|
||||||
prompt = """
|
|
||||||
You are a grader assessing relevance of a retrieved document to a user question.
|
|
||||||
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
|
|
||||||
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
|
|
||||||
Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
|
|
||||||
No other words needed except 'yes' or 'no'.
|
|
||||||
"""
|
|
||||||
if not contents:
|
|
||||||
return False
|
|
||||||
contents = "Documents: \n" + " - ".join(contents)
|
|
||||||
contents = f"Question: {question}\n" + contents
|
|
||||||
if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
|
|
||||||
contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
|
|
||||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
|
|
||||||
if ans.lower().find("yes") >= 0:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def rewrite(tenant_id, llm_id, question):
|
|
||||||
if llm_id2llm_type(llm_id) == "image2text":
|
|
||||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
|
||||||
else:
|
|
||||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
|
||||||
prompt = """
|
|
||||||
You are an expert at query expansion to generate a paraphrasing of a question.
|
|
||||||
I can't retrieval relevant information from the knowledge base by using user's question directly.
|
|
||||||
You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
|
|
||||||
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
|
|
||||||
changing the way of expression, translating the original question into another language (English/Chinese), etc.
|
|
||||||
And return 5 versions of question and one is from translation.
|
|
||||||
Just list the question. No other words are needed.
|
|
||||||
"""
|
|
||||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
|
|
||||||
return ans
|
|
||||||
|
|
||||||
|
|
||||||
def keyword_extraction(chat_mdl, content, topn=3):
|
|
||||||
prompt = f"""
|
|
||||||
Role: You're a text analyzer.
|
|
||||||
Task: extract the most important keywords/phrases of a given piece of text content.
|
|
||||||
Requirements:
|
|
||||||
- Summarize the text content, and give top {topn} important keywords/phrases.
|
|
||||||
- The keywords MUST be in language of the given piece of text content.
|
|
||||||
- The keywords are delimited by ENGLISH COMMA.
|
|
||||||
- Keywords ONLY in output.
|
|
||||||
|
|
||||||
### Text Content
|
|
||||||
{content}
|
|
||||||
|
|
||||||
"""
|
|
||||||
msg = [
|
|
||||||
{"role": "system", "content": prompt},
|
|
||||||
{"role": "user", "content": "Output: "}
|
|
||||||
]
|
|
||||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
|
||||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
|
|
||||||
if isinstance(kwd, tuple):
|
|
||||||
kwd = kwd[0]
|
|
||||||
if kwd.find("**ERROR**") >= 0:
|
|
||||||
return ""
|
|
||||||
return kwd
|
|
||||||
|
|
||||||
|
|
||||||
def question_proposal(chat_mdl, content, topn=3):
|
|
||||||
prompt = f"""
|
|
||||||
Role: You're a text analyzer.
|
|
||||||
Task: propose {topn} questions about a given piece of text content.
|
|
||||||
Requirements:
|
|
||||||
- Understand and summarize the text content, and propose top {topn} important questions.
|
|
||||||
- The questions SHOULD NOT have overlapping meanings.
|
|
||||||
- The questions SHOULD cover the main content of the text as much as possible.
|
|
||||||
- The questions MUST be in language of the given piece of text content.
|
|
||||||
- One question per line.
|
|
||||||
- Question ONLY in output.
|
|
||||||
|
|
||||||
### Text Content
|
|
||||||
{content}
|
|
||||||
|
|
||||||
"""
|
|
||||||
msg = [
|
|
||||||
{"role": "system", "content": prompt},
|
|
||||||
{"role": "user", "content": "Output: "}
|
|
||||||
]
|
|
||||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
|
||||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
|
|
||||||
if isinstance(kwd, tuple):
|
|
||||||
kwd = kwd[0]
|
|
||||||
if kwd.find("**ERROR**") >= 0:
|
|
||||||
return ""
|
|
||||||
return kwd
|
|
||||||
|
|
||||||
|
|
||||||
def full_question(tenant_id, llm_id, messages):
|
|
||||||
if llm_id2llm_type(llm_id) == "image2text":
|
|
||||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
|
||||||
else:
|
|
||||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
|
||||||
conv = []
|
|
||||||
for m in messages:
|
|
||||||
if m["role"] not in ["user", "assistant"]:
|
|
||||||
continue
|
|
||||||
conv.append("{}: {}".format(m["role"].upper(), m["content"]))
|
|
||||||
conv = "\n".join(conv)
|
|
||||||
today = datetime.date.today().isoformat()
|
|
||||||
yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
|
|
||||||
tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
|
|
||||||
prompt = f"""
|
|
||||||
Role: A helpful assistant
|
|
||||||
|
|
||||||
Task and steps:
|
|
||||||
1. Generate a full user question that would follow the conversation.
|
|
||||||
2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.
|
|
||||||
|
|
||||||
Requirements & Restrictions:
|
|
||||||
- Text generated MUST be in the same language of the original user's question.
|
|
||||||
- If the user's latest question is completely, don't do anything, just return the original question.
|
|
||||||
- DON'T generate anything except a refined question.
|
|
||||||
|
|
||||||
######################
|
|
||||||
-Examples-
|
|
||||||
######################
|
|
||||||
|
|
||||||
# Example 1
|
|
||||||
## Conversation
|
|
||||||
USER: What is the name of Donald Trump's father?
|
|
||||||
ASSISTANT: Fred Trump.
|
|
||||||
USER: And his mother?
|
|
||||||
###############
|
|
||||||
Output: What's the name of Donald Trump's mother?
|
|
||||||
|
|
||||||
------------
|
|
||||||
# Example 2
|
|
||||||
## Conversation
|
|
||||||
USER: What is the name of Donald Trump's father?
|
|
||||||
ASSISTANT: Fred Trump.
|
|
||||||
USER: And his mother?
|
|
||||||
ASSISTANT: Mary Trump.
|
|
||||||
User: What's her full name?
|
|
||||||
###############
|
|
||||||
Output: What's the full name of Donald Trump's mother Mary Trump?
|
|
||||||
|
|
||||||
------------
|
|
||||||
# Example 3
|
|
||||||
## Conversation
|
|
||||||
USER: What's the weather today in London?
|
|
||||||
ASSISTANT: Cloudy.
|
|
||||||
USER: What's about tomorrow in Rochester?
|
|
||||||
###############
|
|
||||||
Output: What's the weather in Rochester on {tomorrow}?
|
|
||||||
######################
|
|
||||||
|
|
||||||
# Real Data
|
|
||||||
## Conversation
|
|
||||||
{conv}
|
|
||||||
###############
|
|
||||||
"""
|
|
||||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
|
|
||||||
return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
|
|
||||||
|
|
||||||
|
|
||||||
def tts(tts_mdl, text):
|
def tts(tts_mdl, text):
|
||||||
if not tts_mdl or not text:
|
if not tts_mdl or not text:
|
||||||
return
|
return
|
||||||
@ -692,10 +550,7 @@ def ask(question, kb_ids, tenant_id):
|
|||||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
|
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
|
||||||
max_tokens = chat_mdl.max_length
|
max_tokens = chat_mdl.max_length
|
||||||
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||||
kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids,
|
kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False, rank_feature=label_question(question, kbs))
|
||||||
1, 12, 0.1, 0.3, aggs=False,
|
|
||||||
rank_feature=label_question(question, kbs)
|
|
||||||
)
|
|
||||||
knowledges = kb_prompt(kbinfos, max_tokens)
|
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||||
prompt = """
|
prompt = """
|
||||||
Role: You're a smart assistant. Your name is Miss R.
|
Role: You're a smart assistant. Your name is Miss R.
|
||||||
@ -717,17 +572,9 @@ def ask(question, kb_ids, tenant_id):
|
|||||||
|
|
||||||
def decorate_answer(answer):
|
def decorate_answer(answer):
|
||||||
nonlocal knowledges, kbinfos, prompt
|
nonlocal knowledges, kbinfos, prompt
|
||||||
answer, idx = retriever.insert_citations(answer,
|
answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]], embd_mdl, tkweight=0.7, vtweight=0.3)
|
||||||
[ck["content_ltks"]
|
|
||||||
for ck in kbinfos["chunks"]],
|
|
||||||
[ck["vector"]
|
|
||||||
for ck in kbinfos["chunks"]],
|
|
||||||
embd_mdl,
|
|
||||||
tkweight=0.7,
|
|
||||||
vtweight=0.3)
|
|
||||||
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
||||||
recall_docs = [
|
recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
||||||
d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
|
||||||
if not recall_docs:
|
if not recall_docs:
|
||||||
recall_docs = kbinfos["doc_aggs"]
|
recall_docs = kbinfos["doc_aggs"]
|
||||||
kbinfos["doc_aggs"] = recall_docs
|
kbinfos["doc_aggs"] = recall_docs
|
||||||
@ -738,71 +585,11 @@ def ask(question, kb_ids, tenant_id):
|
|||||||
|
|
||||||
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
||||||
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
||||||
|
refs["chunks"] = chunks_format(refs)
|
||||||
return {"answer": answer, "reference": refs}
|
return {"answer": answer, "reference": refs}
|
||||||
|
|
||||||
answer = ""
|
answer = ""
|
||||||
for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
|
for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
|
||||||
answer = ans
|
answer = ans
|
||||||
yield {"answer": answer, "reference": {}}
|
yield {"answer": answer, "reference": {}}
|
||||||
yield decorate_answer(answer)
|
yield decorate_answer(answer)
|
||||||
|
|
||||||
|
|
||||||
def content_tagging(chat_mdl, content, all_tags, examples, topn=3):
|
|
||||||
prompt = f"""
|
|
||||||
Role: You're a text analyzer.
|
|
||||||
|
|
||||||
Task: Tag (put on some labels) to a given piece of text content based on the examples and the entire tag set.
|
|
||||||
|
|
||||||
Steps::
|
|
||||||
- Comprehend the tag/label set.
|
|
||||||
- Comprehend examples which all consist of both text content and assigned tags with relevance score in format of JSON.
|
|
||||||
- Summarize the text content, and tag it with top {topn} most relevant tags from the set of tag/label and the corresponding relevance score.
|
|
||||||
|
|
||||||
Requirements
|
|
||||||
- The tags MUST be from the tag set.
|
|
||||||
- The output MUST be in JSON format only, the key is tag and the value is its relevance score.
|
|
||||||
- The relevance score must be range from 1 to 10.
|
|
||||||
- Keywords ONLY in output.
|
|
||||||
|
|
||||||
# TAG SET
|
|
||||||
{", ".join(all_tags)}
|
|
||||||
|
|
||||||
"""
|
|
||||||
for i, ex in enumerate(examples):
|
|
||||||
prompt += """
|
|
||||||
# Examples {}
|
|
||||||
### Text Content
|
|
||||||
{}
|
|
||||||
|
|
||||||
Output:
|
|
||||||
{}
|
|
||||||
|
|
||||||
""".format(i, ex["content"], json.dumps(ex[TAG_FLD], indent=2, ensure_ascii=False))
|
|
||||||
|
|
||||||
prompt += f"""
|
|
||||||
# Real Data
|
|
||||||
### Text Content
|
|
||||||
{content}
|
|
||||||
|
|
||||||
"""
|
|
||||||
msg = [
|
|
||||||
{"role": "system", "content": prompt},
|
|
||||||
{"role": "user", "content": "Output: "}
|
|
||||||
]
|
|
||||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
|
||||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.5})
|
|
||||||
if isinstance(kwd, tuple):
|
|
||||||
kwd = kwd[0]
|
|
||||||
if kwd.find("**ERROR**") >= 0:
|
|
||||||
raise Exception(kwd)
|
|
||||||
|
|
||||||
try:
|
|
||||||
return json_repair.loads(kwd)
|
|
||||||
except json_repair.JSONDecodeError:
|
|
||||||
try:
|
|
||||||
result = kwd.replace(prompt[:-1], '').replace('user', '').replace('model', '').strip()
|
|
||||||
result = '{' + result.split('{')[1].split('}')[0] + '}'
|
|
||||||
return json_repair.loads(result)
|
|
||||||
except Exception as e:
|
|
||||||
logging.exception(f"JSON parsing error: {result} -> {e}")
|
|
||||||
raise e
|
|
||||||
@@ -13,9 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import logging
-import xxhash
 import json
+import logging
 import random
 import re
 from concurrent.futures import ThreadPoolExecutor
@@ -23,23 +22,21 @@ from copy import deepcopy
 from datetime import datetime
 from io import BytesIO

+import trio
+import xxhash
 from peewee import fn

-from api.db.db_utils import bulk_insert_into_db
 from api import settings
-from api.utils import current_timestamp, get_format_time, get_uuid
-from graphrag.general.mind_map_extractor import MindMapExtractor
-from rag.settings import SVR_QUEUE_NAME
-from rag.utils.storage_factory import STORAGE_IMPL
-from rag.nlp import search, rag_tokenizer
-
-from api.db import FileType, TaskStatus, ParserType, LLMType
-from api.db.db_models import DB, Knowledgebase, Tenant, Task, UserTenant
-from api.db.db_models import Document
+from api.db import FileType, LLMType, ParserType, StatusEnum, TaskStatus, UserTenantRole
+from api.db.db_models import DB, Document, Knowledgebase, Task, Tenant, UserTenant
+from api.db.db_utils import bulk_insert_into_db
 from api.db.services.common_service import CommonService
 from api.db.services.knowledgebase_service import KnowledgebaseService
-from api.db import StatusEnum
+from api.utils import current_timestamp, get_format_time, get_uuid
+from rag.nlp import rag_tokenizer, search
+from rag.settings import get_svr_queue_name
 from rag.utils.redis_conn import REDIS_CONN
+from rag.utils.storage_factory import STORAGE_IMPL

 class DocumentService(CommonService):
@@ -96,9 +93,7 @@ class DocumentService(CommonService):
     def insert(cls, doc):
         if not cls.save(**doc):
             raise RuntimeError("Database error (Document)!")
-        e, kb = KnowledgebaseService.get_by_id(doc["kb_id"])
-        if not KnowledgebaseService.update_by_id(
-                kb.id, {"doc_num": kb.doc_num + 1}):
+        if not KnowledgebaseService.atomic_increase_doc_num_by_id(doc["kb_id"]):
             raise RuntimeError("Database error (Knowledgebase)!")
         return Document(**doc)
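insert() now delegates the knowledge-base counter bump to KnowledgebaseService.atomic_increase_doc_num_by_id() instead of reading doc_num and writing back doc_num + 1, closing a read-modify-write race under concurrent inserts. That helper is not part of this hunk; a plausible peewee sketch of it, under that assumption and following the decorator pattern used elsewhere in this diff, would push the increment into a single UPDATE:

@classmethod
@DB.connection_context()
def atomic_increase_doc_num_by_id(cls, kb_id):
    # Single "UPDATE ... SET doc_num = doc_num + 1 WHERE id = %s" evaluated in the database,
    # so concurrent inserts cannot lose increments the way read-then-write would.
    num = cls.model.update(doc_num=cls.model.doc_num + 1).where(cls.model.id == kb_id).execute()
    return num > 0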
|
|
||||||
@ -108,13 +103,13 @@ class DocumentService(CommonService):
|
|||||||
cls.clear_chunk_num(doc.id)
|
cls.clear_chunk_num(doc.id)
|
||||||
try:
|
try:
|
||||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||||
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "source_id": doc.id},
|
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "source_id": doc.id},
|
||||||
{"remove": {"source_id": doc.id}},
|
{"remove": {"source_id": doc.id}},
|
||||||
search.index_name(tenant_id), doc.kb_id)
|
search.index_name(tenant_id), doc.kb_id)
|
||||||
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
|
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
|
||||||
{"removed_kwd": "Y"},
|
{"removed_kwd": "Y"},
|
||||||
search.index_name(tenant_id), doc.kb_id)
|
search.index_name(tenant_id), doc.kb_id)
|
||||||
settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "must_not": {"exists": "source_id"}},
|
settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "must_not": {"exists": "source_id"}},
|
||||||
search.index_name(tenant_id), doc.kb_id)
|
search.index_name(tenant_id), doc.kb_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
@ -174,9 +169,9 @@ class DocumentService(CommonService):
|
|||||||
"Document not found which is supposed to be there")
|
"Document not found which is supposed to be there")
|
||||||
num = Knowledgebase.update(
|
num = Knowledgebase.update(
|
||||||
token_num=Knowledgebase.token_num +
|
token_num=Knowledgebase.token_num +
|
||||||
token_num,
|
token_num,
|
||||||
chunk_num=Knowledgebase.chunk_num +
|
chunk_num=Knowledgebase.chunk_num +
|
||||||
chunk_num).where(
|
chunk_num).where(
|
||||||
Knowledgebase.id == kb_id).execute()
|
Knowledgebase.id == kb_id).execute()
|
||||||
return num
|
return num
|
||||||
|
|
||||||
@ -192,9 +187,9 @@ class DocumentService(CommonService):
|
|||||||
"Document not found which is supposed to be there")
|
"Document not found which is supposed to be there")
|
||||||
num = Knowledgebase.update(
|
num = Knowledgebase.update(
|
||||||
token_num=Knowledgebase.token_num -
|
token_num=Knowledgebase.token_num -
|
||||||
token_num,
|
token_num,
|
||||||
chunk_num=Knowledgebase.chunk_num -
|
chunk_num=Knowledgebase.chunk_num -
|
||||||
chunk_num
|
chunk_num
|
||||||
).where(
|
).where(
|
||||||
Knowledgebase.id == kb_id).execute()
|
Knowledgebase.id == kb_id).execute()
|
||||||
return num
|
return num
|
||||||
@ -207,9 +202,9 @@ class DocumentService(CommonService):
|
|||||||
|
|
||||||
num = Knowledgebase.update(
|
num = Knowledgebase.update(
|
||||||
token_num=Knowledgebase.token_num -
|
token_num=Knowledgebase.token_num -
|
||||||
doc.token_num,
|
doc.token_num,
|
||||||
chunk_num=Knowledgebase.chunk_num -
|
chunk_num=Knowledgebase.chunk_num -
|
||||||
doc.chunk_num,
|
doc.chunk_num,
|
||||||
doc_num=Knowledgebase.doc_num - 1
|
doc_num=Knowledgebase.doc_num - 1
|
||||||
).where(
|
).where(
|
||||||
Knowledgebase.id == doc.kb_id).execute()
|
Knowledgebase.id == doc.kb_id).execute()
|
||||||
@ -221,7 +216,7 @@ class DocumentService(CommonService):
|
|||||||
docs = cls.model.select(
|
docs = cls.model.select(
|
||||||
Knowledgebase.tenant_id).join(
|
Knowledgebase.tenant_id).join(
|
||||||
Knowledgebase, on=(
|
Knowledgebase, on=(
|
||||||
Knowledgebase.id == cls.model.kb_id)).where(
|
Knowledgebase.id == cls.model.kb_id)).where(
|
||||||
cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
|
cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
|
||||||
docs = docs.dicts()
|
docs = docs.dicts()
|
||||||
if not docs:
|
if not docs:
|
||||||
@ -243,7 +238,7 @@ class DocumentService(CommonService):
|
|||||||
docs = cls.model.select(
|
docs = cls.model.select(
|
||||||
Knowledgebase.tenant_id).join(
|
Knowledgebase.tenant_id).join(
|
||||||
Knowledgebase, on=(
|
Knowledgebase, on=(
|
||||||
Knowledgebase.id == cls.model.kb_id)).where(
|
Knowledgebase.id == cls.model.kb_id)).where(
|
||||||
cls.model.name == name, Knowledgebase.status == StatusEnum.VALID.value)
|
cls.model.name == name, Knowledgebase.status == StatusEnum.VALID.value)
|
||||||
docs = docs.dicts()
|
docs = docs.dicts()
|
||||||
if not docs:
|
if not docs:
|
||||||
@ -256,7 +251,7 @@ class DocumentService(CommonService):
|
|||||||
docs = cls.model.select(
|
docs = cls.model.select(
|
||||||
cls.model.id).join(
|
cls.model.id).join(
|
||||||
Knowledgebase, on=(
|
Knowledgebase, on=(
|
||||||
Knowledgebase.id == cls.model.kb_id)
|
Knowledgebase.id == cls.model.kb_id)
|
||||||
).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
|
).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
|
||||||
).where(cls.model.id == doc_id, UserTenant.user_id == user_id).paginate(0, 1)
|
).where(cls.model.id == doc_id, UserTenant.user_id == user_id).paginate(0, 1)
|
||||||
docs = docs.dicts()
|
docs = docs.dicts()
|
||||||
@@ -267,11 +262,18 @@ class DocumentService(CommonService):
     @classmethod
     @DB.connection_context()
     def accessible4deletion(cls, doc_id, user_id):
-        docs = cls.model.select(
-            cls.model.id).join(
-                Knowledgebase, on=(
-                    Knowledgebase.id == cls.model.kb_id)
-        ).where(cls.model.id == doc_id, Knowledgebase.created_by == user_id).paginate(0, 1)
+        docs = cls.model.select(cls.model.id
+        ).join(
+            Knowledgebase, on=(
+                Knowledgebase.id == cls.model.kb_id)
+        ).join(
+            UserTenant, on=(
+                (UserTenant.tenant_id == Knowledgebase.created_by) & (UserTenant.user_id == user_id))
+        ).where(
+            cls.model.id == doc_id,
+            UserTenant.status == StatusEnum.VALID.value,
+            ((UserTenant.role == UserTenantRole.NORMAL) | (UserTenant.role == UserTenantRole.OWNER))
+        ).paginate(0, 1)
         docs = docs.dicts()
         if not docs:
             return False
||||||
@ -283,7 +285,7 @@ class DocumentService(CommonService):
|
|||||||
docs = cls.model.select(
|
docs = cls.model.select(
|
||||||
Knowledgebase.embd_id).join(
|
Knowledgebase.embd_id).join(
|
||||||
Knowledgebase, on=(
|
Knowledgebase, on=(
|
||||||
Knowledgebase.id == cls.model.kb_id)).where(
|
Knowledgebase.id == cls.model.kb_id)).where(
|
||||||
cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
|
cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
|
||||||
docs = docs.dicts()
|
docs = docs.dicts()
|
||||||
if not docs:
|
if not docs:
|
||||||
@ -306,9 +308,9 @@ class DocumentService(CommonService):
|
|||||||
Tenant.asr_id,
|
Tenant.asr_id,
|
||||||
Tenant.llm_id,
|
Tenant.llm_id,
|
||||||
)
|
)
|
||||||
.join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
|
.join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
|
||||||
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
|
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
|
||||||
.where(cls.model.id == doc_id)
|
.where(cls.model.id == doc_id)
|
||||||
)
|
)
|
||||||
configs = configs.dicts()
|
configs = configs.dicts()
|
||||||
if not configs:
|
if not configs:
|
||||||
@ -336,6 +338,8 @@ class DocumentService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def update_parser_config(cls, id, config):
|
def update_parser_config(cls, id, config):
|
||||||
|
if not config:
|
||||||
|
return
|
||||||
e, d = cls.get_by_id(id)
|
e, d = cls.get_by_id(id)
|
||||||
if not e:
|
if not e:
|
||||||
raise LookupError(f"Document({id}) not found.")
|
raise LookupError(f"Document({id}) not found.")
|
||||||
@ -373,15 +377,14 @@ class DocumentService(CommonService):
|
|||||||
"process_begin_at": get_format_time()
|
"process_begin_at": get_format_time()
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@DB.connection_context()
|
||||||
|
def update_meta_fields(cls, doc_id, meta_fields):
|
||||||
|
return cls.update_by_id(doc_id, {"meta_fields": meta_fields})
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def update_progress(cls):
|
def update_progress(cls):
|
||||||
MSG = {
|
|
||||||
"raptor": "Start RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval).",
|
|
||||||
"graphrag": "Start Graph Extraction",
|
|
||||||
"graph_resolution": "Start Graph Resolution",
|
|
||||||
"graph_community": "Start Graph Community Reports Generation"
|
|
||||||
}
|
|
||||||
docs = cls.get_unfinished_docs()
|
docs = cls.get_unfinished_docs()
|
||||||
for d in docs:
|
for d in docs:
|
||||||
try:
|
try:
|
||||||
@ -392,37 +395,33 @@ class DocumentService(CommonService):
|
|||||||
prg = 0
|
prg = 0
|
||||||
finished = True
|
finished = True
|
||||||
bad = 0
|
bad = 0
|
||||||
|
has_raptor = False
|
||||||
|
has_graphrag = False
|
||||||
e, doc = DocumentService.get_by_id(d["id"])
|
e, doc = DocumentService.get_by_id(d["id"])
|
||||||
status = doc.run # TaskStatus.RUNNING.value
|
status = doc.run # TaskStatus.RUNNING.value
|
||||||
|
priority = 0
|
||||||
for t in tsks:
|
for t in tsks:
|
||||||
if 0 <= t.progress < 1:
|
if 0 <= t.progress < 1:
|
||||||
finished = False
|
finished = False
|
||||||
prg += t.progress if t.progress >= 0 else 0
|
|
||||||
if t.progress_msg not in msg:
|
|
||||||
msg.append(t.progress_msg)
|
|
||||||
if t.progress == -1:
|
if t.progress == -1:
|
||||||
bad += 1
|
bad += 1
|
||||||
|
prg += t.progress if t.progress >= 0 else 0
|
||||||
|
msg.append(t.progress_msg)
|
||||||
|
if t.task_type == "raptor":
|
||||||
|
has_raptor = True
|
||||||
|
elif t.task_type == "graphrag":
|
||||||
|
has_graphrag = True
|
||||||
|
priority = max(priority, t.priority)
|
||||||
prg /= len(tsks)
|
prg /= len(tsks)
|
||||||
if finished and bad:
|
if finished and bad:
|
||||||
prg = -1
|
prg = -1
|
||||||
status = TaskStatus.FAIL.value
|
status = TaskStatus.FAIL.value
|
||||||
elif finished:
|
elif finished:
|
||||||
m = "\n".join(sorted(msg))
|
if d["parser_config"].get("raptor", {}).get("use_raptor") and not has_raptor:
|
||||||
if d["parser_config"].get("raptor", {}).get("use_raptor") and m.find(MSG["raptor"]) < 0:
|
queue_raptor_o_graphrag_tasks(d, "raptor", priority)
|
||||||
queue_raptor_o_graphrag_tasks(d, "raptor", MSG["raptor"])
|
|
||||||
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
||||||
elif d["parser_config"].get("graphrag", {}).get("use_graphrag") and m.find(MSG["graphrag"]) < 0:
|
elif d["parser_config"].get("graphrag", {}).get("use_graphrag") and not has_graphrag:
|
||||||
queue_raptor_o_graphrag_tasks(d, "graphrag", MSG["graphrag"])
|
queue_raptor_o_graphrag_tasks(d, "graphrag", priority)
|
||||||
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
|
||||||
elif d["parser_config"].get("graphrag", {}).get("use_graphrag") \
|
|
||||||
and d["parser_config"].get("graphrag", {}).get("resolution") \
|
|
||||||
and m.find(MSG["graph_resolution"]) < 0:
|
|
||||||
queue_raptor_o_graphrag_tasks(d, "graph_resolution", MSG["graph_resolution"])
|
|
||||||
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
|
||||||
elif d["parser_config"].get("graphrag", {}).get("use_graphrag") \
|
|
||||||
and d["parser_config"].get("graphrag", {}).get("community") \
|
|
||||||
and m.find(MSG["graph_community"]) < 0:
|
|
||||||
queue_raptor_o_graphrag_tasks(d, "graph_community", MSG["graph_community"])
|
|
||||||
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
prg = 0.98 * len(tsks) / (len(tsks) + 1)
|
||||||
else:
|
else:
|
||||||
status = TaskStatus.DONE.value
|
status = TaskStatus.DONE.value
|
||||||
@ -431,7 +430,7 @@ class DocumentService(CommonService):
|
|||||||
info = {
|
info = {
|
||||||
"process_duation": datetime.timestamp(
|
"process_duation": datetime.timestamp(
|
||||||
datetime.now()) -
|
datetime.now()) -
|
||||||
d["process_begin_at"].timestamp(),
|
d["process_begin_at"].timestamp(),
|
||||||
"run": status}
|
"run": status}
|
||||||
if prg != 0:
|
if prg != 0:
|
||||||
info["progress"] = prg
|
info["progress"] = prg
|
||||||
@ -459,7 +458,7 @@ class DocumentService(CommonService):
|
|||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
-def queue_raptor_o_graphrag_tasks(doc, ty, msg):
+def queue_raptor_o_graphrag_tasks(doc, ty, priority):
     chunking_config = DocumentService.get_chunking_config(doc["id"])
     hasher = xxhash.xxh64()
     for field in sorted(chunking_config.keys()):
@@ -472,7 +471,8 @@ def queue_raptor_o_graphrag_tasks(doc, ty, msg):
         "doc_id": doc["id"],
         "from_page": 100000000,
         "to_page": 100000000,
-        "progress_msg": datetime.now().strftime("%H:%M:%S") + " " + msg
+        "task_type": ty,
+        "progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty
     }

     task = new_task()
@@ -481,18 +481,17 @@ def queue_raptor_o_graphrag_tasks(doc, ty, msg):
     hasher.update(ty.encode("utf-8"))
     task["digest"] = hasher.hexdigest()
     bulk_insert_into_db(Task, [task], True)
-    task["task_type"] = ty
-    assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
+    assert REDIS_CONN.queue_product(get_svr_queue_name(priority), message=task), "Can't access Redis. Please check the Redis' status."
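The task payload above is hashed with xxhash over the key-sorted chunking config plus the task type, giving a stable digest for de-duplicating re-queued RAPTOR/GraphRAG tasks, and the message is then routed to a priority-specific queue via get_svr_queue_name(priority). A minimal sketch of the digest part, with a stand-in config dict:

import xxhash

def task_digest(chunking_config: dict, task_type: str) -> str:
    # Hash the config in key-sorted, deterministic order, then mix in the task type,
    # so identical (config, type) pairs always produce the same digest.
    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):
        hasher.update(str(chunking_config[field]).encode("utf-8"))
    hasher.update(task_type.encode("utf-8"))
    return hasher.hexdigest()

print(task_digest({"chunk_token_num": 128, "parser_id": "naive"}, "graphrag"))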
def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||||
from rag.app import presentation, picture, naive, audio, email
|
from api.db.services.api_service import API4ConversationService
|
||||||
|
from api.db.services.conversation_service import ConversationService
|
||||||
from api.db.services.dialog_service import DialogService
|
from api.db.services.dialog_service import DialogService
|
||||||
from api.db.services.file_service import FileService
|
from api.db.services.file_service import FileService
|
||||||
from api.db.services.llm_service import LLMBundle
|
from api.db.services.llm_service import LLMBundle
|
||||||
from api.db.services.user_service import TenantService
|
from api.db.services.user_service import TenantService
|
||||||
from api.db.services.api_service import API4ConversationService
|
from rag.app import audio, email, naive, picture, presentation
|
||||||
from api.db.services.conversation_service import ConversationService
|
|
||||||
|
|
||||||
e, conv = ConversationService.get_by_id(conversation_id)
|
e, conv = ConversationService.get_by_id(conversation_id)
|
||||||
if not e:
|
if not e:
|
||||||
@ -500,6 +499,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
|||||||
assert e, "Conversation not found!"
|
assert e, "Conversation not found!"
|
||||||
|
|
||||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||||
|
if not dia.kb_ids:
|
||||||
|
raise LookupError("No knowledge base associated with this conversation. "
|
||||||
|
"Please add a knowledge base before uploading documents")
|
||||||
kb_id = dia.kb_ids[0]
|
kb_id = dia.kb_ids[0]
|
||||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||||
if not e:
|
if not e:
|
||||||
@ -588,10 +590,11 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
|||||||
cks = [c for c in docs if c["doc_id"] == doc_id]
|
cks = [c for c in docs if c["doc_id"] == doc_id]
|
||||||
|
|
||||||
if parser_ids[doc_id] != ParserType.PICTURE.value:
|
if parser_ids[doc_id] != ParserType.PICTURE.value:
|
||||||
|
from graphrag.general.mind_map_extractor import MindMapExtractor
|
||||||
mindmap = MindMapExtractor(llm_bdl)
|
mindmap = MindMapExtractor(llm_bdl)
|
||||||
try:
|
try:
|
||||||
mind_map = json.dumps(mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
|
mind_map = trio.run(mindmap, [c["content_with_weight"] for c in docs if c["doc_id"] == doc_id])
|
||||||
ensure_ascii=False, indent=2)
|
mind_map = json.dumps(mind_map.output, ensure_ascii=False, indent=2)
|
||||||
if len(mind_map) < 32:
|
if len(mind_map) < 32:
|
||||||
raise Exception("Few content: " + mind_map)
|
raise Exception("Few content: " + mind_map)
|
||||||
cks.append({
|
cks.append({
|
||||||
|
|||||||
@ -34,12 +34,24 @@ from rag.utils.storage_factory import STORAGE_IMPL
|
|||||||
|
|
||||||
|
|
||||||
class FileService(CommonService):
|
class FileService(CommonService):
|
||||||
|
# Service class for managing file operations and storage
|
||||||
model = File
|
model = File
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page,
|
def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page,
|
||||||
orderby, desc, keywords):
|
orderby, desc, keywords):
|
||||||
|
# Get files by parent folder ID with pagination and filtering
|
||||||
|
# Args:
|
||||||
|
# tenant_id: ID of the tenant
|
||||||
|
# pf_id: Parent folder ID
|
||||||
|
# page_number: Page number for pagination
|
||||||
|
# items_per_page: Number of items per page
|
||||||
|
# orderby: Field to order by
|
||||||
|
# desc: Boolean indicating descending order
|
||||||
|
# keywords: Search keywords
|
||||||
|
# Returns:
|
||||||
|
# Tuple of (file_list, total_count)
|
||||||
if keywords:
|
if keywords:
|
||||||
files = cls.model.select().where(
|
files = cls.model.select().where(
|
||||||
(cls.model.tenant_id == tenant_id),
|
(cls.model.tenant_id == tenant_id),
|
||||||
@ -80,6 +92,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_kb_id_by_file_id(cls, file_id):
|
def get_kb_id_by_file_id(cls, file_id):
|
||||||
|
# Get knowledge base IDs associated with a file
|
||||||
|
# Args:
|
||||||
|
# file_id: File ID
|
||||||
|
# Returns:
|
||||||
|
# List of dictionaries containing knowledge base IDs and names
|
||||||
kbs = (cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
|
kbs = (cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
|
||||||
.join(File2Document, on=(File2Document.file_id == file_id))
|
.join(File2Document, on=(File2Document.file_id == file_id))
|
||||||
.join(Document, on=(File2Document.document_id == Document.id))
|
.join(Document, on=(File2Document.document_id == Document.id))
|
||||||
@ -95,6 +112,12 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_by_pf_id_name(cls, id, name):
|
def get_by_pf_id_name(cls, id, name):
|
||||||
|
# Get file by parent folder ID and name
|
||||||
|
# Args:
|
||||||
|
# id: Parent folder ID
|
||||||
|
# name: File name
|
||||||
|
# Returns:
|
||||||
|
# File object or None if not found
|
||||||
file = cls.model.select().where((cls.model.parent_id == id) & (cls.model.name == name))
|
file = cls.model.select().where((cls.model.parent_id == id) & (cls.model.name == name))
|
||||||
if file.count():
|
if file.count():
|
||||||
e, file = cls.get_by_id(file[0].id)
|
e, file = cls.get_by_id(file[0].id)
|
||||||
@ -106,6 +129,14 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_id_list_by_id(cls, id, name, count, res):
|
def get_id_list_by_id(cls, id, name, count, res):
|
||||||
|
# Recursively get list of file IDs by traversing folder structure
|
||||||
|
# Args:
|
||||||
|
# id: Starting folder ID
|
||||||
|
# name: List of folder names to traverse
|
||||||
|
# count: Current depth in traversal
|
||||||
|
# res: List to store results
|
||||||
|
# Returns:
|
||||||
|
# List of file IDs
|
||||||
if count < len(name):
|
if count < len(name):
|
||||||
file = cls.get_by_pf_id_name(id, name[count])
|
file = cls.get_by_pf_id_name(id, name[count])
|
||||||
if file:
|
if file:
|
||||||
@ -119,6 +150,12 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_all_innermost_file_ids(cls, folder_id, result_ids):
|
def get_all_innermost_file_ids(cls, folder_id, result_ids):
|
||||||
|
# Get IDs of all files in the deepest level of folders
|
||||||
|
# Args:
|
||||||
|
# folder_id: Starting folder ID
|
||||||
|
# result_ids: List to store results
|
||||||
|
# Returns:
|
||||||
|
# List of file IDs
|
||||||
subfolders = cls.model.select().where(cls.model.parent_id == folder_id)
|
subfolders = cls.model.select().where(cls.model.parent_id == folder_id)
|
||||||
if subfolders.exists():
|
if subfolders.exists():
|
||||||
for subfolder in subfolders:
|
for subfolder in subfolders:
|
||||||
@ -130,6 +167,14 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def create_folder(cls, file, parent_id, name, count):
|
def create_folder(cls, file, parent_id, name, count):
|
||||||
|
# Recursively create folder structure
|
||||||
|
# Args:
|
||||||
|
# file: Current file object
|
||||||
|
# parent_id: Parent folder ID
|
||||||
|
# name: List of folder names to create
|
||||||
|
# count: Current depth in creation
|
||||||
|
# Returns:
|
||||||
|
# Created file object
|
||||||
if count > len(name) - 2:
|
if count > len(name) - 2:
|
||||||
return file
|
return file
|
||||||
else:
|
else:
|
||||||
@ -148,6 +193,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def is_parent_folder_exist(cls, parent_id):
|
def is_parent_folder_exist(cls, parent_id):
|
||||||
|
# Check if parent folder exists
|
||||||
|
# Args:
|
||||||
|
# parent_id: Parent folder ID
|
||||||
|
# Returns:
|
||||||
|
# Boolean indicating if folder exists
|
||||||
parent_files = cls.model.select().where(cls.model.id == parent_id)
|
parent_files = cls.model.select().where(cls.model.id == parent_id)
|
||||||
if parent_files.count():
|
if parent_files.count():
|
||||||
return True
|
return True
|
||||||
@ -157,6 +207,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_root_folder(cls, tenant_id):
|
def get_root_folder(cls, tenant_id):
|
||||||
|
# Get or create root folder for tenant
|
||||||
|
# Args:
|
||||||
|
# tenant_id: Tenant ID
|
||||||
|
# Returns:
|
||||||
|
# Root folder dictionary
|
||||||
for file in cls.model.select().where((cls.model.tenant_id == tenant_id),
|
for file in cls.model.select().where((cls.model.tenant_id == tenant_id),
|
||||||
(cls.model.parent_id == cls.model.id)
|
(cls.model.parent_id == cls.model.id)
|
||||||
):
|
):
|
||||||
@ -179,6 +234,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_kb_folder(cls, tenant_id):
|
def get_kb_folder(cls, tenant_id):
|
||||||
|
# Get knowledge base folder for tenant
|
||||||
|
# Args:
|
||||||
|
# tenant_id: Tenant ID
|
||||||
|
# Returns:
|
||||||
|
# Knowledge base folder dictionary
|
||||||
for root in cls.model.select().where(
|
for root in cls.model.select().where(
|
||||||
(cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
|
(cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
|
||||||
for folder in cls.model.select().where(
|
for folder in cls.model.select().where(
|
||||||
@ -190,6 +250,16 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def new_a_file_from_kb(cls, tenant_id, name, parent_id, ty=FileType.FOLDER.value, size=0, location=""):
|
def new_a_file_from_kb(cls, tenant_id, name, parent_id, ty=FileType.FOLDER.value, size=0, location=""):
|
||||||
|
# Create a new file from knowledge base
|
||||||
|
# Args:
|
||||||
|
# tenant_id: Tenant ID
|
||||||
|
# name: File name
|
||||||
|
# parent_id: Parent folder ID
|
||||||
|
# ty: File type
|
||||||
|
# size: File size
|
||||||
|
# location: File location
|
||||||
|
# Returns:
|
||||||
|
# Created file dictionary
|
||||||
for file in cls.query(tenant_id=tenant_id, parent_id=parent_id, name=name):
|
for file in cls.query(tenant_id=tenant_id, parent_id=parent_id, name=name):
|
||||||
return file.to_dict()
|
return file.to_dict()
|
||||||
file = {
|
file = {
|
||||||
@ -209,6 +279,10 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def init_knowledgebase_docs(cls, root_id, tenant_id):
|
def init_knowledgebase_docs(cls, root_id, tenant_id):
|
||||||
|
# Initialize knowledge base documents
|
||||||
|
# Args:
|
||||||
|
# root_id: Root folder ID
|
||||||
|
# tenant_id: Tenant ID
|
||||||
for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)\
|
for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)\
|
||||||
& (cls.model.parent_id == root_id)):
|
& (cls.model.parent_id == root_id)):
|
||||||
return
|
return
|
||||||
@ -222,6 +296,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_parent_folder(cls, file_id):
|
def get_parent_folder(cls, file_id):
|
||||||
|
# Get parent folder of a file
|
||||||
|
# Args:
|
||||||
|
# file_id: File ID
|
||||||
|
# Returns:
|
||||||
|
# Parent folder object
|
||||||
file = cls.model.select().where(cls.model.id == file_id)
|
file = cls.model.select().where(cls.model.id == file_id)
|
||||||
if file.count():
|
if file.count():
|
||||||
e, file = cls.get_by_id(file[0].parent_id)
|
e, file = cls.get_by_id(file[0].parent_id)
|
||||||
@ -234,6 +313,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def get_all_parent_folders(cls, start_id):
|
def get_all_parent_folders(cls, start_id):
|
||||||
|
# Get all parent folders in path
|
||||||
|
# Args:
|
||||||
|
# start_id: Starting file ID
|
||||||
|
# Returns:
|
||||||
|
# List of parent folder objects
|
||||||
parent_folders = []
|
parent_folders = []
|
||||||
current_id = start_id
|
current_id = start_id
|
||||||
while current_id:
|
while current_id:
|
||||||
@ -249,6 +333,11 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def insert(cls, file):
|
def insert(cls, file):
|
||||||
|
# Insert a new file record
|
||||||
|
# Args:
|
||||||
|
# file: File data dictionary
|
||||||
|
# Returns:
|
||||||
|
# Created file object
|
||||||
if not cls.save(**file):
|
if not cls.save(**file):
|
||||||
raise RuntimeError("Database error (File)!")
|
raise RuntimeError("Database error (File)!")
|
||||||
return File(**file)
|
return File(**file)
|
||||||
@ -256,6 +345,7 @@ class FileService(CommonService):
|
|||||||
@classmethod
|
@classmethod
|
||||||
@DB.connection_context()
|
@DB.connection_context()
|
||||||
def delete(cls, file):
|
def delete(cls, file):
|
||||||
|
#
|
||||||
return cls.delete_by_id(file.id)
|
return cls.delete_by_id(file.id)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
|||||||
api/db/services/knowledgebase_service.py

@@ -13,22 +13,115 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from api.db import StatusEnum, TenantPermission
-from api.db.db_models import Knowledgebase, DB, Tenant, User, UserTenant,Document
-from api.db.services.common_service import CommonService
+from datetime import datetime
from peewee import fn

+from api.db import StatusEnum, TenantPermission
+from api.db.db_models import DB, Document, Knowledgebase, Tenant, User, UserTenant
+from api.db.services.common_service import CommonService
+from api.utils import current_timestamp, datetime_format


class KnowledgebaseService(CommonService):
+    """Service class for managing knowledge base operations.
+
+    This class extends CommonService to provide specialized functionality for knowledge base
+    management, including document parsing status tracking, access control, and configuration
+    management. It handles operations such as listing, creating, updating, and deleting
+    knowledge bases, as well as managing their associated documents and permissions.
+
+    The class implements a comprehensive set of methods for:
+    - Document parsing status verification
+    - Knowledge base access control
+    - Parser configuration management
+    - Tenant-based knowledge base organization
+
+    Attributes:
+        model: The Knowledgebase model class for database operations.
+    """
    model = Knowledgebase

    @classmethod
    @DB.connection_context()
-    def list_documents_by_ids(cls,kb_ids):
-        doc_ids=cls.model.select(Document.id.alias("document_id")).join(Document,on=(cls.model.id == Document.kb_id)).where(
+    def accessible4deletion(cls, kb_id, user_id):
+        """Check if a knowledge base can be deleted by a specific user.
+
+        This method verifies whether a user has permission to delete a knowledge base
+        by checking if they are the creator of that knowledge base.
+
+        Args:
+            kb_id (str): The unique identifier of the knowledge base to check.
+            user_id (str): The unique identifier of the user attempting the deletion.
+
+        Returns:
+            bool: True if the user has permission to delete the knowledge base,
+                  False if the user doesn't have permission or the knowledge base doesn't exist.
+
+        Example:
+            >>> KnowledgebaseService.accessible4deletion("kb123", "user456")
+            True
+
+        Note:
+            - This method only checks creator permissions
+            - A return value of False can mean either:
+              1. The knowledge base doesn't exist
+              2. The user is not the creator of the knowledge base
+        """
+        # Check if a knowledge base can be deleted by a user
+        docs = cls.model.select(
+            cls.model.id).where(cls.model.id == kb_id, cls.model.created_by == user_id).paginate(0, 1)
+        docs = docs.dicts()
+        if not docs:
+            return False
+        return True
+
+    @classmethod
+    @DB.connection_context()
+    def is_parsed_done(cls, kb_id):
+        # Check if all documents in the knowledge base have completed parsing
+        #
+        # Args:
+        #     kb_id: Knowledge base ID
+        #
+        # Returns:
+        #     If all documents are parsed successfully, returns (True, None)
+        #     If any document is not fully parsed, returns (False, error_message)
+        from api.db import TaskStatus
+        from api.db.services.document_service import DocumentService
+
+        # Get knowledge base information
+        kbs = cls.query(id=kb_id)
+        if not kbs:
+            return False, "Knowledge base not found"
+        kb = kbs[0]
+
+        # Get all documents in the knowledge base
+        docs, _ = DocumentService.get_by_kb_id(kb_id, 1, 1000, "create_time", True, "")
+
+        # Check parsing status of each document
+        for doc in docs:
+            # If document is being parsed, don't allow chat creation
+            if doc['run'] == TaskStatus.RUNNING.value or doc['run'] == TaskStatus.CANCEL.value or doc['run'] == TaskStatus.FAIL.value:
+                return False, f"Document '{doc['name']}' in dataset '{kb.name}' is still being parsed. Please wait until all documents are parsed before starting a chat."
+            # If document is not yet parsed and has no chunks, don't allow chat creation
+            if doc['run'] == TaskStatus.UNSTART.value and doc['chunk_num'] == 0:
+                return False, f"Document '{doc['name']}' in dataset '{kb.name}' has not been parsed yet. Please parse all documents before starting a chat."
+
+        return True, None
+
+    @classmethod
+    @DB.connection_context()
+    def list_documents_by_ids(cls, kb_ids):
+        # Get document IDs associated with given knowledge base IDs
+        # Args:
+        #     kb_ids: List of knowledge base IDs
+        # Returns:
+        #     List of document IDs
+        doc_ids = cls.model.select(Document.id.alias("document_id")).join(Document, on=(cls.model.id == Document.kb_id)).where(
            cls.model.id.in_(kb_ids)
        )
-        doc_ids =list(doc_ids.dicts())
+        doc_ids = list(doc_ids.dicts())
        doc_ids = [doc["document_id"] for doc in doc_ids]
        return doc_ids

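Reviewer note: the two new guards above are plain classmethods, so API handlers can call them directly before destructive or chat-creating operations. A minimal sketch of that pattern follows; the wrapper function and the exception types are illustrative, not part of this diff.

from api.db.services.knowledgebase_service import KnowledgebaseService

def ensure_kb_ready_for_chat(kb_id: str, user_id: str) -> None:
    # Ownership check: accessible4deletion() only passes for the creator of the knowledge base.
    if not KnowledgebaseService.accessible4deletion(kb_id, user_id):
        raise PermissionError(f"User {user_id} is not the owner of knowledge base {kb_id}.")
    # Parsing check: is_parsed_done() returns (True, None) or (False, error_message).
    ok, err = KnowledgebaseService.is_parsed_done(kb_id)
    if not ok:
        raise ValueError(err)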
@@ -39,12 +132,25 @@ class KnowledgebaseService(CommonService):
                          orderby, desc, keywords,
                          parser_id=None
                          ):
+        # Get knowledge bases by tenant IDs with pagination and filtering
+        # Args:
+        #     joined_tenant_ids: List of tenant IDs
+        #     user_id: Current user ID
+        #     page_number: Page number for pagination
+        #     items_per_page: Number of items per page
+        #     orderby: Field to order by
+        #     desc: Boolean indicating descending order
+        #     keywords: Search keywords
+        #     parser_id: Optional parser ID filter
+        # Returns:
+        #     Tuple of (knowledge_base_list, total_count)
        fields = [
            cls.model.id,
            cls.model.avatar,
            cls.model.name,
            cls.model.language,
            cls.model.description,
+            cls.model.tenant_id,
            cls.model.permission,
            cls.model.doc_num,
            cls.model.token_num,

@@ -79,13 +185,19 @@ class KnowledgebaseService(CommonService):

        count = kbs.count()

-        kbs = kbs.paginate(page_number, items_per_page)
+        if page_number and items_per_page:
+            kbs = kbs.paginate(page_number, items_per_page)

        return list(kbs.dicts()), count

    @classmethod
    @DB.connection_context()
    def get_kb_ids(cls, tenant_id):
+        # Get all knowledge base IDs for a tenant
+        # Args:
+        #     tenant_id: Tenant ID
+        # Returns:
+        #     List of knowledge base IDs
        fields = [
            cls.model.id,
        ]

@@ -96,9 +208,13 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_detail(cls, kb_id):
+        # Get detailed information about a knowledge base
+        # Args:
+        #     kb_id: Knowledge base ID
+        # Returns:
+        #     Dictionary containing knowledge base details
        fields = [
            cls.model.id,
-            # Tenant.embd_id,
            cls.model.embd_id,
            cls.model.avatar,
            cls.model.name,

@@ -112,24 +228,28 @@ class KnowledgebaseService(CommonService):
            cls.model.parser_config,
            cls.model.pagerank]
        kbs = cls.model.select(*fields).join(Tenant, on=(
            (Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
            (cls.model.id == kb_id),
            (cls.model.status == StatusEnum.VALID.value)
        )
        if not kbs:
            return
        d = kbs[0].to_dict()
-        # d["embd_id"] = kbs[0].tenant.embd_id
        return d

    @classmethod
    @DB.connection_context()
    def update_parser_config(cls, id, config):
+        # Update parser configuration for a knowledge base
+        # Args:
+        #     id: Knowledge base ID
+        #     config: New parser configuration
        e, m = cls.get_by_id(id)
        if not e:
            raise LookupError(f"knowledgebase({id}) not found.")

        def dfs_update(old, new):
+            # Deep update of nested configuration
            for k, v in new.items():
                if k not in old:
                    old[k] = v

@@ -149,6 +269,11 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_field_map(cls, ids):
+        # Get field mappings for knowledge bases
+        # Args:
+        #     ids: List of knowledge base IDs
+        # Returns:
+        #     Dictionary of field mappings
        conf = {}
        for k in cls.get_by_ids(ids):
            if k.parser_config and "field_map" in k.parser_config:

@@ -158,6 +283,12 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_by_name(cls, kb_name, tenant_id):
+        # Get knowledge base by name and tenant ID
+        # Args:
+        #     kb_name: Knowledge base name
+        #     tenant_id: Tenant ID
+        # Returns:
+        #     Tuple of (exists, knowledge_base)
        kb = cls.model.select().where(
            (cls.model.name == kb_name)
            & (cls.model.tenant_id == tenant_id)

@@ -170,12 +301,27 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_all_ids(cls):
+        # Get all knowledge base IDs
+        # Returns:
+        #     List of all knowledge base IDs
        return [m["id"] for m in cls.model.select(cls.model.id).dicts()]

    @classmethod
    @DB.connection_context()
    def get_list(cls, joined_tenant_ids, user_id,
                 page_number, items_per_page, orderby, desc, id, name):
+        # Get list of knowledge bases with filtering and pagination
+        # Args:
+        #     joined_tenant_ids: List of tenant IDs
+        #     user_id: Current user ID
+        #     page_number: Page number for pagination
+        #     items_per_page: Number of items per page
+        #     orderby: Field to order by
+        #     desc: Boolean indicating descending order
+        #     id: Optional ID filter
+        #     name: Optional name filter
+        # Returns:
+        #     List of knowledge bases
        kbs = cls.model.select()
        if id:
            kbs = kbs.where(cls.model.id == id)

@@ -184,7 +330,7 @@ class KnowledgebaseService(CommonService):
        kbs = kbs.where(
            ((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
                                                            TenantPermission.TEAM.value)) | (
                cls.model.tenant_id == user_id))
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if desc:

@@ -199,9 +345,15 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def accessible(cls, kb_id, user_id):
+        # Check if a knowledge base is accessible by a user
+        # Args:
+        #     kb_id: Knowledge base ID
+        #     user_id: User ID
+        # Returns:
+        #     Boolean indicating accessibility
        docs = cls.model.select(
            cls.model.id).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
                               ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
        docs = docs.dicts()
        if not docs:
            return False

@@ -210,26 +362,64 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_kb_by_id(cls, kb_id, user_id):
+        # Get knowledge base by ID and user ID
+        # Args:
+        #     kb_id: Knowledge base ID
+        #     user_id: User ID
+        # Returns:
+        #     List containing knowledge base information
        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
                                      ).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
        kbs = kbs.dicts()
        return list(kbs)

    @classmethod
    @DB.connection_context()
    def get_kb_by_name(cls, kb_name, user_id):
+        # Get knowledge base by name and user ID
+        # Args:
+        #     kb_name: Knowledge base name
+        #     user_id: User ID
+        # Returns:
+        #     List containing knowledge base information
        kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
                                      ).where(cls.model.name == kb_name, UserTenant.user_id == user_id).paginate(0, 1)
        kbs = kbs.dicts()
        return list(kbs)

    @classmethod
    @DB.connection_context()
-    def accessible4deletion(cls, kb_id, user_id):
-        docs = cls.model.select(
-            cls.model.id).where(cls.model.id == kb_id, cls.model.created_by == user_id).paginate(0, 1)
-        docs = docs.dicts()
-        if not docs:
-            return False
-        return True
+    def atomic_increase_doc_num_by_id(cls, kb_id):
+        data = {}
+        data["update_time"] = current_timestamp()
+        data["update_date"] = datetime_format(datetime.now())
+        data["doc_num"] = cls.model.doc_num + 1
+        num = cls.model.update(data).where(cls.model.id == kb_id).execute()
+        return num
+
+    @classmethod
+    @DB.connection_context()
+    def update_document_number_in_init(cls, kb_id, doc_num):
+        """
+        Only use this function when init system
+        """
+        ok, kb = cls.get_by_id(kb_id)
+        if not ok:
+            return
+        kb.doc_num = doc_num
+
+        dirty_fields = kb.dirty_fields
+        if cls.model._meta.combined.get("update_time") in dirty_fields:
+            dirty_fields.remove(cls.model._meta.combined["update_time"])
+
+        if cls.model._meta.combined.get("update_date") in dirty_fields:
+            dirty_fields.remove(cls.model._meta.combined["update_date"])
+
+        try:
+            kb.save(only=dirty_fields)
+        except ValueError as e:
+            if str(e) == "no data to save!":
+                pass  # that's OK
+            else:
+                raise e

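Reviewer note: the two counter helpers added at the end of the class differ in intent. atomic_increase_doc_num_by_id bumps doc_num with a single UPDATE so concurrent uploads do not race, while update_document_number_in_init overwrites the counter without touching update_time/update_date, which only makes sense during system initialization. A hedged sketch of the two call sites; the surrounding helper functions are hypothetical.

from api.db.services.knowledgebase_service import KnowledgebaseService

def register_uploaded_document(kb_id: str) -> None:
    # One atomic UPDATE (doc_num = doc_num + 1) instead of read-modify-write,
    # so two concurrent uploads cannot lose an increment.
    KnowledgebaseService.atomic_increase_doc_num_by_id(kb_id)

def resync_counter_on_startup(kb_id: str, real_doc_count: int) -> None:
    # Init-time only: rewrites doc_num while leaving update_time/update_date
    # untouched, thanks to the dirty-field filtering shown above.
    KnowledgebaseService.update_document_number_in_init(kb_id, real_doc_count)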
api/db/services/langfuse_service.py  (new file, +71)

@@ -0,0 +1,71 @@
+#
+# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from datetime import datetime
+
+import peewee
+
+from api.db.db_models import DB, TenantLangfuse
+from api.db.services.common_service import CommonService
+from api.utils import current_timestamp, datetime_format
+
+
+class TenantLangfuseService(CommonService):
+    """
+    All methods that modify the status should be enclosed within a DB.atomic() context to ensure atomicity
+    and maintain data integrity in case of errors during execution.
+    """
+
+    model = TenantLangfuse
+
+    @classmethod
+    @DB.connection_context()
+    def filter_by_tenant(cls, tenant_id):
+        fields = [cls.model.tenant_id, cls.model.host, cls.model.secret_key, cls.model.public_key]
+        try:
+            keys = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id).first()
+            return keys
+        except peewee.DoesNotExist:
+            return None
+
+    @classmethod
+    @DB.connection_context()
+    def filter_by_tenant_with_info(cls, tenant_id):
+        fields = [cls.model.tenant_id, cls.model.host, cls.model.secret_key, cls.model.public_key]
+        try:
+            keys = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id).dicts().first()
+            return keys
+        except peewee.DoesNotExist:
+            return None
+
+    @classmethod
+    def update_by_tenant(cls, tenant_id, langfuse_keys):
+        langfuse_keys["update_time"] = current_timestamp()
+        langfuse_keys["update_date"] = datetime_format(datetime.now())
+        return cls.model.update(**langfuse_keys).where(cls.model.tenant_id == tenant_id).execute()
+
+    @classmethod
+    def save(cls, **kwargs):
+        kwargs["create_time"] = current_timestamp()
+        kwargs["create_date"] = datetime_format(datetime.now())
+        kwargs["update_time"] = current_timestamp()
+        kwargs["update_date"] = datetime_format(datetime.now())
+        obj = cls.model.create(**kwargs)
+        return obj
+
+    @classmethod
+    def delete_model(cls, langfuse_model):
+        langfuse_model.delete_instance()
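Reviewer note: the new service stores one set of Langfuse keys per tenant; filter_by_tenant returns a model row (attribute access) while filter_by_tenant_with_info returns a plain dict. A sketch of how a caller might upsert keys; the payload shape is inferred from the fields selected above and the helper function is hypothetical.

from api.db.services.langfuse_service import TenantLangfuseService

def upsert_langfuse_keys(tenant_id: str, host: str, public_key: str, secret_key: str):
    payload = {"tenant_id": tenant_id, "host": host, "public_key": public_key, "secret_key": secret_key}
    existing = TenantLangfuseService.filter_by_tenant(tenant_id)
    if existing:
        # update_by_tenant() stamps update_time/update_date before the UPDATE.
        return TenantLangfuseService.update_by_tenant(tenant_id, payload)
    # save() stamps both create_* and update_* columns before the INSERT.
    return TenantLangfuseService.save(**payload)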
api/db/services/llm_service.py

@@ -13,17 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import json
import logging
-import os

-from api.db.services.user_service import TenantService
-from api.utils.file_utils import get_project_base_directory
-from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel, Seq2txtModel, TTSModel
+from langfuse import Langfuse
+
+from api import settings
from api.db import LLMType
-from api.db.db_models import DB
-from api.db.db_models import LLMFactories, LLM, TenantLLM
+from api.db.db_models import DB, LLM, LLMFactories, TenantLLM
from api.db.services.common_service import CommonService
+from api.db.services.langfuse_service import TenantLangfuseService
+from api.db.services.user_service import TenantService
+from rag.llm import ChatModel, CvModel, EmbeddingModel, RerankModel, Seq2txtModel, TTSModel


class LLMFactoriesService(CommonService):

@@ -52,16 +52,8 @@ class TenantLLMService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_my_llms(cls, tenant_id):
-        fields = [
-            cls.model.llm_factory,
-            LLMFactories.logo,
-            LLMFactories.tags,
-            cls.model.model_type,
-            cls.model.llm_name,
-            cls.model.used_tokens
-        ]
-        objs = cls.model.select(*fields).join(LLMFactories, on=(cls.model.llm_factory == LLMFactories.name)).where(
-            cls.model.tenant_id == tenant_id, ~cls.model.api_key.is_null()).dicts()
+        fields = [cls.model.llm_factory, LLMFactories.logo, LLMFactories.tags, cls.model.model_type, cls.model.llm_name, cls.model.used_tokens]
+        objs = cls.model.select(*fields).join(LLMFactories, on=(cls.model.llm_factory == LLMFactories.name)).where(cls.model.tenant_id == tenant_id, ~cls.model.api_key.is_null()).dicts()

        return list(objs)

@@ -75,7 +67,7 @@ class TenantLLMService(CommonService):

        # model name must be xxx@yyy
        try:
-            model_factories = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
+            model_factories = settings.FACTORY_LLM_INFOS
            model_providers = set([f["name"] for f in model_factories])
            if arr[-1] not in model_providers:
                return model_name, None

@@ -86,8 +78,7 @@ class TenantLLMService(CommonService):

    @classmethod
    @DB.connection_context()
-    def model_instance(cls, tenant_id, llm_type,
-                       llm_name=None, lang="Chinese"):
+    def get_model_config(cls, tenant_id, llm_type, llm_name=None):
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")

@@ -111,6 +102,9 @@ class TenantLLMService(CommonService):
        mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
        if model_config:
            model_config = model_config.to_dict()
+            llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
+            if llm:
+                model_config["is_tools"] = llm[0].is_tools
        if not model_config:
            if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
                llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)

@@ -118,47 +112,41 @@ class TenantLLMService(CommonService):
                    model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": mdlnm, "api_base": ""}
        if not model_config:
            if mdlnm == "flag-embedding":
-                model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "",
-                                "llm_name": llm_name, "api_base": ""}
+                model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "", "llm_name": llm_name, "api_base": ""}
            else:
                if not mdlnm:
                    raise LookupError(f"Type of {llm_type} model is not set.")
                raise LookupError("Model({}) not authorized".format(mdlnm))
+        return model_config
+
+    @classmethod
+    @DB.connection_context()
+    def model_instance(cls, tenant_id, llm_type, llm_name=None, lang="Chinese"):
+        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        if llm_type == LLMType.EMBEDDING.value:
            if model_config["llm_factory"] not in EmbeddingModel:
                return
-            return EmbeddingModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
+            return EmbeddingModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        if llm_type == LLMType.RERANK:
            if model_config["llm_factory"] not in RerankModel:
                return
-            return RerankModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
+            return RerankModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        if llm_type == LLMType.IMAGE2TEXT.value:
            if model_config["llm_factory"] not in CvModel:
                return
-            return CvModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], lang,
-                base_url=model_config["api_base"]
-            )
+            return CvModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], lang, base_url=model_config["api_base"])

        if llm_type == LLMType.CHAT.value:
            if model_config["llm_factory"] not in ChatModel:
                return
-            return ChatModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
+            return ChatModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])

        if llm_type == LLMType.SPEECH2TEXT:
            if model_config["llm_factory"] not in Seq2txtModel:
                return
-            return Seq2txtModel[model_config["llm_factory"]](
-                key=model_config["api_key"], model_name=model_config["llm_name"],
-                lang=lang,
-                base_url=model_config["api_base"]
-            )
+            return Seq2txtModel[model_config["llm_factory"]](key=model_config["api_key"], model_name=model_config["llm_name"], lang=lang, base_url=model_config["api_base"])
        if llm_type == LLMType.TTS:
            if model_config["llm_factory"] not in TTSModel:
                return

@@ -171,135 +159,225 @@ class TenantLLMService(CommonService):
    @classmethod
    @DB.connection_context()
    def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
+        try:
+            if not DB.is_connection_usable():
+                DB.connect()
+        except Exception:
+            DB.close()
+            DB.connect()
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
-            raise LookupError("Tenant not found")
+            logging.error(f"Tenant not found: {tenant_id}")
+            return 0

-        if llm_type == LLMType.EMBEDDING.value:
-            mdlnm = tenant.embd_id
-        elif llm_type == LLMType.SPEECH2TEXT.value:
-            mdlnm = tenant.asr_id
-        elif llm_type == LLMType.IMAGE2TEXT.value:
-            mdlnm = tenant.img2txt_id
-        elif llm_type == LLMType.CHAT.value:
-            mdlnm = tenant.llm_id if not llm_name else llm_name
-        elif llm_type == LLMType.RERANK:
-            mdlnm = tenant.rerank_id if not llm_name else llm_name
-        elif llm_type == LLMType.TTS:
-            mdlnm = tenant.tts_id if not llm_name else llm_name
-        else:
-            assert False, "LLM type error"
+        llm_map = {
+            LLMType.EMBEDDING.value: tenant.embd_id,
+            LLMType.SPEECH2TEXT.value: tenant.asr_id,
+            LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
+            LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
+            LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
+            LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name,
+        }
+
+        mdlnm = llm_map.get(llm_type)
+        if mdlnm is None:
+            logging.error(f"LLM type error: {llm_type}")
+            return 0

        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)

-        num = 0
        try:
-            if llm_factory:
-                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory)
-            else:
-                tenant_llms = cls.query(tenant_id=tenant_id, llm_name=llm_name)
-            if not tenant_llms:
-                return num
-            else:
-                tenant_llm = tenant_llms[0]
-                num = cls.model.update(used_tokens=tenant_llm.used_tokens + used_tokens) \
-                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_factory == tenant_llm.llm_factory, cls.model.llm_name == llm_name) \
-                    .execute()
+            num = (
+                cls.model.update(used_tokens=cls.model.used_tokens + used_tokens)
+                .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == llm_name, cls.model.llm_factory == llm_factory if llm_factory else True)
+                .execute()
+            )
        except Exception:
-            logging.exception("TenantLLMService.increase_usage got exception")
+            logging.exception("TenantLLMService.increase_usage got exception,Failed to update used_tokens for tenant_id=%s, llm_name=%s", tenant_id, llm_name)
+            return 0

        return num

    @classmethod
    @DB.connection_context()
    def get_openai_models(cls):
-        objs = cls.model.select().where(
-            (cls.model.llm_factory == "OpenAI"),
-            ~(cls.model.llm_name == "text-embedding-3-small"),
-            ~(cls.model.llm_name == "text-embedding-3-large")
-        ).dicts()
+        objs = cls.model.select().where((cls.model.llm_factory == "OpenAI"), ~(cls.model.llm_name == "text-embedding-3-small"), ~(cls.model.llm_name == "text-embedding-3-large")).dicts()
        return list(objs)

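Reviewer note: the refactor splits configuration lookup (get_model_config) from instantiation (model_instance), and increase_usage now logs and returns 0 instead of raising when the tenant or model type is unknown. A brief sketch of calling the two class methods directly; the tenant id and token count are placeholders and the wrapper functions are illustrative only.

from api.db import LLMType
from api.db.services.llm_service import TenantLLMService

def embedding_factory_for(tenant_id: str) -> str:
    # get_model_config() raises LookupError when the tenant has no model of this type.
    cfg = TenantLLMService.get_model_config(tenant_id, LLMType.EMBEDDING)
    return cfg["llm_factory"]

def record_embedding_usage(tenant_id: str, used_tokens: int) -> int:
    # Returns the number of updated rows; with this change, failures are logged and 0 is returned.
    return TenantLLMService.increase_usage(tenant_id, LLMType.EMBEDDING, used_tokens)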
-class LLMBundle(object):
+class LLMBundle:
    def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese"):
        self.tenant_id = tenant_id
        self.llm_type = llm_type
        self.llm_name = llm_name
-        self.mdl = TenantLLMService.model_instance(
-            tenant_id, llm_type, llm_name, lang=lang)
-        assert self.mdl, "Can't find model for {}/{}/{}".format(
-            tenant_id, llm_type, llm_name)
-        self.max_length = 8192
-        for lm in LLMService.query(llm_name=llm_name):
-            self.max_length = lm.max_tokens
-            break
+        self.mdl = TenantLLMService.model_instance(tenant_id, llm_type, llm_name, lang=lang)
+        assert self.mdl, "Can't find model for {}/{}/{}".format(tenant_id, llm_type, llm_name)
+        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
+        self.max_length = model_config.get("max_tokens", 8192)
+
+        self.is_tools = model_config.get("is_tools", False)
+
+        langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=tenant_id)
+        if langfuse_keys:
+            langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
+            if langfuse.auth_check():
+                self.langfuse = langfuse
+                self.trace = self.langfuse.trace(name=f"{self.llm_type}-{self.llm_name}")
+        else:
+            self.langfuse = None
+
+    def bind_tools(self, toolcall_session, tools):
+        if not self.is_tools:
+            return
+        self.mdl.bind_tools(toolcall_session, tools)

    def encode(self, texts: list):
+        if self.langfuse:
+            generation = self.trace.generation(name="encode", model=self.llm_name, input={"texts": texts})
+
        embeddings, used_tokens = self.mdl.encode(texts)
-        if not TenantLLMService.increase_usage(
-                self.tenant_id, self.llm_type, used_tokens):
-            logging.error(
-                "LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
+        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
+            logging.error("LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
+
+        if self.langfuse:
+            generation.end(usage_details={"total_tokens": used_tokens})

        return embeddings, used_tokens

    def encode_queries(self, query: str):
+        if self.langfuse:
+            generation = self.trace.generation(name="encode_queries", model=self.llm_name, input={"query": query})
+
        emd, used_tokens = self.mdl.encode_queries(query)
-        if not TenantLLMService.increase_usage(
-                self.tenant_id, self.llm_type, used_tokens):
-            logging.error(
-                "LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
+        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
+            logging.error("LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
+
+        if self.langfuse:
+            generation.end(usage_details={"total_tokens": used_tokens})

        return emd, used_tokens

    def similarity(self, query: str, texts: list):
+        if self.langfuse:
+            generation = self.trace.generation(name="similarity", model=self.llm_name, input={"query": query, "texts": texts})
+
        sim, used_tokens = self.mdl.similarity(query, texts)
-        if not TenantLLMService.increase_usage(
-                self.tenant_id, self.llm_type, used_tokens):
-            logging.error(
-                "LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
+        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
+            logging.error("LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
+
+        if self.langfuse:
+            generation.end(usage_details={"total_tokens": used_tokens})

        return sim, used_tokens

    def describe(self, image, max_tokens=300):
-        txt, used_tokens = self.mdl.describe(image, max_tokens)
-        if not TenantLLMService.increase_usage(
-                self.tenant_id, self.llm_type, used_tokens):
-            logging.error(
-                "LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
+        if self.langfuse:
+            generation = self.trace.generation(name="describe", metadata={"model": self.llm_name})
+
+        txt, used_tokens = self.mdl.describe(image)
+        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
+            logging.error("LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
+
+        if self.langfuse:
+            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})
+
+        return txt
+
+    def describe_with_prompt(self, image, prompt):
+        if self.langfuse:
+            generation = self.trace.generation(name="describe_with_prompt", metadata={"model": self.llm_name, "prompt": prompt})
+
+        txt, used_tokens = self.mdl.describe_with_prompt(image, prompt)
+        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
+            logging.error("LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
+
+        if self.langfuse:
+            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def transcription(self, audio):
+        if self.langfuse:
+            generation = self.trace.generation(name="transcription", metadata={"model": self.llm_name})
+
        txt, used_tokens = self.mdl.transcription(audio)
-        if not TenantLLMService.increase_usage(
-                self.tenant_id, self.llm_type, used_tokens):
-            logging.error(
-                "LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
+        if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
+            logging.error("LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
+
+        if self.langfuse:
+            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def tts(self, text):
+        if self.langfuse:
+            span = self.trace.span(name="tts", input={"text": text})
+
        for chunk in self.mdl.tts(text):
            if isinstance(chunk, int):
-                if not TenantLLMService.increase_usage(
-                        self.tenant_id, self.llm_type, chunk, self.llm_name):
-                    logging.error(
-                        "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
+                if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, chunk, self.llm_name):
+                    logging.error("LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                return
            yield chunk

+        if self.langfuse:
+            span.end()
+
+    def _remove_reasoning_content(self, txt: str) -> str:
+        first_think_start = txt.find("<think>")
+        if first_think_start == -1:
+            return txt
+
+        last_think_end = txt.rfind("</think>")
+        if last_think_end == -1:
+            return txt
+
+        if last_think_end < first_think_start:
+            return txt
+
+        return txt[last_think_end + len("</think>") :]

    def chat(self, system, history, gen_conf):
-        txt, used_tokens = self.mdl.chat(system, history, gen_conf)
-        if isinstance(txt, int) and not TenantLLMService.increase_usage(
-                self.tenant_id, self.llm_type, used_tokens, self.llm_name):
-            logging.error(
-                "LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
+        if self.langfuse:
+            generation = self.trace.generation(name="chat", model=self.llm_name, input={"system": system, "history": history})
+
+        chat = self.mdl.chat
+        if self.is_tools and self.mdl.is_tools:
+            chat = self.mdl.chat_with_tools
+
+        txt, used_tokens = chat(system, history, gen_conf)
+        txt = self._remove_reasoning_content(txt)
+
+        if isinstance(txt, int) and not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, self.llm_name):
+            logging.error("LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
+
+        if self.langfuse:
+            generation.end(output={"output": txt}, usage_details={"total_tokens": used_tokens})

        return txt

    def chat_streamly(self, system, history, gen_conf):
-        for txt in self.mdl.chat_streamly(system, history, gen_conf):
-            if isinstance(txt, int):
-                if not TenantLLMService.increase_usage(
-                        self.tenant_id, self.llm_type, txt, self.llm_name):
-                    logging.error(
-                        "LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
-                return
-            yield txt
+        if self.langfuse:
+            generation = self.trace.generation(name="chat_streamly", model=self.llm_name, input={"system": system, "history": history})
+
+        ans = ""
+        chat_streamly = self.mdl.chat_streamly
+        total_tokens = 0
+        if self.is_tools and self.mdl.is_tools:
+            chat_streamly = self.mdl.chat_streamly_with_tools
+
+        for txt in chat_streamly(system, history, gen_conf):
+            if isinstance(txt, int):
+                total_tokens = txt
+                if self.langfuse:
+                    generation.end(output={"output": ans})
+                break
+
+            if txt.endswith("</think>"):
+                ans = ans.rstrip("</think>")
+
+            ans += txt
+            yield ans
+
+        if total_tokens > 0:
+            if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, txt, self.llm_name):
+                logging.error("LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))

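Reviewer note: with these changes, LLMBundle resolves max_length and tool support from the model configuration and, when the tenant has valid Langfuse keys, wraps each call in a trace generation. A hedged end-to-end sketch of calling it; the tenant id, prompt, and wrapper function are placeholders, not part of the diff.

from api.db import LLMType
from api.db.services.llm_service import LLMBundle

def summarize(tenant_id: str, text: str) -> str:
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    # chat() strips any <think>...</think> reasoning block before returning,
    # and records the generation in Langfuse when keys are configured.
    return chat_mdl.chat(
        system="You are a concise summarizer.",
        history=[{"role": "user", "content": text}],
        gen_conf={"temperature": 0.1},
    )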
api/db/services/task_service.py

@@ -28,7 +28,7 @@ from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.utils import current_timestamp, get_uuid
from deepdoc.parser.excel_parser import RAGFlowExcelParser
-from rag.settings import SVR_QUEUE_NAME
+from rag.settings import get_svr_queue_name
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.redis_conn import REDIS_CONN
from api import settings

@@ -36,6 +36,12 @@ from rag.nlp import search


def trim_header_by_lines(text: str, max_length) -> str:
+    # Trim header text to maximum length while preserving line breaks
+    # Args:
+    #     text: Input text to trim
+    #     max_length: Maximum allowed length
+    # Returns:
+    #     Trimmed text
    len_text = len(text)
    if len_text <= max_length:
        return text

@@ -46,11 +52,37 @@ def trim_header_by_lines(text: str, max_length) -> str:


class TaskService(CommonService):
+    """Service class for managing document processing tasks.
+
+    This class extends CommonService to provide specialized functionality for document
+    processing task management, including task creation, progress tracking, and chunk
+    management. It handles various document types (PDF, Excel, etc.) and manages their
+    processing lifecycle.
+
+    The class implements a robust task queue system with retry mechanisms and progress
+    tracking, supporting both synchronous and asynchronous task execution.
+
+    Attributes:
+        model: The Task model class for database operations.
+    """
    model = Task

    @classmethod
    @DB.connection_context()
    def get_task(cls, task_id):
+        """Retrieve detailed task information by task ID.
+
+        This method fetches comprehensive task details including associated document,
+        knowledge base, and tenant information. It also handles task retry logic and
+        progress updates.
+
+        Args:
+            task_id (str): The unique identifier of the task to retrieve.
+
+        Returns:
+            dict: Task details dictionary containing all task information and related metadata.
+            Returns None if task is not found or has exceeded retry limit.
+        """
        fields = [
            cls.model.id,
            cls.model.doc_id,

@@ -105,6 +137,18 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_tasks(cls, doc_id: str):
+        """Retrieve all tasks associated with a document.
+
+        This method fetches all processing tasks for a given document, ordered by page
+        number and creation time. It includes task progress and chunk information.
+
+        Args:
+            doc_id (str): The unique identifier of the document.
+
+        Returns:
+            list[dict]: List of task dictionaries containing task details.
+            Returns None if no tasks are found.
+        """
        fields = [
            cls.model.id,
            cls.model.from_page,

@@ -124,11 +168,31 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def update_chunk_ids(cls, id: str, chunk_ids: str):
+        """Update the chunk IDs associated with a task.
+
+        This method updates the chunk_ids field of a task, which stores the IDs of
+        processed document chunks in a space-separated string format.
+
+        Args:
+            id (str): The unique identifier of the task.
+            chunk_ids (str): Space-separated string of chunk identifiers.
+        """
        cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()

    @classmethod
    @DB.connection_context()
    def get_ongoing_doc_name(cls):
+        """Get names of documents that are currently being processed.
+
+        This method retrieves information about documents that are in the processing state,
+        including their locations and associated IDs. It uses database locking to ensure
+        thread safety when accessing the task information.
+
+        Returns:
+            list[tuple]: A list of tuples, each containing (parent_id/kb_id, location)
+            for documents currently being processed. Returns empty list if
+            no documents are being processed.
+        """
        with DB.lock("get_task", -1):
            docs = (
                cls.model.select(

@@ -172,6 +236,18 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def do_cancel(cls, id):
+        """Check if a task should be cancelled based on its document status.
+
+        This method determines whether a task should be cancelled by checking the
+        associated document's run status and progress. A task should be cancelled
+        if its document is marked for cancellation or has negative progress.
+
+        Args:
+            id (str): The unique identifier of the task to check.
+
+        Returns:
+            bool: True if the task should be cancelled, False otherwise.
+        """
        task = cls.model.get_by_id(id)
        _, doc = DocumentService.get_by_id(task.doc_id)
        return doc.run == TaskStatus.CANCEL.value or doc.progress < 0

@@ -179,6 +255,18 @@ class TaskService(CommonService):
    @classmethod
    @DB.connection_context()
    def update_progress(cls, id, info):
+        """Update the progress information for a task.
+
+        This method updates both the progress message and completion percentage of a task.
+        It handles platform-specific behavior (macOS vs others) and uses database locking
+        when necessary to ensure thread safety.
+
+        Args:
+            id (str): The unique identifier of the task to update.
+            info (dict): Dictionary containing progress information with keys:
+                - progress_msg (str, optional): Progress message to append
+                - progress (float, optional): Progress percentage (0.0 to 1.0)
+        """
        if os.environ.get("MACOS"):
            if info["progress_msg"]:
                task = cls.model.get_by_id(id)

@@ -201,7 +289,26 @@ class TaskService(CommonService):
        ).execute()


-def queue_tasks(doc: dict, bucket: str, name: str):
+def queue_tasks(doc: dict, bucket: str, name: str, priority: int):
+    """Create and queue document processing tasks.
+
+    This function creates processing tasks for a document based on its type and configuration.
+    It handles different document types (PDF, Excel, etc.) differently and manages task
+    chunking and configuration. It also implements task reuse optimization by checking
+    for previously completed tasks.
+
+    Args:
+        doc (dict): Document dictionary containing metadata and configuration.
+        bucket (str): Storage bucket name where the document is stored.
+        name (str): File name of the document.
+        priority (int, optional): Priority level for task queueing (default is 0).
+
+    Note:
+        - For PDF documents, tasks are created per page range based on configuration
+        - For Excel documents, tasks are created per row range
+        - Task digests are calculated for optimization and reuse
+        - Previous task chunks may be reused if available
+    """
    def new_task():
        return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}

@@ -252,6 +359,7 @@ def queue_tasks(doc: dict, bucket: str, name: str):
        task_digest = hasher.hexdigest()
        task["digest"] = task_digest
        task["progress"] = 0.0
+        task["priority"] = priority

    prev_tasks = TaskService.get_tasks(doc["id"])
    ck_num = 0

@@ -274,11 +382,31 @@ def queue_tasks(doc: dict, bucket: str, name: str):
    unfinished_task_array = [task for task in parse_task_array if task["progress"] < 1.0]
    for unfinished_task in unfinished_task_array:
        assert REDIS_CONN.queue_product(
-            SVR_QUEUE_NAME, message=unfinished_task
+            get_svr_queue_name(priority), message=unfinished_task
        ), "Can't access Redis. Please check the Redis' status."


def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
+    """Attempt to reuse chunks from previous tasks for optimization.
+
+    This function checks if chunks from previously completed tasks can be reused for
+    the current task, which can significantly improve processing efficiency. It matches
+    tasks based on page ranges and configuration digests.
+
+    Args:
+        task (dict): Current task dictionary to potentially reuse chunks for.
+        prev_tasks (list[dict]): List of previous task dictionaries to check for reuse.
+        chunking_config (dict): Configuration dictionary for chunk processing.
+
+    Returns:
+        int: Number of chunks successfully reused. Returns 0 if no chunks could be reused.
+
+    Note:
+        Chunks can only be reused if:
+        - A previous task exists with matching page range and configuration digest
+        - The previous task was completed successfully (progress = 1.0)
+        - The previous task has valid chunk IDs
+    """
    idx = 0
    while idx < len(prev_tasks):
        prev_task = prev_tasks[idx]

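Reviewer note: queue_tasks now takes a priority argument and routes messages through get_svr_queue_name(priority), so the caller picks the Redis queue at enqueue time. A hedged sketch of a call site; the wrapper function is hypothetical and only shows the arguments this diff defines.

from api.db.services.document_service import DocumentService
from api.db.services.task_service import queue_tasks

def enqueue_for_parsing(doc_id: str, bucket: str, name: str, priority: int = 0) -> None:
    ok, doc = DocumentService.get_by_id(doc_id)
    if not ok:
        raise LookupError(f"Document {doc_id} not found.")
    # priority selects the target queue via get_svr_queue_name(priority).
    queue_tasks(doc.to_dict(), bucket, name, priority)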
api/db/services/user_canvas_version.py  (new file, +43)

@@ -0,0 +1,43 @@
+from api.db.db_models import UserCanvasVersion, DB
+from api.db.services.common_service import CommonService
+from peewee import DoesNotExist
+
+
+class UserCanvasVersionService(CommonService):
+    model = UserCanvasVersion
+
+    @classmethod
+    @DB.connection_context()
+    def list_by_canvas_id(cls, user_canvas_id):
+        try:
+            user_canvas_version = cls.model.select(
+                *[cls.model.id,
+                  cls.model.create_time,
+                  cls.model.title,
+                  cls.model.create_date,
+                  cls.model.update_date,
+                  cls.model.user_canvas_id,
+                  cls.model.update_time]
+            ).where(cls.model.user_canvas_id == user_canvas_id)
+            return user_canvas_version
+        except DoesNotExist:
+            return None
+        except Exception:
+            return None
+
+    @classmethod
+    @DB.connection_context()
+    def delete_all_versions(cls, user_canvas_id):
+        try:
+            user_canvas_version = cls.model.select().where(cls.model.user_canvas_id == user_canvas_id).order_by(cls.model.create_time.desc())
+            if user_canvas_version.count() > 20:
+                for i in range(20, user_canvas_version.count()):
+                    cls.delete(user_canvas_version[i].id)
+            return True
+        except DoesNotExist:
+            return None
+        except Exception:
+            return None

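Reviewer note: delete_all_versions is effectively a retention policy for canvas history: it keeps the 20 most recent versions per canvas (ordered by create_time descending) and deletes the rest. A sketch of how these two helpers might be wired into an agent-canvas save path; the wrapper function names are hypothetical.

from api.db.services.user_canvas_version import UserCanvasVersionService

def prune_canvas_history(user_canvas_id: str) -> None:
    # Keeps the 20 newest versions and drops everything older.
    UserCanvasVersionService.delete_all_versions(user_canvas_id)

def list_canvas_history(user_canvas_id: str):
    versions = UserCanvasVersionService.list_by_canvas_id(user_canvas_id)
    return [] if versions is None else list(versions.dicts())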
|
||||||
@ -29,11 +29,27 @@ from rag.settings import MINIO


 class UserService(CommonService):
+    """Service class for managing user-related database operations.
+
+    This class extends CommonService to provide specialized functionality for user management,
+    including authentication, user creation, updates, and deletions.
+
+    Attributes:
+        model: The User model class for database operations.
+    """
     model = User

     @classmethod
     @DB.connection_context()
     def filter_by_id(cls, user_id):
+        """Retrieve a user by their ID.
+
+        Args:
+            user_id: The unique identifier of the user.
+
+        Returns:
+            User object if found, None otherwise.
+        """
         try:
             user = cls.model.select().where(cls.model.id == user_id).get()
             return user
@ -43,6 +59,15 @@ class UserService(CommonService):
     @classmethod
     @DB.connection_context()
     def query_user(cls, email, password):
+        """Authenticate a user with email and password.
+
+        Args:
+            email: User's email address.
+            password: User's password in plain text.
+
+        Returns:
+            User object if authentication successful, None otherwise.
+        """
         user = cls.model.select().where((cls.model.email == email),
                                         (cls.model.status == StatusEnum.VALID.value)).first()
         if user and check_password_hash(str(user.password), password):
@ -85,6 +110,14 @@ class UserService(CommonService):


 class TenantService(CommonService):
+    """Service class for managing tenant-related database operations.
+
+    This class extends CommonService to provide functionality for tenant management,
+    including tenant information retrieval and credit management.
+
+    Attributes:
+        model: The Tenant model class for database operations.
+    """
     model = Tenant

     @classmethod
@ -136,8 +169,25 @@ class TenantService(CommonService):


 class UserTenantService(CommonService):
+    """Service class for managing user-tenant relationship operations.
+
+    This class extends CommonService to handle the many-to-many relationship
+    between users and tenants, managing user roles and tenant memberships.
+
+    Attributes:
+        model: The UserTenant model class for database operations.
+    """
     model = UserTenant

+    @classmethod
+    @DB.connection_context()
+    def filter_by_id(cls, user_tenant_id):
+        try:
+            user_tenant = cls.model.select().where((cls.model.id == user_tenant_id) & (cls.model.status == StatusEnum.VALID.value)).get()
+            return user_tenant
+        except peewee.DoesNotExist:
+            return None
+
     @classmethod
     @DB.connection_context()
     def save(cls, **kwargs):
@ -150,6 +200,7 @@ class UserTenantService(CommonService):
     @DB.connection_context()
     def get_by_tenant_id(cls, tenant_id):
         fields = [
+            cls.model.id,
             cls.model.user_id,
             cls.model.status,
             cls.model.role,
@ -181,3 +232,21 @@ class UserTenantService(CommonService):
         return list(cls.model.select(*fields)
                     .join(User, on=((cls.model.tenant_id == User.id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value)))
                     .where(cls.model.status == StatusEnum.VALID.value).dicts())
+
+    @classmethod
+    @DB.connection_context()
+    def get_num_members(cls, user_id: str):
+        cnt_members = cls.model.select(peewee.fn.COUNT(cls.model.id)).where(cls.model.tenant_id == user_id).scalar()
+        return cnt_members
+
+    @classmethod
+    @DB.connection_context()
+    def filter_by_tenant_and_user_id(cls, tenant_id, user_id):
+        try:
+            user_tenant = cls.model.select().where(
+                (cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value) &
+                (cls.model.user_id == user_id)
+            ).first()
+            return user_tenant
+        except peewee.DoesNotExist:
+            return None
@ -28,6 +28,8 @@ import sys
 import time
 import traceback
 from concurrent.futures import ThreadPoolExecutor
+import threading
+import uuid

 from werkzeug.serving import run_simple
 from api import settings
@ -41,16 +43,32 @@ from api.db.init_data import init_web_data
 from api.versions import get_ragflow_version
 from api.utils import show_configs
 from rag.settings import print_rag_settings
+from rag.utils.redis_conn import RedisDistributedLock
+
+stop_event = threading.Event()
+
+RAGFLOW_DEBUGPY_LISTEN = int(os.environ.get('RAGFLOW_DEBUGPY_LISTEN', "0"))


 def update_progress():
-    while True:
-        time.sleep(6)
+    lock_value = str(uuid.uuid4())
+    redis_lock = RedisDistributedLock("update_progress", lock_value=lock_value, timeout=60)
+    logging.info(f"update_progress lock_value: {lock_value}")
+    while not stop_event.is_set():
         try:
-            DocumentService.update_progress()
+            if redis_lock.acquire():
+                DocumentService.update_progress()
+                redis_lock.release()
+            stop_event.wait(6)
         except Exception:
             logging.exception("update_progress exception")
+        finally:
+            redis_lock.release()
+
+
+def signal_handler(sig, frame):
+    logging.info("Received interrupt signal, shutting down...")
+    stop_event.set()
+    time.sleep(1)
+    sys.exit(0)


 if __name__ == '__main__':
     logging.info(r"""
@ -71,6 +89,11 @@ if __name__ == '__main__':
     settings.init_settings()
     print_rag_settings()
+
+    if RAGFLOW_DEBUGPY_LISTEN > 0:
+        logging.info(f"debugpy listen on {RAGFLOW_DEBUGPY_LISTEN}")
+        import debugpy
+        debugpy.listen(("0.0.0.0", RAGFLOW_DEBUGPY_LISTEN))
+
     # init db
     init_web_db()
     init_web_data()
@ -96,6 +119,9 @@ if __name__ == '__main__':
     RuntimeConfig.init_env()
     RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)
+
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)

     thread = ThreadPoolExecutor(max_workers=1)
     thread.submit(update_progress)
@ -112,4 +138,6 @@ if __name__ == '__main__':
         )
     except Exception:
         traceback.print_exc()
+        stop_event.set()
+        time.sleep(1)
         os.kill(os.getpid(), signal.SIGKILL)
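update_progress now runs in every server process, but the Redis lock ensures only one process does the work in any interval. The idea behind RedisDistributedLock, sketched with plain redis-py; the real helper lives in rag.utils.redis_conn and its internals may differ:

import time
import uuid
import redis

r = redis.Redis()

def run_exclusive(name, work, timeout=60, interval=6):
    token = str(uuid.uuid4())
    while True:
        # SET NX EX: an expiring, single-holder lock keyed by name.
        if r.set(f"lock:{name}", token, nx=True, ex=timeout):
            try:
                work()
            finally:
                # Release only if we still hold the lock (it may have expired).
                if r.get(f"lock:{name}") == token.encode():
                    r.delete(f"lock:{name}")
        time.sleep(interval)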
@ -16,6 +16,7 @@
 import os
 from datetime import date
 from enum import IntEnum, Enum
+import json
 import rag.utils.es_conn
 import rag.utils.infinity_conn
@ -24,6 +25,7 @@ from rag.nlp import search
 from graphrag import search as kg_search
 from api.utils import get_base_config, decrypt_database_config
 from api.constants import RAG_FLOW_SERVICE_NAME
+from api.utils.file_utils import get_project_base_directory

 LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
@ -40,6 +42,7 @@ PARSERS = None
 HOST_IP = None
 HOST_PORT = None
 SECRET_KEY = None
+FACTORY_LLM_INFOS = None

 DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
 DATABASE = decrypt_database_config(name=DATABASE_TYPE)
@ -59,88 +62,54 @@ docStoreConn = None
 retrievaler = None
 kg_retrievaler = None

+# user registration switch
+REGISTER_ENABLED = 1
+
+
 def init_settings():
-    global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE
+    global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE, FACTORY_LLM_INFOS, REGISTER_ENABLED
     LIGHTEN = int(os.environ.get('LIGHTEN', "0"))
     DATABASE_TYPE = os.getenv("DB_TYPE", 'mysql')
     DATABASE = decrypt_database_config(name=DATABASE_TYPE)
     LLM = get_base_config("user_default_llm", {})
+    LLM_DEFAULT_MODELS = LLM.get("default_models", {})
     LLM_FACTORY = LLM.get("factory", "Tongyi-Qianwen")
     LLM_BASE_URL = LLM.get("base_url")
+    try:
+        REGISTER_ENABLED = int(os.environ.get("REGISTER_ENABLED", "1"))
+    except Exception:
+        pass
+
+    try:
+        with open(os.path.join(get_project_base_directory(), "conf", "llm_factories.json"), "r") as f:
+            FACTORY_LLM_INFOS = json.load(f)["factory_llm_infos"]
+    except Exception:
+        FACTORY_LLM_INFOS = []
+
     global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
     if not LIGHTEN:
-        default_llm = {
-            "Tongyi-Qianwen": {
-                "chat_model": "qwen-plus",
-                "embedding_model": "text-embedding-v2",
-                "image2text_model": "qwen-vl-max",
-                "asr_model": "paraformer-realtime-8k-v1",
-            },
-            "OpenAI": {
-                "chat_model": "gpt-3.5-turbo",
-                "embedding_model": "text-embedding-ada-002",
-                "image2text_model": "gpt-4-vision-preview",
-                "asr_model": "whisper-1",
-            },
-            "Azure-OpenAI": {
-                "chat_model": "gpt-35-turbo",
-                "embedding_model": "text-embedding-ada-002",
-                "image2text_model": "gpt-4-vision-preview",
-                "asr_model": "whisper-1",
-            },
-            "ZHIPU-AI": {
-                "chat_model": "glm-3-turbo",
-                "embedding_model": "embedding-2",
-                "image2text_model": "glm-4v",
-                "asr_model": "",
-            },
-            "Ollama": {
-                "chat_model": "qwen-14B-chat",
-                "embedding_model": "flag-embedding",
-                "image2text_model": "",
-                "asr_model": "",
-            },
-            "Moonshot": {
-                "chat_model": "moonshot-v1-8k",
-                "embedding_model": "",
-                "image2text_model": "",
-                "asr_model": "",
-            },
-            "DeepSeek": {
-                "chat_model": "deepseek-chat",
-                "embedding_model": "",
-                "image2text_model": "",
-                "asr_model": "",
-            },
-            "VolcEngine": {
-                "chat_model": "",
-                "embedding_model": "",
-                "image2text_model": "",
-                "asr_model": "",
-            },
-            "BAAI": {
-                "chat_model": "",
-                "embedding_model": "BAAI/bge-large-zh-v1.5",
-                "image2text_model": "",
-                "asr_model": "",
-                "rerank_model": "BAAI/bge-reranker-v2-m3",
-            }
-        }
-
-        if LLM_FACTORY:
-            CHAT_MDL = default_llm[LLM_FACTORY]["chat_model"] + f"@{LLM_FACTORY}"
-            ASR_MDL = default_llm[LLM_FACTORY]["asr_model"] + f"@{LLM_FACTORY}"
-            IMAGE2TEXT_MDL = default_llm[LLM_FACTORY]["image2text_model"] + f"@{LLM_FACTORY}"
-        EMBEDDING_MDL = default_llm["BAAI"]["embedding_model"] + "@BAAI"
-        RERANK_MDL = default_llm["BAAI"]["rerank_model"] + "@BAAI"
+        EMBEDDING_MDL = "BAAI/bge-large-zh-v1.5@BAAI"
+
+    if LLM_DEFAULT_MODELS:
+        CHAT_MDL = LLM_DEFAULT_MODELS.get("chat_model", CHAT_MDL)
+        EMBEDDING_MDL = LLM_DEFAULT_MODELS.get("embedding_model", EMBEDDING_MDL)
+        RERANK_MDL = LLM_DEFAULT_MODELS.get("rerank_model", RERANK_MDL)
+        ASR_MDL = LLM_DEFAULT_MODELS.get("asr_model", ASR_MDL)
+        IMAGE2TEXT_MDL = LLM_DEFAULT_MODELS.get("image2text_model", IMAGE2TEXT_MDL)
+
+        # factory can be specified in the config name with "@". LLM_FACTORY will be used if not specified
+        CHAT_MDL = CHAT_MDL + (f"@{LLM_FACTORY}" if "@" not in CHAT_MDL and CHAT_MDL != "" else "")
+        EMBEDDING_MDL = EMBEDDING_MDL + (f"@{LLM_FACTORY}" if "@" not in EMBEDDING_MDL and EMBEDDING_MDL != "" else "")
+        RERANK_MDL = RERANK_MDL + (f"@{LLM_FACTORY}" if "@" not in RERANK_MDL and RERANK_MDL != "" else "")
+        ASR_MDL = ASR_MDL + (f"@{LLM_FACTORY}" if "@" not in ASR_MDL and ASR_MDL != "" else "")
+        IMAGE2TEXT_MDL = IMAGE2TEXT_MDL + (
+            f"@{LLM_FACTORY}" if "@" not in IMAGE2TEXT_MDL and IMAGE2TEXT_MDL != "" else "")

     global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
     API_KEY = LLM.get("api_key", "")
     PARSERS = LLM.get(
         "parsers",
-        "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email,tag:Tag")
+        "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag")
+
     HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
     HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")
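The effect of the new default_models handling is easiest to see on the "@factory" suffix rule: a configured default model inherits LLM_FACTORY unless it already names a factory. A self-contained restatement of that one rule, with made-up values:

def with_factory(model_name: str, factory: str) -> str:
    # Append "@<factory>" only when the name is non-empty and has no explicit factory.
    return model_name + (f"@{factory}" if "@" not in model_name and model_name != "" else "")

assert with_factory("deepseek-chat", "DeepSeek") == "deepseek-chat@DeepSeek"
assert with_factory("BAAI/bge-large-zh-v1.5@BAAI", "DeepSeek") == "BAAI/bge-large-zh-v1.5@BAAI"
assert with_factory("", "DeepSeek") == ""  # unset models stay unset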
@ -70,6 +70,12 @@ def show_configs():
         if "password" in v:
             v = copy.deepcopy(v)
             v["password"] = "*" * 8
+        if "access_key" in v:
+            v = copy.deepcopy(v)
+            v["access_key"] = "*" * 8
+        if "secret_key" in v:
+            v = copy.deepcopy(v)
+            v["secret_key"] = "*" * 8
         msg += f"\n\t{k}: {v}"
     logging.info(msg)
@ -351,6 +357,26 @@ def decrypt(line):
         line), "Fail to decrypt password!").decode('utf-8')


+def decrypt2(crypt_text):
+    from base64 import b64decode, b16decode
+    from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
+    from Crypto.PublicKey import RSA
+    decode_data = b64decode(crypt_text)
+    if len(decode_data) == 127:
+        hex_fixed = '00' + decode_data.hex()
+        decode_data = b16decode(hex_fixed.upper())
+
+    file_path = os.path.join(
+        file_utils.get_project_base_directory(),
+        "conf",
+        "private.pem")
+    pem = open(file_path).read()
+    rsa_key = RSA.importKey(pem, "Welcome")
+    cipher = Cipher_PKCS1_v1_5.new(rsa_key)
+    decrypt_text = cipher.decrypt(decode_data, None)
+    return (b64decode(decrypt_text)).decode()
+
+
 def download_img(url):
     if not url:
         return ""
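The masking added to show_configs() repeats the same three lines per key. The same idea in one generic helper, shown only as a sketch rather than what the diff actually does:

import copy

SENSITIVE_KEYS = ("password", "access_key", "secret_key")

def mask_sensitive(conf: dict) -> dict:
    masked = copy.deepcopy(conf)
    for key in SENSITIVE_KEYS:
        if key in masked:
            masked[key] = "*" * 8
    return masked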
|||||||
@ -13,9 +13,9 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
import logging
|
|
||||||
import functools
|
import functools
|
||||||
import json
|
import json
|
||||||
|
import logging
|
||||||
import random
|
import random
|
||||||
import time
|
import time
|
||||||
from base64 import b64encode
|
from base64 import b64encode
|
||||||
@ -27,59 +27,60 @@ from uuid import uuid1
|
|||||||
|
|
||||||
import requests
|
import requests
|
||||||
from flask import (
|
from flask import (
|
||||||
Response, jsonify, send_file, make_response,
|
Response,
|
||||||
|
jsonify,
|
||||||
|
make_response,
|
||||||
|
send_file,
|
||||||
|
)
|
||||||
|
from flask import (
|
||||||
request as flask_request,
|
request as flask_request,
|
||||||
)
|
)
|
||||||
from itsdangerous import URLSafeTimedSerializer
|
from itsdangerous import URLSafeTimedSerializer
|
||||||
from werkzeug.http import HTTP_STATUS_CODES
|
from werkzeug.http import HTTP_STATUS_CODES
|
||||||
|
|
||||||
from api.db.db_models import APIToken
|
|
||||||
from api import settings
|
from api import settings
|
||||||
|
from api.constants import REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC
|
||||||
|
from api.db.db_models import APIToken
|
||||||
|
from api.utils import CustomJSONEncoder, get_uuid, json_dumps
|
||||||
|
|
||||||
from api.utils import CustomJSONEncoder, get_uuid
|
requests.models.complexjson.dumps = functools.partial(json.dumps, cls=CustomJSONEncoder)
|
||||||
from api.utils import json_dumps
|
|
||||||
from api.constants import REQUEST_WAIT_SEC, REQUEST_MAX_WAIT_SEC
|
|
||||||
|
|
||||||
requests.models.complexjson.dumps = functools.partial(
|
|
||||||
json.dumps, cls=CustomJSONEncoder)
|
|
||||||
|
|
||||||
|
|
||||||
def request(**kwargs):
|
def request(**kwargs):
|
||||||
sess = requests.Session()
|
sess = requests.Session()
|
||||||
stream = kwargs.pop('stream', sess.stream)
|
stream = kwargs.pop("stream", sess.stream)
|
||||||
timeout = kwargs.pop('timeout', None)
|
timeout = kwargs.pop("timeout", None)
|
||||||
kwargs['headers'] = {
|
kwargs["headers"] = {k.replace("_", "-").upper(): v for k, v in kwargs.get("headers", {}).items()}
|
||||||
k.replace(
|
|
||||||
'_',
|
|
||||||
'-').upper(): v for k,
|
|
||||||
v in kwargs.get(
|
|
||||||
'headers',
|
|
||||||
{}).items()}
|
|
||||||
prepped = requests.Request(**kwargs).prepare()
|
prepped = requests.Request(**kwargs).prepare()
|
||||||
|
|
||||||
if settings.CLIENT_AUTHENTICATION and settings.HTTP_APP_KEY and settings.SECRET_KEY:
|
if settings.CLIENT_AUTHENTICATION and settings.HTTP_APP_KEY and settings.SECRET_KEY:
|
||||||
timestamp = str(round(time() * 1000))
|
timestamp = str(round(time() * 1000))
|
||||||
nonce = str(uuid1())
|
nonce = str(uuid1())
|
||||||
signature = b64encode(HMAC(settings.SECRET_KEY.encode('ascii'), b'\n'.join([
|
signature = b64encode(
|
||||||
timestamp.encode('ascii'),
|
HMAC(
|
||||||
nonce.encode('ascii'),
|
settings.SECRET_KEY.encode("ascii"),
|
||||||
settings.HTTP_APP_KEY.encode('ascii'),
|
b"\n".join(
|
||||||
prepped.path_url.encode('ascii'),
|
[
|
||||||
prepped.body if kwargs.get('json') else b'',
|
timestamp.encode("ascii"),
|
||||||
urlencode(
|
nonce.encode("ascii"),
|
||||||
sorted(
|
settings.HTTP_APP_KEY.encode("ascii"),
|
||||||
kwargs['data'].items()),
|
prepped.path_url.encode("ascii"),
|
||||||
quote_via=quote,
|
prepped.body if kwargs.get("json") else b"",
|
||||||
safe='-._~').encode('ascii')
|
urlencode(sorted(kwargs["data"].items()), quote_via=quote, safe="-._~").encode("ascii") if kwargs.get("data") and isinstance(kwargs["data"], dict) else b"",
|
||||||
if kwargs.get('data') and isinstance(kwargs['data'], dict) else b'',
|
]
|
||||||
]), 'sha1').digest()).decode('ascii')
|
),
|
||||||
|
"sha1",
|
||||||
|
).digest()
|
||||||
|
).decode("ascii")
|
||||||
|
|
||||||
prepped.headers.update({
|
prepped.headers.update(
|
||||||
'TIMESTAMP': timestamp,
|
{
|
||||||
'NONCE': nonce,
|
"TIMESTAMP": timestamp,
|
||||||
'APP-KEY': settings.HTTP_APP_KEY,
|
"NONCE": nonce,
|
||||||
'SIGNATURE': signature,
|
"APP-KEY": settings.HTTP_APP_KEY,
|
||||||
})
|
"SIGNATURE": signature,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
return sess.send(prepped, stream=stream, timeout=timeout)
|
return sess.send(prepped, stream=stream, timeout=timeout)
|
||||||
|
|
||||||
@ -87,7 +88,7 @@ def request(**kwargs):
 def get_exponential_backoff_interval(retries, full_jitter=False):
     """Calculate the exponential backoff wait time."""
     # Will be zero if factor equals 0
-    countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2 ** retries))
+    countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2**retries))
     # Full jitter according to
     # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
     if full_jitter:
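For reference, what this helper computes, restated with stand-in constants; REQUEST_WAIT_SEC and REQUEST_MAX_WAIT_SEC come from api.constants in the real code, and the full-jitter branch below follows the standard AWS-style formulation since that line is outside this hunk:

import random

REQUEST_WAIT_SEC = 2
REQUEST_MAX_WAIT_SEC = 60

def backoff(retries: int, full_jitter: bool = False) -> int:
    countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2**retries))
    if full_jitter:
        countdown = random.randrange(countdown + 1)
    return max(0, countdown)

# retries 0..5 without jitter -> 2, 4, 8, 16, 32, 60 (capped)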
@ -96,12 +97,9 @@ def get_exponential_backoff_interval(retries, full_jitter=False):
|
|||||||
return max(0, countdown)
|
return max(0, countdown)
|
||||||
|
|
||||||
|
|
||||||
def get_data_error_result(code=settings.RetCode.DATA_ERROR,
|
def get_data_error_result(code=settings.RetCode.DATA_ERROR, message="Sorry! Data missing!"):
|
||||||
message='Sorry! Data missing!'):
|
|
||||||
logging.exception(Exception(message))
|
logging.exception(Exception(message))
|
||||||
result_dict = {
|
result_dict = {"code": code, "message": message}
|
||||||
"code": code,
|
|
||||||
"message": message}
|
|
||||||
response = {}
|
response = {}
|
||||||
for key, value in result_dict.items():
|
for key, value in result_dict.items():
|
||||||
if value is None and key != "code":
|
if value is None and key != "code":
|
||||||
@ -119,23 +117,27 @@ def server_error_response(e):
|
|||||||
except BaseException:
|
except BaseException:
|
||||||
pass
|
pass
|
||||||
if len(e.args) > 1:
|
if len(e.args) > 1:
|
||||||
return get_json_result(
|
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
|
||||||
code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
|
|
||||||
if repr(e).find("index_not_found_exception") >= 0:
|
if repr(e).find("index_not_found_exception") >= 0:
|
||||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR,
|
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message="No chunk found, please upload file and parse it.")
|
||||||
message="No chunk found, please upload file and parse it.")
|
|
||||||
|
|
||||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))
|
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))
|
||||||
|
|
||||||
|
|
||||||
def error_response(response_code, message=None):
|
def error_response(response_code, message=None):
|
||||||
if message is None:
|
if message is None:
|
||||||
message = HTTP_STATUS_CODES.get(response_code, 'Unknown Error')
|
message = HTTP_STATUS_CODES.get(response_code, "Unknown Error")
|
||||||
|
|
||||||
return Response(json.dumps({
|
return Response(
|
||||||
'message': message,
|
json.dumps(
|
||||||
'code': response_code,
|
{
|
||||||
}), status=response_code, mimetype='application/json')
|
"message": message,
|
||||||
|
"code": response_code,
|
||||||
|
}
|
||||||
|
),
|
||||||
|
status=response_code,
|
||||||
|
mimetype="application/json",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def validate_request(*args, **kwargs):
|
def validate_request(*args, **kwargs):
|
||||||
@ -160,13 +162,10 @@ def validate_request(*args, **kwargs):
|
|||||||
if no_arguments or error_arguments:
|
if no_arguments or error_arguments:
|
||||||
error_string = ""
|
error_string = ""
|
||||||
if no_arguments:
|
if no_arguments:
|
||||||
error_string += "required argument are missing: {}; ".format(
|
error_string += "required argument are missing: {}; ".format(",".join(no_arguments))
|
||||||
",".join(no_arguments))
|
|
||||||
if error_arguments:
|
if error_arguments:
|
||||||
error_string += "required argument values: {}".format(
|
error_string += "required argument values: {}".format(",".join(["{}={}".format(a[0], a[1]) for a in error_arguments]))
|
||||||
",".join(["{}={}".format(a[0], a[1]) for a in error_arguments]))
|
return get_json_result(code=settings.RetCode.ARGUMENT_ERROR, message=error_string)
|
||||||
return get_json_result(
|
|
||||||
code=settings.RetCode.ARGUMENT_ERROR, message=error_string)
|
|
||||||
return func(*_args, **_kwargs)
|
return func(*_args, **_kwargs)
|
||||||
|
|
||||||
return decorated_function
|
return decorated_function
|
||||||
@ -180,8 +179,7 @@ def not_allowed_parameters(*params):
|
|||||||
input_arguments = flask_request.json or flask_request.form.to_dict()
|
input_arguments = flask_request.json or flask_request.form.to_dict()
|
||||||
for param in params:
|
for param in params:
|
||||||
if param in input_arguments:
|
if param in input_arguments:
|
||||||
return get_json_result(
|
return get_json_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"Parameter {param} isn't allowed")
|
||||||
code=settings.RetCode.ARGUMENT_ERROR, message=f"Parameter {param} isn't allowed")
|
|
||||||
return f(*args, **kwargs)
|
return f(*args, **kwargs)
|
||||||
|
|
||||||
return wrapper
|
return wrapper
|
||||||
@ -190,14 +188,14 @@ def not_allowed_parameters(*params):
|
|||||||
|
|
||||||
|
|
||||||
def is_localhost(ip):
|
def is_localhost(ip):
|
||||||
return ip in {'127.0.0.1', '::1', '[::1]', 'localhost'}
|
return ip in {"127.0.0.1", "::1", "[::1]", "localhost"}
|
||||||
|
|
||||||
|
|
||||||
def send_file_in_mem(data, filename):
|
def send_file_in_mem(data, filename):
|
||||||
if not isinstance(data, (str, bytes)):
|
if not isinstance(data, (str, bytes)):
|
||||||
data = json_dumps(data)
|
data = json_dumps(data)
|
||||||
if isinstance(data, str):
|
if isinstance(data, str):
|
||||||
data = data.encode('utf-8')
|
data = data.encode("utf-8")
|
||||||
|
|
||||||
f = BytesIO()
|
f = BytesIO()
|
||||||
f.write(data)
|
f.write(data)
|
||||||
@ -206,7 +204,7 @@ def send_file_in_mem(data, filename):
|
|||||||
return send_file(f, as_attachment=True, attachment_filename=filename)
|
return send_file(f, as_attachment=True, attachment_filename=filename)
|
||||||
|
|
||||||
|
|
||||||
def get_json_result(code=settings.RetCode.SUCCESS, message='success', data=None):
|
def get_json_result(code=settings.RetCode.SUCCESS, message="success", data=None):
|
||||||
response = {"code": code, "message": message, "data": data}
|
response = {"code": code, "message": message, "data": data}
|
||||||
return jsonify(response)
|
return jsonify(response)
|
||||||
|
|
||||||
@ -214,27 +212,24 @@ def get_json_result(code=settings.RetCode.SUCCESS, message='success', data=None)
|
|||||||
def apikey_required(func):
|
def apikey_required(func):
|
||||||
@wraps(func)
|
@wraps(func)
|
||||||
def decorated_function(*args, **kwargs):
|
def decorated_function(*args, **kwargs):
|
||||||
token = flask_request.headers.get('Authorization').split()[1]
|
token = flask_request.headers.get("Authorization").split()[1]
|
||||||
objs = APIToken.query(token=token)
|
objs = APIToken.query(token=token)
|
||||||
if not objs:
|
if not objs:
|
||||||
return build_error_result(
|
return build_error_result(message="API-KEY is invalid!", code=settings.RetCode.FORBIDDEN)
|
||||||
message='API-KEY is invalid!', code=settings.RetCode.FORBIDDEN
|
kwargs["tenant_id"] = objs[0].tenant_id
|
||||||
)
|
|
||||||
kwargs['tenant_id'] = objs[0].tenant_id
|
|
||||||
return func(*args, **kwargs)
|
return func(*args, **kwargs)
|
||||||
|
|
||||||
return decorated_function
|
return decorated_function
|
||||||
|
|
||||||
|
|
||||||
def build_error_result(code=settings.RetCode.FORBIDDEN, message='success'):
|
def build_error_result(code=settings.RetCode.FORBIDDEN, message="success"):
|
||||||
response = {"code": code, "message": message}
|
response = {"code": code, "message": message}
|
||||||
response = jsonify(response)
|
response = jsonify(response)
|
||||||
response.status_code = code
|
response.status_code = code
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
def construct_response(code=settings.RetCode.SUCCESS,
|
def construct_response(code=settings.RetCode.SUCCESS, message="success", data=None, auth=None):
|
||||||
message='success', data=None, auth=None):
|
|
||||||
result_dict = {"code": code, "message": message, "data": data}
|
result_dict = {"code": code, "message": message, "data": data}
|
||||||
response_dict = {}
|
response_dict = {}
|
||||||
for key, value in result_dict.items():
|
for key, value in result_dict.items():
|
||||||
@ -253,7 +248,7 @@ def construct_response(code=settings.RetCode.SUCCESS,
|
|||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
def construct_result(code=settings.RetCode.DATA_ERROR, message='data is missing'):
|
def construct_result(code=settings.RetCode.DATA_ERROR, message="data is missing"):
|
||||||
result_dict = {"code": code, "message": message}
|
result_dict = {"code": code, "message": message}
|
||||||
response = {}
|
response = {}
|
||||||
for key, value in result_dict.items():
|
for key, value in result_dict.items():
|
||||||
@ -264,7 +259,7 @@ def construct_result(code=settings.RetCode.DATA_ERROR, message='data is missing'
|
|||||||
return jsonify(response)
|
return jsonify(response)
|
||||||
|
|
||||||
|
|
||||||
def construct_json_result(code=settings.RetCode.SUCCESS, message='success', data=None):
|
def construct_json_result(code=settings.RetCode.SUCCESS, message="success", data=None):
|
||||||
if data is None:
|
if data is None:
|
||||||
return jsonify({"code": code, "message": message})
|
return jsonify({"code": code, "message": message})
|
||||||
else:
|
else:
|
||||||
@ -286,7 +281,7 @@ def construct_error_response(e):
|
|||||||
def token_required(func):
|
def token_required(func):
|
||||||
@wraps(func)
|
@wraps(func)
|
||||||
def decorated_function(*args, **kwargs):
|
def decorated_function(*args, **kwargs):
|
||||||
authorization_str = flask_request.headers.get('Authorization')
|
authorization_str = flask_request.headers.get("Authorization")
|
||||||
if not authorization_str:
|
if not authorization_str:
|
||||||
return get_json_result(data=False, message="`Authorization` can't be empty")
|
return get_json_result(data=False, message="`Authorization` can't be empty")
|
||||||
authorization_list = authorization_str.split()
|
authorization_list = authorization_str.split()
|
||||||
@ -295,11 +290,8 @@ def token_required(func):
|
|||||||
token = authorization_list[1]
|
token = authorization_list[1]
|
||||||
objs = APIToken.query(token=token)
|
objs = APIToken.query(token=token)
|
||||||
if not objs:
|
if not objs:
|
||||||
return get_json_result(
|
return get_json_result(data=False, message="Authentication error: API key is invalid!", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||||
data=False, message='Authentication error: API key is invalid!',
|
kwargs["tenant_id"] = objs[0].tenant_id
|
||||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
|
||||||
)
|
|
||||||
kwargs['tenant_id'] = objs[0].tenant_id
|
|
||||||
return func(*args, **kwargs)
|
return func(*args, **kwargs)
|
||||||
|
|
||||||
return decorated_function
|
return decorated_function
|
||||||
@ -316,11 +308,11 @@ def get_result(code=settings.RetCode.SUCCESS, message="", data=None):
|
|||||||
return jsonify(response)
|
return jsonify(response)
|
||||||
|
|
||||||
|
|
||||||
def get_error_data_result(message='Sorry! Data missing!', code=settings.RetCode.DATA_ERROR,
|
def get_error_data_result(
|
||||||
):
|
message="Sorry! Data missing!",
|
||||||
result_dict = {
|
code=settings.RetCode.DATA_ERROR,
|
||||||
"code": code,
|
):
|
||||||
"message": message}
|
result_dict = {"code": code, "message": message}
|
||||||
response = {}
|
response = {}
|
||||||
for key, value in result_dict.items():
|
for key, value in result_dict.items():
|
||||||
if value is None and key != "code":
|
if value is None and key != "code":
|
||||||
@ -335,11 +327,9 @@ def generate_confirmation_token(tenent_id):
|
|||||||
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenent_id)[2:34]
|
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenent_id)[2:34]
|
||||||
|
|
||||||
|
|
||||||
def valid(permission, valid_permission, language, valid_language, chunk_method, valid_chunk_method):
|
def valid(permission, valid_permission, chunk_method, valid_chunk_method):
|
||||||
if valid_parameter(permission, valid_permission):
|
if valid_parameter(permission, valid_permission):
|
||||||
return valid_parameter(permission, valid_permission)
|
return valid_parameter(permission, valid_permission)
|
||||||
if valid_parameter(language, valid_language):
|
|
||||||
return valid_parameter(language, valid_language)
|
|
||||||
if valid_parameter(chunk_method, valid_chunk_method):
|
if valid_parameter(chunk_method, valid_chunk_method):
|
||||||
return valid_parameter(chunk_method, valid_chunk_method)
|
return valid_parameter(chunk_method, valid_chunk_method)
|
||||||
|
|
||||||
@ -349,14 +339,17 @@ def valid_parameter(parameter, valid_values):
|
|||||||
return get_error_data_result(f"'{parameter}' is not in {valid_values}")
|
return get_error_data_result(f"'{parameter}' is not in {valid_values}")
|
||||||
|
|
||||||
|
|
||||||
|
def dataset_readonly_fields(field_name):
|
||||||
|
return field_name in ["chunk_count", "create_date", "create_time", "update_date", "update_time", "created_by", "document_count", "token_num", "status", "tenant_id", "id"]
|
||||||
|
|
||||||
|
|
||||||
def get_parser_config(chunk_method, parser_config):
|
def get_parser_config(chunk_method, parser_config):
|
||||||
if parser_config:
|
if parser_config:
|
||||||
return parser_config
|
return parser_config
|
||||||
if not chunk_method:
|
if not chunk_method:
|
||||||
chunk_method = "naive"
|
chunk_method = "naive"
|
||||||
key_mapping = {
|
key_mapping = {
|
||||||
"naive": {"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False, "layout_recognize": "DeepDOC",
|
"naive": {"chunk_token_num": 128, "delimiter": "\\n!?;。;!?", "html4excel": False, "layout_recognize": "DeepDOC", "raptor": {"use_raptor": False}},
|
||||||
"raptor": {"use_raptor": False}},
|
|
||||||
"qa": {"raptor": {"use_raptor": False}},
|
"qa": {"raptor": {"use_raptor": False}},
|
||||||
"tag": None,
|
"tag": None,
|
||||||
"resume": None,
|
"resume": None,
|
||||||
@ -367,9 +360,115 @@ def get_parser_config(chunk_method, parser_config):
|
|||||||
"laws": {"raptor": {"use_raptor": False}},
|
"laws": {"raptor": {"use_raptor": False}},
|
||||||
"presentation": {"raptor": {"use_raptor": False}},
|
"presentation": {"raptor": {"use_raptor": False}},
|
||||||
"one": None,
|
"one": None,
|
||||||
"knowledge_graph": {"chunk_token_num": 8192, "delimiter": "\\n!?;。;!?",
|
"knowledge_graph": {"chunk_token_num": 8192, "delimiter": "\\n!?;。;!?", "entity_types": ["organization", "person", "location", "event", "time"]},
|
||||||
"entity_types": ["organization", "person", "location", "event", "time"]},
|
|
||||||
"email": None,
|
"email": None,
|
||||||
"picture": None}
|
"picture": None,
|
||||||
|
}
|
||||||
parser_config = key_mapping[chunk_method]
|
parser_config = key_mapping[chunk_method]
|
||||||
return parser_config
|
return parser_config
|
||||||


def get_data_openai(id=None,
                    created=None,
                    model=None,
                    prompt_tokens=0,
                    completion_tokens=0,
                    content=None,
                    finish_reason=None,
                    object="chat.completion",
                    param=None,
                    ):
    total_tokens = prompt_tokens + completion_tokens
    return {
        "id": f"{id}",
        "object": object,
        "created": int(time.time()) if created else None,
        "model": model,
        "param": param,
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": total_tokens,
            "completion_tokens_details": {
                "reasoning_tokens": 0,
                "accepted_prediction_tokens": 0,
                "rejected_prediction_tokens": 0
            }
        },
        "choices": [
            {
                "message": {
                    "role": "assistant",
                    "content": content
                },
                "logprobs": None,
                "finish_reason": finish_reason,
                "index": 0
            }
        ]
    }


def valid_parser_config(parser_config):
    if not parser_config:
        return
    scopes = set(
        [
            "chunk_token_num",
            "delimiter",
            "raptor",
            "graphrag",
            "layout_recognize",
            "task_page_size",
            "pages",
            "html4excel",
            "auto_keywords",
            "auto_questions",
            "tag_kb_ids",
            "topn_tags",
            "filename_embd_weight",
        ]
    )
    for k in parser_config.keys():
        assert k in scopes, f"Abnormal 'parser_config'. Invalid key: {k}"

    assert isinstance(parser_config.get("chunk_token_num", 1), int), "chunk_token_num should be int"
    assert 1 <= parser_config.get("chunk_token_num", 1) < 100000000, "chunk_token_num should be in range from 1 to 100000000"
    assert isinstance(parser_config.get("task_page_size", 1), int), "task_page_size should be int"
    assert 1 <= parser_config.get("task_page_size", 1) < 100000000, "task_page_size should be in range from 1 to 100000000"
    assert isinstance(parser_config.get("auto_keywords", 1), int), "auto_keywords should be int"
    assert 0 <= parser_config.get("auto_keywords", 0) < 32, "auto_keywords should be in range from 0 to 32"
    assert isinstance(parser_config.get("auto_questions", 1), int), "auto_questions should be int"
    assert 0 <= parser_config.get("auto_questions", 0) < 10, "auto_questions should be in range from 0 to 10"
    assert isinstance(parser_config.get("topn_tags", 1), int), "topn_tags should be int"
    assert 0 <= parser_config.get("topn_tags", 0) < 10, "topn_tags should be in range from 0 to 10"
    assert isinstance(parser_config.get("html4excel", False), bool), "html4excel should be True or False"
    assert isinstance(parser_config.get("delimiter", ""), str), "delimiter should be str"


def check_duplicate_ids(ids, id_type="item"):
    """
    Check for duplicate IDs in a list and return unique IDs and error messages.

    Args:
        ids (list): List of IDs to check for duplicates
        id_type (str): Type of ID for error messages (e.g., 'document', 'dataset', 'chunk')

    Returns:
        tuple: (unique_ids, error_messages)
            - unique_ids (list): List of unique IDs
            - error_messages (list): List of error messages for duplicate IDs
    """
    id_count = {}
    duplicate_messages = []

    # Count occurrences of each ID
    for id_value in ids:
        id_count[id_value] = id_count.get(id_value, 0) + 1

    # Check for duplicates
    for id_value, count in id_count.items():
        if count > 1:
            duplicate_messages.append(f"Duplicate {id_type} ids: {id_value}")

    # Return unique IDs and error messages
    return list(set(ids)), duplicate_messages
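Example use of the new check_duplicate_ids() helper, with made-up values:

unique_ids, errors = check_duplicate_ids(["doc1", "doc2", "doc1"], id_type="document")
# unique_ids -> ["doc1", "doc2"]  (set-based, order not guaranteed)
# errors     -> ["Duplicate document ids: doc1"]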
@ -17,6 +17,8 @@ import base64
 import json
 import os
 import re
+import sys
+import threading
 from io import BytesIO

 import pdfplumber
@ -30,6 +32,10 @@ from api.constants import IMG_BASE64_PREFIX
 PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
 RAG_BASE = os.getenv("RAG_BASE")

+LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
+if LOCK_KEY_pdfplumber not in sys.modules:
+    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
+

 def get_project_base_directory(*args):
     global PROJECT_BASE
@ -175,19 +181,21 @@ def thumbnail_img(filename, blob):
     """
     filename = filename.lower()
     if re.match(r".*\.pdf$", filename):
-        pdf = pdfplumber.open(BytesIO(blob))
-        buffered = BytesIO()
-        resolution = 32
-        img = None
-        for _ in range(10):
-            # https://github.com/jsvine/pdfplumber?tab=readme-ov-file#creating-a-pageimage-with-to_image
-            pdf.pages[0].to_image(resolution=resolution).annotated.save(buffered, format="png")
-            img = buffered.getvalue()
-            if len(img) >= 64000 and resolution >= 2:
-                resolution = resolution / 2
-                buffered = BytesIO()
-            else:
-                break
+        with sys.modules[LOCK_KEY_pdfplumber]:
+            pdf = pdfplumber.open(BytesIO(blob))
+            buffered = BytesIO()
+            resolution = 32
+            img = None
+            for _ in range(10):
+                # https://github.com/jsvine/pdfplumber?tab=readme-ov-file#creating-a-pageimage-with-to_image
+                pdf.pages[0].to_image(resolution=resolution).annotated.save(buffered, format="png")
+                img = buffered.getvalue()
+                if len(img) >= 64000 and resolution >= 2:
+                    resolution = resolution / 2
+                    buffered = BytesIO()
+                else:
+                    break
+            pdf.close()
         return img

     elif re.match(r".*\.(jpg|jpeg|png|tif|gif|icon|ico|webp)$", filename):
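Stashing the Lock under a string key in sys.modules appears intended to give every importer of this module, including re-imports, one process-wide lock around pdfplumber rendering. A minimal illustration of the pattern, not RAGFlow code:

import sys
import threading

KEY = "global_shared_lock_demo"
if KEY not in sys.modules:
    sys.modules[KEY] = threading.Lock()  # the first importer creates the lock

with sys.modules[KEY]:  # later importers reuse the very same object
    pass  # the pdfplumber.open(...) / to_image(...) calls would go here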
@ -18,6 +18,8 @@ import os.path
 import logging
 from logging.handlers import RotatingFileHandler

+initialized_root_logger = False
+

 def get_project_base_directory():
     PROJECT_BASE = os.path.abspath(
         os.path.join(
@ -29,10 +31,13 @@ def get_project_base_directory():
     return PROJECT_BASE


 def initRootLogger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
-    logger = logging.getLogger()
-    if logger.hasHandlers():
+    global initialized_root_logger
+    if initialized_root_logger:
         return
+    initialized_root_logger = True
+
+    logger = logging.getLogger()
+    logger.handlers.clear()
     log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))

     os.makedirs(os.path.dirname(log_path), exist_ok=True)
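A hypothetical call sequence, assuming this hunk is the initRootLogger helper in api/utils/log_utils.py (the file name is not shown in this view):

from api.utils.log_utils import initRootLogger

initRootLogger("ragflow_server")  # configures the root logger, writes to logs/ragflow_server.log
initRootLogger("ragflow_server")  # now a no-op: the module-level flag short-circuits re-initialization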
@ -5,14 +5,14 @@
   "create_time": {"type": "varchar", "default": ""},
   "create_timestamp_flt": {"type": "float", "default": 0.0},
   "img_id": {"type": "varchar", "default": ""},
-  "docnm_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "docnm_kwd": {"type": "varchar", "default": ""},
   "title_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
   "title_sm_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "name_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "tag_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "name_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+  "important_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+  "tag_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "important_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "question_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "question_tks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
   "content_with_weight": {"type": "varchar", "default": ""},
   "content_ltks": {"type": "varchar", "default": "", "analyzer": "whitespace"},
@ -27,16 +27,16 @@
   "rank_int": {"type": "integer", "default": 0},
   "rank_flt": {"type": "float", "default": 0},
   "available_int": {"type": "integer", "default": 1},
-  "knowledge_graph_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
+  "knowledge_graph_kwd": {"type": "varchar", "default": ""},
+  "entities_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "pagerank_fea": {"type": "integer", "default": 0},
   "tag_feas": {"type": "varchar", "default": ""},

-  "from_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "to_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"},
-  "source_id": {"type": "varchar", "default": ""},
+  "from_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+  "to_entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+  "entity_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+  "entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
+  "source_id": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "n_hop_with_weight": {"type": "varchar", "default": ""},
-  "removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace"}
+  "removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"}
 }
File diff suppressed because it is too large
@ -5,25 +5,25 @@ mysql:
   name: 'rag_flow'
   user: 'root'
   password: 'infini_rag_flow'
-  host: 'mysql'
+  host: 'localhost'
   port: 5455
   max_connections: 100
   stale_timeout: 30
 minio:
   user: 'rag_flow'
   password: 'infini_rag_flow'
-  host: 'minio:9000'
+  host: 'localhost:9000'
 es:
-  hosts: 'http://es01:1200'
+  hosts: 'http://localhost:1200'
   username: 'elastic'
   password: 'infini_rag_flow'
 infinity:
-  uri: 'infinity:23817'
+  uri: 'localhost:23817'
   db_name: 'default_db'
 redis:
   db: 1
   password: 'infini_rag_flow'
-  host: 'redis:6379'
+  host: 'localhost:6379'

 # postgres:
 #   name: 'rag_flow'
@ -37,6 +37,12 @@ redis:
 #   access_key: 'access_key'
 #   secret_key: 'secret_key'
 #   region: 'region'
+# oss:
+#   access_key: 'access_key'
+#   secret_key: 'secret_key'
+#   endpoint_url: 'http://oss-cn-hangzhou.aliyuncs.com'
+#   region: 'cn-hangzhou'
+#   bucket: 'bucket_name'
 # azure:
 #   auth_type: 'sas'
 #   container_url: 'container_url'
@ -113,4 +113,4 @@ PDF、DOCX、EXCEL和PPT四种文档格式都有相应的解析器。最复杂
|
|||||||
|
|
||||||
### 简历
|
### 简历
|
||||||
|
|
||||||
简历是一种非常复杂的文件。一份由各种布局的非结构化文本组成的简历可以分解为由近百个字段组成的结构化数据。我们还没有打开解析器,因为我们在解析过程之后打开了处理方法。
|
简历是一种非常复杂的文档。由各种格式的非结构化文本构成的简历可以被解析为包含近百个字段的结构化数据。我们还没有启用解析器,因为在解析过程之后才会启动处理方法。
|
||||||
|
|||||||
@ -1,6 +1,3 @@
|
|||||||
#
|
|
||||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
# You may obtain a copy of the License at
|
# You may obtain a copy of the License at
|
||||||
@ -14,20 +11,68 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
from openpyxl import load_workbook
|
import logging
|
||||||
import sys
|
import sys
|
||||||
from io import BytesIO
|
from io import BytesIO
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
from openpyxl import Workbook, load_workbook
|
||||||
|
|
||||||
from rag.nlp import find_codec
|
from rag.nlp import find_codec
|
||||||
|
|
||||||
|
|
||||||
class RAGFlowExcelParser:
|
class RAGFlowExcelParser:
|
||||||
def html(self, fnm, chunk_rows=256):
|
|
||||||
if isinstance(fnm, str):
|
|
||||||
wb = load_workbook(fnm)
|
|
||||||
else:
|
|
||||||
wb = load_workbook(BytesIO(fnm))
|
|
||||||
|
|
    @staticmethod
    def _load_excel_to_workbook(file_like_object):
        if isinstance(file_like_object, bytes):
            file_like_object = BytesIO(file_like_object)

        # Read first 4 bytes to determine file type
        file_like_object.seek(0)
        file_head = file_like_object.read(4)
        file_like_object.seek(0)

        if not (file_head.startswith(b'PK\x03\x04') or file_head.startswith(b'\xD0\xCF\x11\xE0')):
            logging.info("****wxy: Not an Excel file, converting CSV to Excel Workbook")

            try:
                file_like_object.seek(0)
                df = pd.read_csv(file_like_object)
                return RAGFlowExcelParser._dataframe_to_workbook(df)
            except Exception as e_csv:
                raise Exception(f"****wxy: Failed to parse CSV and convert to Excel Workbook: {e_csv}")

        try:
            return load_workbook(file_like_object, data_only=True)
        except Exception as e:
            logging.info(f"****wxy: openpyxl load error: {e}, try pandas instead")
            try:
                file_like_object.seek(0)
                df = pd.read_excel(file_like_object)
                return RAGFlowExcelParser._dataframe_to_workbook(df)
            except Exception as e_pandas:
                raise Exception(f"****wxy: pandas.read_excel error: {e_pandas}, original openpyxl error: {e}")

    @staticmethod
    def _dataframe_to_workbook(df):
        wb = Workbook()
        ws = wb.active
        ws.title = "Data"

        for col_num, column_name in enumerate(df.columns, 1):
            ws.cell(row=1, column=col_num, value=column_name)

        for row_num, row in enumerate(df.values, 2):
            for col_num, value in enumerate(row, 1):
                ws.cell(row=row_num, column=col_num, value=value)

        return wb

    def html(self, fnm, chunk_rows=256):
        file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
        wb = RAGFlowExcelParser._load_excel_to_workbook(file_like_object)
        tb_chunks = []
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]

@@ -45,7 +90,7 @@ class RAGFlowExcelParser:
            tb += f"<table><caption>{sheetname}</caption>"
            tb += tb_rows_0
            for r in list(
-                rows[1 + chunk_i * chunk_rows : 1 + (chunk_i + 1) * chunk_rows]
+                rows[1 + chunk_i * chunk_rows: 1 + (chunk_i + 1) * chunk_rows]
            ):
                tb += "<tr>"
                for i, c in enumerate(r):

@@ -60,10 +105,9 @@ class RAGFlowExcelParser:
        return tb_chunks

    def __call__(self, fnm):
-        if isinstance(fnm, str):
-            wb = load_workbook(fnm)
-        else:
-            wb = load_workbook(BytesIO(fnm))
+        file_like_object = BytesIO(fnm) if not isinstance(fnm, str) else fnm
+        wb = RAGFlowExcelParser._load_excel_to_workbook(file_like_object)
        res = []
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]

@@ -88,7 +132,7 @@ class RAGFlowExcelParser:
    @staticmethod
    def row_number(fnm, binary):
        if fnm.split(".")[-1].lower().find("xls") >= 0:
-            wb = load_workbook(BytesIO(binary))
+            wb = RAGFlowExcelParser._load_excel_to_workbook(BytesIO(binary))
            total = 0
            for sheetname in wb.sheetnames:
                ws = wb[sheetname]
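A minimal usage sketch for the loader above. The import path and the file name are assumptions; the point is that raw CSV bytes (no XLSX/XLS magic header) are silently converted into an openpyxl Workbook:

# Sketch only; import path assumed from the class name.
from io import BytesIO
from deepdoc.parser.excel_parser import RAGFlowExcelParser

with open("prices.csv", "rb") as f:      # hypothetical CSV file
    raw = f.read()

wb = RAGFlowExcelParser._load_excel_to_workbook(BytesIO(raw))
print(wb.sheetnames)                     # a converted CSV ends up in a single sheet named "Data"

parser = RAGFlowExcelParser()
html_chunks = parser.html(raw, chunk_rows=256)   # HTML table chunks per sheet
rows = parser(raw)                               # flattened "header: value" rows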
deepdoc/parser/figure_parser.py (new file, 91 lines)
@@ -0,0 +1,91 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from PIL import Image

from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk
from rag.prompts import vision_llm_figure_describe_prompt


def vision_figure_parser_figure_data_wraper(figures_data_without_positions):
    return [(
        (figure_data[1], [figure_data[0]]),
        [(0, 0, 0, 0, 0)]
    ) for figure_data in figures_data_without_positions if isinstance(figure_data[1], Image.Image)]


class VisionFigureParser:
    def __init__(self, vision_model, figures_data, *args, **kwargs):
        self.vision_model = vision_model
        self._extract_figures_info(figures_data)
        assert len(self.figures) == len(self.descriptions)
        assert not self.positions or (len(self.figures) == len(self.positions))

    def _extract_figures_info(self, figures_data):
        self.figures = []
        self.descriptions = []
        self.positions = []

        for item in figures_data:
            # position
            if len(item) == 2 and isinstance(item[1], list) and len(item[1]) == 1 and isinstance(item[1][0], tuple) and len(item[1][0]) == 5:
                img_desc = item[0]
                assert len(img_desc) == 2 and isinstance(img_desc[0], Image.Image) and isinstance(img_desc[1], list), "Should be (figure, [description])"
                self.figures.append(img_desc[0])
                self.descriptions.append(img_desc[1])
                self.positions.append(item[1])
            else:
                assert len(item) == 2 and isinstance(item, tuple) and isinstance(item[1], list), f"get {len(item)=}, {item=}"
                self.figures.append(item[0])
                self.descriptions.append(item[1])

    def _assemble(self):
        self.assembled = []
        self.has_positions = len(self.positions) != 0
        for i in range(len(self.figures)):
            figure = self.figures[i]
            desc = self.descriptions[i]
            pos = self.positions[i] if self.has_positions else None

            figure_desc = (figure, desc)

            if pos is not None:
                self.assembled.append((figure_desc, pos))
            else:
                self.assembled.append((figure_desc,))

        return self.assembled

    def __call__(self, **kwargs):
        callback = kwargs.get("callback", lambda prog, msg: None)

        for idx, img_binary in enumerate(self.figures or []):
            figure_num = idx  # 0-based

            txt = picture_vision_llm_chunk(
                binary=img_binary,
                vision_model=self.vision_model,
                prompt=vision_llm_figure_describe_prompt(),
                callback=callback,
            )

            if txt:
                self.descriptions[figure_num] = txt + "\n".join(self.descriptions[figure_num])

        self._assemble()

        return self.assembled
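A short usage sketch for the new figure parser. Here `vision_model` stands for whatever vision-capable model object rag.app.picture.vision_llm_chunk accepts; it is not defined in this snippet:

# Sketch only; the image file and vision_model are placeholders.
from PIL import Image
from deepdoc.parser.figure_parser import VisionFigureParser, vision_figure_parser_figure_data_wraper

figures = [("original caption", Image.open("figure1.png"))]
figures_data = vision_figure_parser_figure_data_wraper(figures)
# -> [((image, ["original caption"]), [(0, 0, 0, 0, 0)])]

parser = VisionFigureParser(vision_model=vision_model, figures_data=figures_data)
assembled = parser(callback=lambda prog, msg: None)
# Each element is ((figure, [text]), position); the text now begins with the
# model-generated description followed by the original caption.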
@@ -22,27 +22,56 @@ class RAGFlowMarkdownParser:
        self.chunk_token_num = int(chunk_token_num)

    def extract_tables_and_remainder(self, markdown_text):
        tables = []
        remainder = markdown_text
        if "|" in markdown_text: # for optimize performance
            # Standard Markdown table
            border_table_pattern = re.compile(
                r'''
                (?:\n|^)
                (?:\|.*?\|.*?\|.*?\n)
                (?:\|(?:\s*[:-]+[-| :]*\s*)\|.*?\n)
                (?:\|.*?\|.*?\|.*?\n)+
                ''', re.VERBOSE)
            border_tables = border_table_pattern.findall(markdown_text)
            tables.extend(border_tables)
            remainder = border_table_pattern.sub('', remainder)

            # Borderless Markdown table
            no_border_table_pattern = re.compile(
                r'''
                (?:\n|^)
                (?:\S.*?\|.*?\n)
                (?:(?:\s*[:-]+[-| :]*\s*).*?\n)
                (?:\S.*?\|.*?\n)+
                ''', re.VERBOSE)
            no_border_tables = no_border_table_pattern.findall(remainder)
            tables.extend(no_border_tables)
            remainder = no_border_table_pattern.sub('', remainder)

        if "<table>" in remainder.lower(): # for optimize performance
            # HTML table extraction - handle possible html/body wrapper tags
            html_table_pattern = re.compile(
                r'''
                (?:\n|^)
                \s*
                (?:
                    # case1: <html><body><table>...</table></body></html>
                    (?:<html[^>]*>\s*<body[^>]*>\s*<table[^>]*>.*?</table>\s*</body>\s*</html>)
                    |
                    # case2: <body><table>...</table></body>
                    (?:<body[^>]*>\s*<table[^>]*>.*?</table>\s*</body>)
                    |
                    # case3: only <table>...</table>
                    (?:<table[^>]*>.*?</table>)
                )
                \s*
                (?=\n|$)
                ''',
                re.VERBOSE | re.DOTALL | re.IGNORECASE
            )
            html_tables = html_table_pattern.findall(remainder)
            tables.extend(html_tables)
            remainder = html_table_pattern.sub('', remainder)

        return remainder, tables
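A small illustration of what the rewritten extractor returns; the import path is assumed and the markdown string is made up:

from deepdoc.parser.markdown_parser import RAGFlowMarkdownParser   # assumed import path

md = (
    "Quarterly numbers below.\n"
    "\n"
    "| region | revenue |\n"
    "| ------ | ------- |\n"
    "| EMEA   | 120     |\n"
    "\n"
    "<table><tr><td>inline html</td></tr></table>\n"
    "\n"
    "Closing remark.\n"
)

parser = RAGFlowMarkdownParser(chunk_token_num=128)
remainder, tables = parser.extract_tables_and_remainder(md)
# tables holds the pipe table and the <table> block as raw strings;
# remainder keeps only the surrounding prose.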
@@ -17,25 +17,53 @@
import logging
import os
import random
import re
import sys
import threading
from copy import deepcopy
from io import BytesIO
from timeit import default_timer as timer

import numpy as np
import pdfplumber
import trio
import xgboost as xgb
from huggingface_hub import snapshot_download
from PIL import Image
from pypdf import PdfReader as pdf2_read

from api import settings
from api.utils.file_utils import get_project_base_directory
from deepdoc.vision import OCR, LayoutRecognizer, Recognizer, TableStructureRecognizer
from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk
from rag.nlp import rag_tokenizer
from rag.prompts import vision_llm_describe_prompt
from rag.settings import PARALLEL_DEVICES

LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()


class RAGFlowPdfParser:
-    def __init__(self):
+    def __init__(self, **kwargs):
+        """
+        If you have trouble downloading HuggingFace models, -_^ this might help!!
+
+        For Linux:
+        export HF_ENDPOINT=https://hf-mirror.com
+
+        For Windows:
+        Good luck
+        ^_-
+
+        """
        self.ocr = OCR()
+        self.parallel_limiter = None
+        if PARALLEL_DEVICES is not None and PARALLEL_DEVICES > 1:
+            self.parallel_limiter = [trio.CapacityLimiter(1) for _ in range(PARALLEL_DEVICES)]

        if hasattr(self, "model_speciess"):
            self.layouter = LayoutRecognizer("layout." + self.model_speciess)
        else:

@@ -45,7 +73,7 @@ class RAGFlowPdfParser:
        self.updown_cnt_mdl = xgb.Booster()
        if not settings.LIGHTEN:
            try:
-                import torch
+                import torch.cuda
                if torch.cuda.is_available():
                    self.updown_cnt_mdl.set_param({"device": "cuda"})
            except Exception:

@@ -65,17 +93,6 @@ class RAGFlowPdfParser:
                model_dir, "updown_concat_xgb.model"))

        self.page_from = 0
-        """
-        If you have trouble downloading HuggingFace models, -_^ this might help!!
-
-        For Linux:
-        export HF_ENDPOINT=https://hf-mirror.com
-
-        For Windows:
-        Good luck
-        ^_-
-
-        """

    def __char_width(self, c):
        return (c["x1"] - c["x0"]) // max(len(c["text"]), 1)

@@ -90,7 +107,7 @@ class RAGFlowPdfParser:
    def _y_dis(
            self, a, b):
        return (
            b["top"] + b["bottom"] - a["top"] - a["bottom"]) / 2

    def _match_proj(self, b):
        proj_patt = [

@@ -113,9 +130,9 @@ class RAGFlowPdfParser:
        tks_down = rag_tokenizer.tokenize(down["text"][:LEN]).split()
        tks_up = rag_tokenizer.tokenize(up["text"][-LEN:]).split()
        tks_all = up["text"][-LEN:].strip() \
            + (" " if re.match(r"[a-zA-Z0-9]+",
                               up["text"][-1] + down["text"][0]) else "") \
            + down["text"][:LEN].strip()
        tks_all = rag_tokenizer.tokenize(tks_all).split()
        fea = [
            up.get("R", -1) == down.get("R", -1),

@@ -137,7 +154,7 @@ class RAGFlowPdfParser:
            True if re.search(r"[,,][^。.]+$", up["text"]) else False,
            True if re.search(r"[,,][^。.]+$", up["text"]) else False,
            True if re.search(r"[\((][^\))]+$", up["text"])
            and re.search(r"[\))]", down["text"]) else False,
            self._match_proj(down),
            True if re.match(r"[A-Z]", down["text"]) else False,
            True if re.match(r"[A-Z]", up["text"][-1]) else False,

@@ -199,7 +216,7 @@ class RAGFlowPdfParser:
                continue
            for tb in tbls:  # for table
                left, top, right, bott = tb["x0"] - MARGIN, tb["top"] - MARGIN, \
                    tb["x1"] + MARGIN, tb["bottom"] + MARGIN
                left *= ZM
                top *= ZM
                right *= ZM

@@ -276,8 +293,12 @@ class RAGFlowPdfParser:
                b["H_right"] = spans[ii]["x1"]
                b["SP"] = ii

-    def __ocr(self, pagenum, img, chars, ZM=3):
-        bxs = self.ocr.detect(np.array(img))
+    def __ocr(self, pagenum, img, chars, ZM=3, device_id: int | None = None):
+        start = timer()
+        bxs = self.ocr.detect(np.array(img), device_id)
+        logging.info(f"__ocr detecting boxes of a image cost ({timer() - start}s)")
+
+        start = timer()
        if not bxs:
            self.boxes.append([])
            return

@@ -289,7 +310,7 @@ class RAGFlowPdfParser:
            "page_number": pagenum} for b, t in bxs if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]],
            self.mean_height[-1] / 3
        )

        # merge chars in the same rect
        for c in Recognizer.sort_Y_firstly(
                chars, self.mean_height[pagenum - 1] // 4):

@@ -308,14 +329,22 @@ class RAGFlowPdfParser:
            else:
                bxs[ii]["text"] += c["text"]

+        logging.info(f"__ocr sorting {len(chars)} chars cost {timer() - start}s")
+        start = timer()
+        boxes_to_reg = []
+        img_np = np.array(img)
        for b in bxs:
            if not b["text"]:
                left, right, top, bott = b["x0"] * ZM, b["x1"] * \
                    ZM, b["top"] * ZM, b["bottom"] * ZM
-                b["text"] = self.ocr.recognize(np.array(img),
-                                               np.array([[left, top], [right, top], [right, bott], [left, bott]],
-                                                        dtype=np.float32))
+                b["box_image"] = self.ocr.get_rotate_crop_image(img_np, np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32))
+                boxes_to_reg.append(b)
            del b["txt"]
+        texts = self.ocr.recognize_batch([b["box_image"] for b in boxes_to_reg], device_id)
+        for i in range(len(boxes_to_reg)):
+            boxes_to_reg[i]["text"] = texts[i]
+            del boxes_to_reg[i]["box_image"]
+        logging.info(f"__ocr recognize {len(bxs)} boxes cost {timer() - start}s")
        bxs = [b for b in bxs if b["text"]]
        if self.mean_height[-1] == 0:
            self.mean_height[-1] = np.median([b["bottom"] - b["top"]

@@ -429,7 +458,7 @@ class RAGFlowPdfParser:
                    b_["text"],
                    any(feats),
                    any(concatting_feats),
                ))
                i += 1
                continue
            # merge up and down

@@ -624,8 +653,7 @@ class RAGFlowPdfParser:
            b_["top"] = b["top"]
            self.boxes.pop(i)

-    def _extract_table_figure(self, need_image, ZM,
-                              return_html, need_position):
+    def _extract_table_figure(self, need_image, ZM, return_html, need_position, separate_tables_figures=False):
        tables = {}
        figures = {}
        # extract figure and table boxes

@@ -637,7 +665,7 @@ class RAGFlowPdfParser:
                i += 1
                continue
            lout_no = str(self.boxes[i]["page_number"]) + \
                "-" + str(self.boxes[i]["layoutno"])
            if TableStructureRecognizer.is_caption(self.boxes[i]) or self.boxes[i]["layout_type"] in ["table caption",
                                                                                                      "title",
                                                                                                      "figure caption",

@@ -739,9 +767,6 @@ class RAGFlowPdfParser:
                              tk)
            self.boxes.pop(i)

-        res = []
-        positions = []
-
        def cropout(bxs, ltype, poss):
            nonlocal ZM
            pn = set([b["page_number"] - 1 for b in bxs])

@@ -789,6 +814,10 @@ class RAGFlowPdfParser:
                height += img.size[1]
            return pic

+        res = []
+        positions = []
+        figure_results = []
+        figure_positions = []
        # crop figure out and add caption
        for k, bxs in figures.items():
            txt = "\n".join([b["text"] for b in bxs])

@@ -796,28 +825,46 @@ class RAGFlowPdfParser:
                continue

            poss = []

            if separate_tables_figures:
                figure_results.append(
                    (cropout(
                        bxs,
                        "figure", poss),
                        [txt]))
                figure_positions.append(poss)
            else:
                res.append(
                    (cropout(
                        bxs,
                        "figure", poss),
                        [txt]))
                positions.append(poss)

        for k, bxs in tables.items():
            if not bxs:
                continue
            bxs = Recognizer.sort_Y_firstly(bxs, np.mean(
                [(b["bottom"] - b["top"]) / 2 for b in bxs]))

            poss = []

            res.append((cropout(bxs, "table", poss),
                        self.tbl_det.construct_table(bxs, html=return_html, is_english=self.is_english)))
            positions.append(poss)

        if separate_tables_figures:
            assert len(positions) + len(figure_positions) == len(res) + len(figure_results)
            if need_position:
                return list(zip(res, positions)), list(zip(figure_results, figure_positions))
            else:
                return res, figure_results
        else:
            assert len(positions) == len(res)
            if need_position:
                return list(zip(res, positions))
            else:
                return res

@@ -935,9 +982,12 @@ class RAGFlowPdfParser:
    @staticmethod
    def total_page_number(fnm, binary=None):
        try:
            with sys.modules[LOCK_KEY_pdfplumber]:
                pdf = pdfplumber.open(
                    fnm) if not binary else pdfplumber.open(BytesIO(binary))
                total_page = len(pdf.pages)
                pdf.close()
                return total_page
        except Exception:
            logging.exception("total_page_number")
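For orientation, the two return shapes of _extract_table_figure after this change, as a sketch written from the signature above rather than taken from the repository's callers (`zoomin` is the caller's zoom factor):

# Inside a RAGFlowPdfParser subclass, after layout analysis has filled self.boxes:
# legacy behaviour - tables and figures mixed in one list
tbls = self._extract_table_figure(True, zoomin, return_html=True, need_position=True)

# new behaviour - figures are returned separately so they can be routed to a vision model
tbls, figures = self._extract_table_figure(
    True, zoomin, return_html=True, need_position=True, separate_tables_figures=True)
# tbls    -> [((image, table_content), positions), ...]
# figures -> [((image, [caption_text]), positions), ...]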
@@ -951,59 +1001,59 @@ class RAGFlowPdfParser:
        self.page_cum_height = [0]
        self.page_layout = []
        self.page_from = page_from
        start = timer()
        try:
            with sys.modules[LOCK_KEY_pdfplumber]:
                with (pdfplumber.open(fnm) if isinstance(fnm, str) else pdfplumber.open(BytesIO(fnm))) as pdf:
                    self.pdf = pdf
                    self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                        enumerate(self.pdf.pages[page_from:page_to])]

                    try:
                        self.page_chars = [[c for c in page.dedupe_chars().chars if self._has_color(c)] for page in self.pdf.pages[page_from:page_to]]
                    except Exception as e:
                        logging.warning(f"Failed to extract characters for pages {page_from}-{page_to}: {str(e)}")
                        self.page_chars = [[] for _ in range(page_to - page_from)]  # If failed to extract, using empty list instead.

                    self.total_page = len(self.pdf.pages)
        except Exception:
            logging.exception("RAGFlowPdfParser __images__")
        logging.info(f"__images__ dedupe_chars cost {timer() - start}s")

        self.outlines = []
        try:
            with (pdf2_read(fnm if isinstance(fnm, str)
                            else BytesIO(fnm))) as pdf:
                self.pdf = pdf

                outlines = self.pdf.outline

                def dfs(arr, depth):
                    for a in arr:
                        if isinstance(a, dict):
                            self.outlines.append((a["/Title"], depth))
                            continue
                        dfs(a, depth + 1)

                dfs(outlines, 0)
        except Exception as e:
            logging.warning(f"Outlines exception: {e}")
        if not self.outlines:
            logging.warning("Miss outlines")

        logging.debug("Images converted.")
        self.is_english = [re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join(
            random.choices([c["text"] for c in self.page_chars[i]], k=min(100, len(self.page_chars[i]))))) for i in
            range(len(self.page_chars))]
        if sum([1 if e else 0 for e in self.is_english]) > len(
                self.page_images) / 2:
            self.is_english = True
        else:
            self.is_english = False

        async def __img_ocr(i, id, img, chars, limiter):
            j = 0
            while j + 1 < len(chars):
                if chars[j]["text"] and chars[j + 1]["text"] \

@@ -1013,10 +1063,45 @@ class RAGFlowPdfParser:
                    chars[j]["text"] += " "
                j += 1

-            self.__ocr(i + 1, img, chars, zoomin)
+            if limiter:
+                async with limiter:
+                    await trio.to_thread.run_sync(lambda: self.__ocr(i + 1, img, chars, zoomin, id))
+            else:
+                self.__ocr(i + 1, img, chars, zoomin, id)

            if callback and i % 6 == 5:
                callback(prog=(i + 1) * 0.6 / len(self.page_images), msg="")
-        # print("OCR:", timer()-st)

+        async def __img_ocr_launcher():
+            def __ocr_preprocess():
+                chars = self.page_chars[i] if not self.is_english else []
+                self.mean_height.append(
+                    np.median(sorted([c["height"] for c in chars])) if chars else 0
+                )
+                self.mean_width.append(
+                    np.median(sorted([c["width"] for c in chars])) if chars else 8
+                )
+                self.page_cum_height.append(img.size[1] / zoomin)
+                return chars
+
+            if self.parallel_limiter:
+                async with trio.open_nursery() as nursery:
+                    for i, img in enumerate(self.page_images):
+                        chars = __ocr_preprocess()
+
+                        nursery.start_soon(__img_ocr, i, i % PARALLEL_DEVICES, img, chars,
+                                           self.parallel_limiter[i % PARALLEL_DEVICES])
+                        await trio.sleep(0.1)
+            else:
+                for i, img in enumerate(self.page_images):
+                    chars = __ocr_preprocess()
+                    await __img_ocr(i, 0, img, chars, None)
+
+        start = timer()
+        trio.run(__img_ocr_launcher)
+        logging.info(f"__images__ {len(self.page_images)} pages cost {timer() - start}s")

        if not self.is_english and not any(
                [c for c in self.page_chars]) and self.boxes:

@@ -1080,7 +1165,7 @@ class RAGFlowPdfParser:
                self.page_images[pns[0]].crop((left * ZM, top * ZM,
                                               right *
                                               ZM, min(
                                                   bottom, self.page_images[pns[0]].size[1])
                                               ))
            )
            if 0 < ii < len(poss) - 1:

@@ -1142,7 +1227,7 @@ class RAGFlowPdfParser:
        return poss


-class PlainParser(object):
+class PlainParser:
    def __call__(self, filename, from_page=0, to_page=100000, **kwargs):
        self.outlines = []
        lines = []

@@ -1178,5 +1263,52 @@ class PlainParser(object):
        raise NotImplementedError


class VisionParser(RAGFlowPdfParser):
    def __init__(self, vision_model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vision_model = vision_model

    def __images__(self, fnm, zoomin=3, page_from=0, page_to=299, callback=None):
        try:
            with sys.modules[LOCK_KEY_pdfplumber]:
                self.pdf = pdfplumber.open(fnm) if isinstance(
                    fnm, str) else pdfplumber.open(BytesIO(fnm))
                self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                    enumerate(self.pdf.pages[page_from:page_to])]
                self.total_page = len(self.pdf.pages)
        except Exception:
            self.page_images = None
            self.total_page = 0
            logging.exception("VisionParser __images__")

    def __call__(self, filename, from_page=0, to_page=100000, **kwargs):
        callback = kwargs.get("callback", lambda prog, msg: None)

        self.__images__(fnm=filename, zoomin=3, page_from=from_page, page_to=to_page, **kwargs)

        total_pdf_pages = self.total_page

        start_page = max(0, from_page)
        end_page = min(to_page, total_pdf_pages)

        all_docs = []

        for idx, img_binary in enumerate(self.page_images or []):
            pdf_page_num = idx  # 0-based
            if pdf_page_num < start_page or pdf_page_num >= end_page:
                continue

            docs = picture_vision_llm_chunk(
                binary=img_binary,
                vision_model=self.vision_model,
                prompt=vision_llm_describe_prompt(page=pdf_page_num+1),
                callback=callback,
            )

            if docs:
                all_docs.append(docs)
        return [(doc, "") for doc in all_docs], []


if __name__ == "__main__":
    pass
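A usage sketch for the new VisionParser. `vision_model` is a placeholder for whatever multimodal model handle the caller already passes to vision_llm_chunk elsewhere, and the import path and PDF name are assumptions:

from deepdoc.parser.pdf_parser import VisionParser     # assumed import path

parser = VisionParser(vision_model=vision_model)
with open("report.pdf", "rb") as f:                     # hypothetical PDF
    binary = f.read()

sections, tables = parser(binary, from_page=0, to_page=4,
                          callback=lambda prog, msg: print(prog, msg))
# sections -> [(page_description_text, ""), ...]; tables is always [] for this parser.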
@@ -19,29 +19,60 @@ from io import BytesIO
from pptx import Presentation


-class RAGFlowPptParser(object):
+class RAGFlowPptParser:
    def __init__(self):
        super().__init__()

    def __get_bulleted_text(self, paragraph):
        is_bulleted = bool(paragraph._p.xpath("./a:pPr/a:buChar")) or bool(paragraph._p.xpath("./a:pPr/a:buAutoNum")) or bool(paragraph._p.xpath("./a:pPr/a:buBlip"))
        if is_bulleted:
            return f"{' '* paragraph.level}.{paragraph.text}"
        else:
            return paragraph.text

    def __extract(self, shape):
        try:
            # First try to get text content
            if hasattr(shape, 'has_text_frame') and shape.has_text_frame:
                text_frame = shape.text_frame
                texts = []
                for paragraph in text_frame.paragraphs:
                    if paragraph.text.strip():
                        texts.append(self.__get_bulleted_text(paragraph))
                return "\n".join(texts)

            # Safely get shape_type
            try:
                shape_type = shape.shape_type
            except NotImplementedError:
                # If shape_type is not available, try to get text content
                if hasattr(shape, 'text'):
                    return shape.text.strip()
                return ""

            # Handle table
            if shape_type == 19:
                tb = shape.table
                rows = []
                for i in range(1, len(tb.rows)):
                    rows.append("; ".join([tb.cell(
                        0, j).text + ": " + tb.cell(i, j).text for j in range(len(tb.columns)) if tb.cell(i, j)]))
                return "\n".join(rows)

            # Handle group shape
            if shape_type == 6:
                texts = []
                for p in sorted(shape.shapes, key=lambda x: (x.top // 10, x.left)):
                    t = self.__extract_texts(p)
                    if t:
                        texts.append(t)
                return "\n".join(texts)

            return ""

        except Exception as e:
            logging.error(f"Error processing shape: {str(e)}")
            return ""

    def __call__(self, fnm, from_page, to_page, callback=None):
        ppt = Presentation(fnm) if isinstance(

@@ -65,4 +96,4 @@ class RAGFlowPptParser(object):
                logging.exception(e)
            txts.append("\n".join(texts))

        return txts

@@ -30,10 +30,10 @@ GOODS = pd.read_csv(
GOODS["cid"] = GOODS["cid"].astype(str)
GOODS = GOODS.set_index(["cid"])
CORP_TKS = json.load(
-    open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r")
+    open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r", encoding="utf-8")
)
-GOOD_CORP = json.load(open(os.path.join(current_file_path, "res/good_corp.json"), "r"))
-CORP_TAG = json.load(open(os.path.join(current_file_path, "res/corp_tag.json"), "r"))
+GOOD_CORP = json.load(open(os.path.join(current_file_path, "res/good_corp.json"), "r", encoding="utf-8"))
+CORP_TAG = json.load(open(os.path.join(current_file_path, "res/corp_tag.json"), "r", encoding="utf-8"))


def baike(cid, default_v=0):

@@ -25,7 +25,7 @@ TBL = pd.read_csv(
    os.path.join(current_file_path, "res/schools.csv"), sep="\t", header=0
).fillna("")
TBL["name_en"] = TBL["name_en"].map(lambda x: x.lower().strip())
-GOOD_SCH = json.load(open(os.path.join(current_file_path, "res/good_sch.json"), "r"))
+GOOD_SCH = json.load(open(os.path.join(current_file_path, "res/good_sch.json"), "r", encoding="utf-8"))
GOOD_SCH = set([re.sub(r"[,. &()()]+", "", c) for c in GOOD_SCH])
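The only functional change in the two resource loaders above is the explicit encoding. A small illustration of why it matters; the path is a placeholder:

import json

# Without an explicit encoding, open() falls back to the platform default
# (for example cp936/gbk on some Windows hosts) and json.load can then fail
# on UTF-8 resource files that contain Chinese text.
with open("res/corp_tag.json", "r", encoding="utf-8") as f:   # placeholder path
    corp_tag = json.load(f)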
@@ -31,6 +31,7 @@ class RAGFlowTxtParser:
            raise TypeError("txt type should be str!")
        cks = [""]
        tk_nums = [0]
+        delimiter = delimiter.encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8')

        def add_chunk(t):
            nonlocal cks, tk_nums, delimiter

@@ -51,11 +52,13 @@ class RAGFlowTxtParser:
                s = t
        if s < len(delimiter):
            dels.extend(list(delimiter[s:]))
-        dels = [re.escape(d) for d in delimiter if d]
+        dels = [re.escape(d) for d in dels if d]
        dels = [d for d in dels if d]
        dels = "|".join(dels)
        secs = re.split(r"(%s)" % dels, txt)
        for sec in secs:
+            if re.match(f"^{dels}$", sec):
+                continue
            add_chunk(sec)

        return [[c, ""] for c in cks]
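A short illustration of the delimiter handling added above. The round-trip decode turns an escape sequence typed as literal text (for example a backslash followed by n) into a real control character while leaving CJK characters intact; the sample values are made up:

import re

delimiter = "\\n。;"          # as received from configuration, with a literal backslash-n
delimiter = delimiter.encode('utf-8').decode('unicode_escape').encode('latin1').decode('utf-8')
print(repr(delimiter))         # '\n。;' - the escape is now a real newline

dels = "|".join(re.escape(d) for d in delimiter if d)
print(re.split(r"(%s)" % dels, "第一段。第二段;第三段"))
# ['第一段', '。', '第二段', ';', '第三段'] - delimiter tokens are captured separately
# and the new `if re.match(f"^{dels}$", sec): continue` guard keeps them out of the chunks.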
@@ -14,7 +14,8 @@
# limitations under the License.
#
import io
+import sys
+import threading
import pdfplumber

from .ocr import OCR

@@ -23,6 +24,11 @@ from .layout_recognizer import LayoutRecognizer4YOLOv10 as LayoutRecognizer
from .table_structure_recognizer import TableStructureRecognizer


+LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
+if LOCK_KEY_pdfplumber not in sys.modules:
+    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
+
+
def init_in_out(args):
    from PIL import Image
    import os

@@ -36,12 +42,14 @@ def init_in_out(args):

    def pdf_pages(fnm, zoomin=3):
        nonlocal outputs, images
-        pdf = pdfplumber.open(fnm)
-        images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
-                  enumerate(pdf.pages)]
+        with sys.modules[LOCK_KEY_pdfplumber]:
+            pdf = pdfplumber.open(fnm)
+            images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
+                      enumerate(pdf.pages)]

        for i, page in enumerate(images):
            outputs.append(os.path.split(fnm)[-1] + f"_{i}.jpg")
+        pdf.close()

    def images_and_outputs(fnm):
        nonlocal outputs, images

@@ -46,8 +46,8 @@ class LayoutRecognizer(Recognizer):
    def __init__(self, domain):
        try:
            model_dir = os.path.join(
                get_project_base_directory(),
                "rag/res/deepdoc")
            super().__init__(self.labels, domain, model_dir)
        except Exception:
            model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",

@@ -56,18 +56,23 @@ class LayoutRecognizer(Recognizer):
            super().__init__(self.labels, domain, model_dir)

        self.garbage_layouts = ["footer", "header", "reference"]
+        self.client = None
+        if os.environ.get("TENSORRT_DLA_SVR"):
+            from deepdoc.vision.dla_cli import DLAClient
+            self.client = DLAClient(os.environ["TENSORRT_DLA_SVR"])

-    def __call__(self, image_list, ocr_res, scale_factor=3,
-                 thr=0.2, batch_size=16, drop=True):
+    def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16, drop=True):
        def __is_garbage(b):
-            patt = [r"^•+$", r"(版权归©|免责条款|地址[::])", r"\.{3,}", "^[0-9]{1,2} / ?[0-9]{1,2}$",
+            patt = [r"^•+$", "^[0-9]{1,2} / ?[0-9]{1,2}$",
                    r"^[0-9]{1,2} of [0-9]{1,2}$", "^http://[^ ]{12,}",
-                    "(资料|数据)来源[::]", "[0-9a-z._-]+@[a-z0-9-]+\\.[a-z]{2,3}",
                    "\\(cid *: *[0-9]+ *\\)"
                    ]
            return any([re.search(p, b["text"]) for p in patt])

-        layouts = super().__call__(image_list, thr, batch_size)
+        if self.client:
+            layouts = self.client.predict(image_list)
+        else:
+            layouts = super().__call__(image_list, thr, batch_size)
        # save_results(image_list, layouts, self.labels, output_dir='output/', threshold=0.7)
        assert len(image_list) == len(ocr_res)
        # Tag layout type

@@ -160,6 +165,7 @@ class LayoutRecognizer(Recognizer):
    def forward(self, image_list, thr=0.7, batch_size=16):
        return super().__call__(image_list, thr, batch_size)


class LayoutRecognizer4YOLOv10(LayoutRecognizer):
    labels = [
        "title",

@@ -185,9 +191,9 @@ class LayoutRecognizer4YOLOv10(LayoutRecognizer):

    def preprocess(self, image_list):
        inputs = []
        new_shape = self.input_shape  # height, width
        for img in image_list:
-            shape = img.shape[:2]# current shape [height, width]
+            shape = img.shape[:2]  # current shape [height, width]
            # Scale ratio (new / old)
            r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
            # Compute padding

@@ -242,4 +248,3 @@ class LayoutRecognizer4YOLOv10(LayoutRecognizer):
                "bbox": [float(t) for t in boxes[i].tolist()],
                "score": float(scores[i])
            } for i in indices]
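The same lock idiom now appears in both pdf_parser.py and this package: a process-wide lock is stashed in sys.modules so every importer shares a single instance. A stripped-down sketch of the pattern; open_pdf_safely is a hypothetical helper, not part of the repository:

import sys
import threading

LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    # sys.modules acts as a convenient process-wide dict here, so repeated imports
    # and parallel importers all see the same Lock object instead of creating their own.
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()

def open_pdf_safely(path):
    import pdfplumber
    with sys.modules[LOCK_KEY_pdfplumber]:
        # serialize the open/render step, which is what the change above does
        return pdfplumber.open(path)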
@@ -22,6 +22,7 @@ import os
from huggingface_hub import snapshot_download

from api.utils.file_utils import get_project_base_directory
+from rag.settings import PARALLEL_DEVICES
from .operators import *  # noqa: F403
from . import operators
import math

@@ -31,6 +32,7 @@ import onnxruntime as ort

from .postprocess import build_post_process

+loaded_models = {}

def transform(data, ops=None):
    """ transform """

@@ -65,8 +67,16 @@ def create_operators(op_param_list, global_config=None):
    return ops


-def load_model(model_dir, nm):
+def load_model(model_dir, nm, device_id: int | None = None):
    model_file_path = os.path.join(model_dir, nm + ".onnx")
+    model_cached_tag = model_file_path + str(device_id) if device_id is not None else model_file_path
+
+    global loaded_models
+    loaded_model = loaded_models.get(model_cached_tag)
+    if loaded_model:
+        logging.info(f"load_model {model_file_path} reuses cached model")
+        return loaded_model
+
    if not os.path.exists(model_file_path):
        raise ValueError("not find model file path {}".format(
            model_file_path))

@@ -74,7 +84,7 @@ def load_model(model_dir, nm):
    def cuda_is_available():
        try:
            import torch
-            if torch.cuda.is_available():
+            if torch.cuda.is_available() and torch.cuda.device_count() > device_id:
                return True
        except Exception:
            return False

@@ -91,7 +101,7 @@ def load_model(model_dir, nm):
    run_options = ort.RunOptions()
    if cuda_is_available():
        cuda_provider_options = {
-            "device_id": 0,  # Use specific GPU
+            "device_id": device_id,  # Use specific GPU
            "gpu_mem_limit": 512 * 1024 * 1024,  # Limit gpu memory
            "arena_extend_strategy": "kNextPowerOfTwo",  # gpu memory allocation strategy
        }

@@ -101,20 +111,22 @@ def load_model(model_dir, nm):
            providers=['CUDAExecutionProvider'],
            provider_options=[cuda_provider_options]
        )
-        run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:0")
-        logging.info(f"TextRecognizer {nm} uses GPU")
+        run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:" + str(device_id))
+        logging.info(f"load_model {model_file_path} uses GPU")
    else:
        sess = ort.InferenceSession(
            model_file_path,
            options=options,
            providers=['CPUExecutionProvider'])
        run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu")
-        logging.info(f"TextRecognizer {nm} uses CPU")
-    return sess, sess.get_inputs()[0], run_options
+        logging.info(f"load_model {model_file_path} uses CPU")
+    loaded_model = (sess, run_options)
+    loaded_models[model_cached_tag] = loaded_model
+    return loaded_model


-class TextRecognizer(object):
-    def __init__(self, model_dir):
+class TextRecognizer:
+    def __init__(self, model_dir, device_id: int | None = None):
        self.rec_image_shape = [int(v) for v in "3, 48, 320".split(",")]
        self.rec_batch_num = 16
        postprocess_params = {

@@ -123,7 +135,8 @@ class TextRecognizer(object):
            "use_space_char": True
        }
        self.postprocess_op = build_post_process(postprocess_params)
-        self.predictor, self.input_tensor, self.run_options = load_model(model_dir, 'rec')
+        self.predictor, self.run_options = load_model(model_dir, 'rec', device_id)
+        self.input_tensor = self.predictor.get_inputs()[0]

    def resize_norm_img(self, img, max_wh_ratio):
        imgC, imgH, imgW = self.rec_image_shape

@@ -383,8 +396,8 @@ class TextRecognizer(object):
        return rec_res, time.time() - st


-class TextDetector(object):
-    def __init__(self, model_dir):
+class TextDetector:
+    def __init__(self, model_dir, device_id: int | None = None):
        pre_process_list = [{
            'DetResizeForTest': {
                'limit_side_len': 960,

@@ -408,7 +421,8 @@ class TextDetector(object):
            "unclip_ratio": 1.5, "use_dilation": False, "score_mode": "fast", "box_type": "quad"}

        self.postprocess_op = build_post_process(postprocess_params)
-        self.predictor, self.input_tensor, self.run_options = load_model(model_dir, 'det')
+        self.predictor, self.run_options = load_model(model_dir, 'det', device_id)
+        self.input_tensor = self.predictor.get_inputs()[0]

        img_h, img_w = self.input_tensor.shape[2:]
        if isinstance(img_h, str) or isinstance(img_w, str):

@@ -495,7 +509,7 @@ class TextDetector(object):
        return dt_boxes, time.time() - st


-class OCR(object):
+class OCR:
    def __init__(self, model_dir=None):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!

@@ -513,14 +527,33 @@ class OCR(object):
            model_dir = os.path.join(
                get_project_base_directory(),
                "rag/res/deepdoc")

-            self.text_detector = TextDetector(model_dir)
-            self.text_recognizer = TextRecognizer(model_dir)
+            # Append muti-gpus task to the list
+            if PARALLEL_DEVICES is not None and PARALLEL_DEVICES > 0:
+                self.text_detector = []
+                self.text_recognizer = []
+                for device_id in range(PARALLEL_DEVICES):
+                    self.text_detector.append(TextDetector(model_dir, device_id))
+                    self.text_recognizer.append(TextRecognizer(model_dir, device_id))
+            else:
+                self.text_detector = [TextDetector(model_dir, 0)]
+                self.text_recognizer = [TextRecognizer(model_dir, 0)]
+
        except Exception:
            model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc",
                                          local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                                          local_dir_use_symlinks=False)
-            self.text_detector = TextDetector(model_dir)
-            self.text_recognizer = TextRecognizer(model_dir)
+            if PARALLEL_DEVICES is not None:
+                assert PARALLEL_DEVICES > 0, "Number of devices must be >= 1"
+                self.text_detector = []
+                self.text_recognizer = []
+                for device_id in range(PARALLEL_DEVICES):
+                    self.text_detector.append(TextDetector(model_dir, device_id))
+                    self.text_recognizer.append(TextRecognizer(model_dir, device_id))
+            else:
+                self.text_detector = [TextDetector(model_dir, 0)]
+                self.text_recognizer = [TextRecognizer(model_dir, 0)]

        self.drop_score = 0.5
        self.crop_image_res_index = 0

@@ -582,14 +615,17 @@ class OCR(object):
                break
        return _boxes

-    def detect(self, img):
+    def detect(self, img, device_id: int | None = None):
+        if device_id is None:
+            device_id = 0
+
        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}

        if img is None:
            return None, None, time_dict

        start = time.time()
-        dt_boxes, elapse = self.text_detector(img)
+        dt_boxes, elapse = self.text_detector[device_id](img)
        time_dict['det'] = elapse

        if dt_boxes is None:

@@ -600,24 +636,41 @@ class OCR(object):
        return zip(self.sorted_boxes(dt_boxes), [
            ("", 0) for _ in range(len(dt_boxes))])

-    def recognize(self, ori_im, box):
+    def recognize(self, ori_im, box, device_id: int | None = None):
+        if device_id is None:
+            device_id = 0
+
        img_crop = self.get_rotate_crop_image(ori_im, box)

-        rec_res, elapse = self.text_recognizer([img_crop])
+        rec_res, elapse = self.text_recognizer[device_id]([img_crop])
        text, score = rec_res[0]
        if score < self.drop_score:
            return ""
        return text

+    def recognize_batch(self, img_list, device_id: int | None = None):
+        if device_id is None:
+            device_id = 0
+        rec_res, elapse = self.text_recognizer[device_id](img_list)
+        texts = []
+        for i in range(len(rec_res)):
+            text, score = rec_res[i]
+            if score < self.drop_score:
+                text = ""
+            texts.append(text)
+        return texts
+
-    def __call__(self, img, cls=True):
+    def __call__(self, img, device_id=0, cls=True):
        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}
+        if device_id is None:
+            device_id = 0

        if img is None:
            return None, None, time_dict

        start = time.time()
        ori_im = img.copy()
-        dt_boxes, elapse = self.text_detector(img)
+        dt_boxes, elapse = self.text_detector[device_id](img)
        time_dict['det'] = elapse

        if dt_boxes is None:

@@ -634,7 +687,7 @@ class OCR(object):
            img_crop = self.get_rotate_crop_image(ori_im, tmp_box)
            img_crop_list.append(img_crop)

-        rec_res, elapse = self.text_recognizer(img_crop_list)
+        rec_res, elapse = self.text_recognizer[device_id](img_crop_list)

        time_dict['rec'] = elapse
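A small sketch of how the cached, device-aware loader above behaves; model_dir, img and crops are placeholders, and the import path is assumed:

from deepdoc.vision.ocr import OCR, load_model   # assumed import path

sess0, run_opts0 = load_model(model_dir, "det", device_id=0)   # first call builds the ONNX session
sess0_again, _ = load_model(model_dir, "det", device_id=0)     # second call returns the cached session
assert sess0 is sess0_again

sess1, _ = load_model(model_dir, "det", device_id=1)           # each GPU id gets its own cached session

ocr = OCR()                                   # builds one TextDetector/TextRecognizer per PARALLEL_DEVICES
boxes = ocr.detect(img, device_id=0)          # detection routed to device 0
texts = ocr.recognize_batch(crops, device_id=0)   # batched recognition on the same device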
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -23,7 +23,7 @@ import math
|
|||||||
from PIL import Image
|
from PIL import Image
|
||||||
|
|
||||||
|
|
||||||
class DecodeImage(object):
|
class DecodeImage:
|
||||||
""" decode image """
|
""" decode image """
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
@ -65,7 +65,7 @@ class DecodeImage(object):
|
|||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class StandardizeImage(object):
|
class StandardizeImag:
|
||||||
"""normalize image
|
"""normalize image
|
||||||
Args:
|
Args:
|
||||||
mean (list): im - mean
|
mean (list): im - mean
|
||||||
@ -102,7 +102,7 @@ class StandardizeImage(object):
|
|||||||
return im, im_info
|
return im, im_info
|
||||||
|
|
||||||
|
|
||||||
class NormalizeImage(object):
|
class NormalizeImage:
|
||||||
""" normalize image such as subtract mean, divide std
|
""" normalize image such as subtract mean, divide std
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@ -129,7 +129,7 @@ class NormalizeImage(object):
|
|||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class ToCHWImage(object):
|
class ToCHWImage:
|
||||||
""" convert hwc image to chw image
|
""" convert hwc image to chw image
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@@ -145,19 +145,7 @@ class ToCHWImage(object):
         return data


-class Fasttext(object):
-    def __init__(self, path="None", **kwargs):
-        import fasttext
-        self.fast_model = fasttext.load_model(path)
-
-    def __call__(self, data):
-        label = data['label']
-        fast_label = self.fast_model[label]
-        data['fast_label'] = fast_label
-        return data
-
-
-class KeepKeys(object):
+class KeepKeys:
     def __init__(self, keep_keys, **kwargs):
         self.keep_keys = keep_keys

@@ -168,7 +156,7 @@ class KeepKeys(object):
         return data_list


-class Pad(object):
+class Pad:
     def __init__(self, size=None, size_div=32, **kwargs):
         if size is not None and not isinstance(size, (int, list, tuple)):
             raise TypeError("Type of target_size is invalid. Now is {}".format(
@@ -206,7 +194,7 @@ class Pad(object):
         return data


-class LinearResize(object):
+class LinearResize:
     """resize image by target_size and max_size
     Args:
         target_size (int): the target size of image
@@ -273,7 +261,7 @@ class LinearResize(object):
         return im_scale_y, im_scale_x


-class Resize(object):
+class Resize:
     def __init__(self, size=(640, 640), **kwargs):
         self.size = size

@@ -303,7 +291,7 @@ class Resize(object):
         return data


-class DetResizeForTest(object):
+class DetResizeForTest:
     def __init__(self, **kwargs):
         super(DetResizeForTest, self).__init__()
         self.resize_type = 0
@@ -433,7 +421,7 @@ class DetResizeForTest(object):
         return img, [ratio_h, ratio_w]


-class E2EResizeForTest(object):
+class E2EResizeForTest:
     def __init__(self, **kwargs):
         super(E2EResizeForTest, self).__init__()
         self.max_side_len = kwargs['max_side_len']
@@ -501,7 +489,7 @@ class E2EResizeForTest(object):
         return im, (ratio_h, ratio_w)


-class KieResize(object):
+class KieResize:
     def __init__(self, **kwargs):
         super(KieResize, self).__init__()
         self.max_side, self.min_side = kwargs['img_scale'][0], kwargs[
@@ -551,7 +539,7 @@ class KieResize(object):
         return points


-class SRResize(object):
+class SRResize:
     def __init__(self,
                  imgH=32,
                  imgW=128,
@@ -588,7 +576,7 @@ class SRResize(object):
         return data


-class ResizeNormalize(object):
+class ResizeNormalize:
     def __init__(self, size, interpolation=Image.BICUBIC):
         self.size = size
         self.interpolation = interpolation
@@ -600,7 +588,7 @@ class ResizeNormalize(object):
         return img_numpy


-class GrayImageChannelFormat(object):
+class GrayImageChannelFormat:
     """
     format gray scale image's channel: (3,h,w) -> (1,h,w)
     Args:
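The docstring in this hunk describes collapsing a three-channel grayscale image to a single channel. A tiny numpy illustration of that shape change follows; the slicing strategy is an assumption for demonstration only, since the class body is not shown in this diff:

import numpy as np

# A grayscale image replicated across three channels, CHW layout.
chw3 = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
chw3[1] = chw3[0]
chw3[2] = chw3[0]

# Keep a single channel: (3, h, w) -> (1, h, w).
chw1 = chw3[:1]
assert chw1.shape == (1, 32, 32)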
@@ -624,7 +612,7 @@ class GrayImageChannelFormat(object):
         return data


-class Permute(object):
+class Permute:
     """permute image
     Args:
         to_bgr (bool): whether convert RGB to BGR
@@ -647,7 +635,7 @@ class Permute(object):
         return im, im_info


-class PadStride(object):
+class PadStride:
     """ padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
     Args:
         stride (bool): model with FPN need image shape % stride == 0
@@ -38,7 +38,7 @@ def build_post_process(config, global_config=None):
     return module_class(**config)


-class DBPostProcess(object):
+class DBPostProcess:
     """
     The post process for Differentiable Binarization (DB).
     """
@@ -259,7 +259,7 @@ class DBPostProcess(object):
         return boxes_batch


-class BaseRecLabelDecode(object):
+class BaseRecLabelDecode:
     """ Convert between text-label and text-index """

     def __init__(self, character_dict_path=None, use_space_char=False):
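The hunk header above shows that these post-processors are created through build_post_process(config, global_config=None), which ends in return module_class(**config). A minimal, self-contained sketch of that config-driven factory pattern follows; the registry, the "name" key, and the DBPostProcess parameters are illustrative assumptions, not code from this diff:

# Illustrative config-driven factory in the spirit of build_post_process().
# Everything except "return module_class(**config)" is an assumption.
POST_PROCESS_REGISTRY = {}


def register(cls):
    """Map a post-processor class to its name for lookup by config."""
    POST_PROCESS_REGISTRY[cls.__name__] = cls
    return cls


def build_post_process(config):
    config = dict(config)                       # avoid mutating the caller's dict
    module_class = POST_PROCESS_REGISTRY[config.pop("name")]
    return module_class(**config)


@register
class DBPostProcess:
    """Post process for Differentiable Binarization (DB)."""

    def __init__(self, thresh=0.3, box_thresh=0.6, **kwargs):
        self.thresh = thresh
        self.box_thresh = box_thresh


db = build_post_process({"name": "DBPostProcess", "thresh": 0.35})
assert isinstance(db, DBPostProcess) and db.thresh == 0.35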
Some files were not shown because too many files have changed in this diff.