Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00

Compare commits

727 Commits
| SHA1 | Author | Date |
|---|---|---|
| d44739283c | |||
| 9c953a67a6 | |||
| bd3fa317e7 | |||
| 715e2b48ca | |||
| 90d18143ba | |||
| 4b6809b32d | |||
| 7b96146d3f | |||
| 21c55a2e0f | |||
| 8e965040ce | |||
| 780ee2b2be | |||
| 6f9cd96ec5 | |||
| 47e244ee9f | |||
| df11fe75d3 | |||
| bf0d516e49 | |||
| b18da35da6 | |||
| 8ba1e6c183 | |||
| d4f84f0b54 | |||
| 6ec6ca6971 | |||
| 1163e9e409 | |||
| 15736c57c3 | |||
| fa817a8ab3 | |||
| 8b99635eb3 | |||
| 1919780880 | |||
| 82f5d901c8 | |||
| dc4d4342cd | |||
| e05658685c | |||
| b29539b442 | |||
| b1a46d5adc | |||
| 50c510d16b | |||
| 8a84d1048c | |||
| 2ad852d8df | |||
| ca39f5204d | |||
| 5b0e38060a | |||
| 66938e0b68 | |||
| 64c6cc4cf3 | |||
| 3418984848 | |||
| 3c79990934 | |||
| da3f279495 | |||
| b1bbb9e210 | |||
| 0e3e129a83 | |||
| c87b58511e | |||
| 8d61dcc8ab | |||
| 06b29d7da4 | |||
| 5229a76f68 | |||
| 4f9504305a | |||
| 27153dde85 | |||
| 9fc7174612 | |||
| 8fb8374dfc | |||
| ff35c140dc | |||
| df9b7b2fe9 | |||
| 48f3f49e80 | |||
| 94d7af00b8 | |||
| 251ba7f058 | |||
| 28296955f1 | |||
| 1b2fc3cc9a | |||
| b8da2eeb69 | |||
| 5f62f0c9d7 | |||
| a54843cc65 | |||
| 4326873af6 | |||
| a64f4539e7 | |||
| ec68ab1c8c | |||
| e5041749a2 | |||
| 78b2e0be89 | |||
| b6aded378d | |||
| 11e3f5e8b2 | |||
| f65c3ae62b | |||
| 02c955babb | |||
| ca04ae9540 | |||
| b0c21b00d9 | |||
| 47684fa17c | |||
| 148a7e7002 | |||
| 76e8285904 | |||
| 555c70672e | |||
| 850e218051 | |||
| fb4b5b0a06 | |||
| f256e1a59a | |||
| 9816b868f9 | |||
| 6e828f0fcb | |||
| 4d6484b03e | |||
| afe9269534 | |||
| 688cb8f19d | |||
| f6dd2cd1af | |||
| 69dc14f5d6 | |||
| 202acbd628 | |||
| a283fefd18 | |||
| d9bbaf5d6c | |||
| 1075b975c5 | |||
| c813c1ff4c | |||
| abac2ca2c5 | |||
| 64e9702a26 | |||
| 76cb4cd174 | |||
| 65d7c19979 | |||
| b67697b6f2 | |||
| 131f272e69 | |||
| 03d1265cfd | |||
| c190086707 | |||
| 5d89a8010b | |||
| 7a81fa00e9 | |||
| 606ed0c8ab | |||
| 8b1a4365ed | |||
| 8a2542157f | |||
| d6836444c9 | |||
| 3b30799b7e | |||
| e61da33672 | |||
| 6a71314d70 | |||
| 06e0c7d1a9 | |||
| 7600ebd263 | |||
| 21943ce0e2 | |||
| aa313e112a | |||
| 2c7428e2ee | |||
| 014f2ef900 | |||
| b418ce5643 | |||
| fe1c48178e | |||
| 35f13e882e | |||
| 85924e898e | |||
| 622b72db4b | |||
| a0a7b46cff | |||
| 37aacb3960 | |||
| 79bc9d97c9 | |||
| f150687dbc | |||
| b2a5482d2c | |||
| 5fdfb8d465 | |||
| 8b2c04abc4 | |||
| 83d0949498 | |||
| 244cf49ba4 | |||
| 651422127c | |||
| 11de7599e5 | |||
| 7a6e70d6b3 | |||
| 230865c4f7 | |||
| 4c9a3e918f | |||
| 5beb022ee1 | |||
| 170abf9b7f | |||
| afaa7144a5 | |||
| eaa1adb3b2 | |||
| fa76974e24 | |||
| f372bd8809 | |||
| 0284248c93 | |||
| d9dd1171a3 | |||
| fefea3a2a5 | |||
| 0e920a91dd | |||
| 63e3398f49 | |||
| cdcaae17c6 | |||
| 96e9d50060 | |||
| 5cab6c4ccb | |||
| b3b341173f | |||
| a9e4695b74 | |||
| 4f40f685d9 | |||
| ffb4cda475 | |||
| 5859a3df72 | |||
| 5c6a7cb4b8 | |||
| 4e2afcd3b8 | |||
| 11e6d84d46 | |||
| 53b9e7b52f | |||
| e5e9ca0015 | |||
| 150ab9c6a4 | |||
| f789463982 | |||
| 955801db2e | |||
| 93b2e80eb8 | |||
| 1a41b92f77 | |||
| 58a8f1f1b0 | |||
| daddfc9e1b | |||
| ecf5f6976f | |||
| e2448fb6dd | |||
| 9c9f2dbe3f | |||
| b3d579e2c1 | |||
| eb72d598b1 | |||
| 033a4cf21e | |||
| fda9b58ab7 | |||
| ca865df87f | |||
| f9f75aa119 | |||
| db42d0e0ae | |||
| df3d0f61bd | |||
| c6bc69cbc5 | |||
| 8c9df482ab | |||
| 1137b04154 | |||
| ec96426c00 | |||
| 4d22daefa7 | |||
| bcc92e04c9 | |||
| 9aa222f738 | |||
| 605cfdb8dc | |||
| 041d72b755 | |||
| 569e40544d | |||
| 3d605a23fe | |||
| 4f2816c01c | |||
| a0b461a18e | |||
| 7ce675030b | |||
| 217caecfda | |||
| ef8847eda7 | |||
| d78010c376 | |||
| 3444cb15e3 | |||
| 0151d42156 | |||
| 392f28882f | |||
| cdb3e6434a | |||
| bf5f6ec262 | |||
| 1a755e75c5 | |||
| 46ff897107 | |||
| f5d63bb7df | |||
| c54ec09519 | |||
| 7b3d700d5f | |||
| 744ff55c62 | |||
| c326f14fed | |||
| 07ddb8fcff | |||
| 84bcd8b3bc | |||
| f52970b038 | |||
| 39b96849a9 | |||
| f298e55ded | |||
| ed943b1b5b | |||
| 0c6d787f92 | |||
| a4f9aa2172 | |||
| c432ce6be5 | |||
| c5b32b2211 | |||
| 24efa86f26 | |||
| 38e551cc3d | |||
| ef95f08c48 | |||
| 3ced290eb5 | |||
| fab0f07379 | |||
| 8525f55ad0 | |||
| e6c024f8bf | |||
| c28bc41a96 | |||
| 29a59ed7e2 | |||
| f8b80f3f93 | |||
| 189007e44d | |||
| 3cffadc7a2 | |||
| 18e43831bc | |||
| 3356de55ed | |||
| 375e727f9a | |||
| a2b8ba472f | |||
| 00c7ddbc9b | |||
| 3e0bc9e36b | |||
| d6ba4bd255 | |||
| 84b4b38cbb | |||
| 4694604836 | |||
| 224c5472c8 | |||
| 409310aae9 | |||
| 9ff825f39d | |||
| 7b5d831296 | |||
| 42ee209084 | |||
| e4096fbc33 | |||
| 3aa5c2a699 | |||
| 2ddf278e2d | |||
| f46448d04c | |||
| ab17606e79 | |||
| 7c90b87715 | |||
| d2929e432e | |||
| 88daa349f9 | |||
| f29da49893 | |||
| 194e8ea696 | |||
| 810f997276 | |||
| 6daae7f226 | |||
| f9fe6ac642 | |||
| b4ad565df6 | |||
| 754d5ea364 | |||
| 26add87c3d | |||
| 986062a604 | |||
| 29ceeba95f | |||
| 849d9eb463 | |||
| dce7053c24 | |||
| 042f4c90c6 | |||
| c1583a3e1d | |||
| 17fa2e9e8e | |||
| ff237f2dbc | |||
| 50c99599f2 | |||
| 891ee85fa6 | |||
| a03f5dd9f6 | |||
| 415c4b7ed5 | |||
| d599707154 | |||
| 7f06712a30 | |||
| b08bb56f6c | |||
| 9bcccadebd | |||
| 1287558f24 | |||
| 6b389e01b5 | |||
| 8fcca1b958 | |||
| a1cf792245 | |||
| 978b580dcf | |||
| d197f33646 | |||
| 521d25d4e6 | |||
| ca1648052a | |||
| f34b913bd8 | |||
| 0d3ed37b48 | |||
| bc68f18c48 | |||
| 6e42687e65 | |||
| e4bd879686 | |||
| 78982d88e0 | |||
| fa5c7edab4 | |||
| 6fa34d5532 | |||
| 9e5427dc6e | |||
| a357190eff | |||
| bfcc2abe47 | |||
| f64ae9dc33 | |||
| 5a51bdd824 | |||
| b48c85dcf9 | |||
| f374dd38b6 | |||
| ccb72e6787 | |||
| 55823dbdf6 | |||
| 588207d7c1 | |||
| 2aa0cdde8f | |||
| 44d798d8f0 | |||
| 4150805073 | |||
| 448fa1c4d4 | |||
| e786f596e2 | |||
| fe9e9a644f | |||
| d2961b2d25 | |||
| a73e1750b6 | |||
| c1d71e9a3f | |||
| 2a07eb69a7 | |||
| a3a70431f3 | |||
| 6f2c3a3c3c | |||
| 54803c4ef2 | |||
| efbaa484d7 | |||
| 3411d0a2ce | |||
| 283d036cba | |||
| 307717b045 | |||
| 8e74bc8e42 | |||
| 4b9c4c0705 | |||
| b2bb560007 | |||
| e1526846da | |||
| 7a7f98b1a9 | |||
| 036f37a627 | |||
| 191587346c | |||
| 50055c47ec | |||
| 6f30397bb5 | |||
| d970d0ef39 | |||
| ce8658aa84 | |||
| bc6a768b90 | |||
| 656a2fab41 | |||
| 47b28a27a6 | |||
| c354239b79 | |||
| b4303f6010 | |||
| 4776fa5e4e | |||
| c24137bd11 | |||
| 4011c8f68c | |||
| 2cb8edc42c | |||
| 284b4d4430 | |||
| 42f7261509 | |||
| f33415b751 | |||
| 530b0dab17 | |||
| c4b1c4e6f4 | |||
| 3c2c8942d5 | |||
| 71c132f76d | |||
| 9d717f0b6e | |||
| 8b49734241 | |||
| 898ae7fa80 | |||
| fa4277225d | |||
| 1bff6b7333 | |||
| e9ccba0395 | |||
| f1d9f4290e | |||
| 4230402fbb | |||
| e14d6ae441 | |||
| 55f2b7c4d5 | |||
| 07b3e55903 | |||
| 86892959a0 | |||
| bbc1d02c96 | |||
| b23a4a8fea | |||
| 240e7d7c22 | |||
| 52fa8bdcf3 | |||
| 13f04b7cca | |||
| c4b9e903c8 | |||
| 15f9406e7b | |||
| c5c0dd2da0 | |||
| dd0ebbea35 | |||
| 1a367664f1 | |||
| 336e5fb37f | |||
| 598e142c85 | |||
| cbc3c5297e | |||
| 4b82275ae5 | |||
| 3894de895b | |||
| 583050a876 | |||
| a2946b0fb0 | |||
| 21052b2972 | |||
| 5632613eb5 | |||
| fc35821f81 | |||
| db80376427 | |||
| 99430a7db7 | |||
| a3391c4d55 | |||
| e0f52eebc6 | |||
| 367babda2f | |||
| 2962284c79 | |||
| 75e1981e13 | |||
| 4f9f9405b8 | |||
| 938492cbae | |||
| f4d084bcf1 | |||
| 69984554a5 | |||
| 03d7a51d49 | |||
| 0efe7a544b | |||
| c0799c53b3 | |||
| 4946e43941 | |||
| 37235315e1 | |||
| 39be08c83d | |||
| 3805621564 | |||
| a75cda4957 | |||
| 961e8c4980 | |||
| 57b4e0c464 | |||
| c852a6dfbf | |||
| b4614e9517 | |||
| be5f830878 | |||
| 7944aacafa | |||
| e478586a8e | |||
| 713f38090b | |||
| 8f7ecde908 | |||
| 23ad459136 | |||
| f556f0239c | |||
| f318342c8e | |||
| d3c07794b5 | |||
| fd0bf3adf0 | |||
| c08382099a | |||
| d8346cb7a6 | |||
| 46c52d65b7 | |||
| e098fcf6ad | |||
| ecdb2a88bd | |||
| 2c7ba90cb4 | |||
| 95261f17f6 | |||
| 7d909d4d1b | |||
| 4dde73f897 | |||
| 93b30b2fb5 | |||
| 06c54367fa | |||
| 6acbd374d8 | |||
| 48bca0ca01 | |||
| 300d8ecf51 | |||
| dac54ded96 | |||
| c5da3cdd97 | |||
| f892d7d426 | |||
| f86d8906e7 | |||
| bc681e2ee9 | |||
| b6c71c1e01 | |||
| 7bebf4b7bf | |||
| d64df4de9c | |||
| af43cb04e8 | |||
| 3d66d78304 | |||
| b7ce4e7e62 | |||
| 5e64d79587 | |||
| 49cebd9fec | |||
| d9a4e4cc3b | |||
| ac89a2dda1 | |||
| 01a122dc9d | |||
| 8ec392adb0 | |||
| de822a108f | |||
| 2e40c2a6f6 | |||
| d088a34fe2 | |||
| 16e1681fa4 | |||
| bb24e5f739 | |||
| 1d93eb81ae | |||
| 439d20e41f | |||
| 45619702ff | |||
| b93c136797 | |||
| 983ec0666c | |||
| a9ba051582 | |||
| bad764bcda | |||
| 9c6cf12137 | |||
| 6288b6d02b | |||
| 52c20033d7 | |||
| 5dad15600c | |||
| 8674156d1c | |||
| 5083d92998 | |||
| 59a78408be | |||
| df22ead841 | |||
| 5883493c7d | |||
| 50f209204e | |||
| 564277736a | |||
| 061a22588a | |||
| 5071df9de1 | |||
| 419b546f03 | |||
| e5b1511c66 | |||
| 0e5124ec99 | |||
| 7c7b7d2689 | |||
| accd3a6c7e | |||
| 4ba4f622a5 | |||
| b52b0f68fc | |||
| d42e78bce2 | |||
| 8fb18f37f6 | |||
| f619d5a9b6 | |||
| d1971e988a | |||
| 54908ebd30 | |||
| 3ba2b8d80f | |||
| 713b837276 | |||
| 8feb4c1a99 | |||
| dd13a5d05c | |||
| 8cdf10148d | |||
| 7773afa561 | |||
| 798eb3647c | |||
| 2d17e5aa04 | |||
| c75aa11ae6 | |||
| c7818770f4 | |||
| 6f6303d017 | |||
| 146e8bb793 | |||
| f948c0d9f1 | |||
| c3e3f0fbb4 | |||
| a1a825c830 | |||
| a6f4153775 | |||
| 097aab09a2 | |||
| 600f435d27 | |||
| 722545e5e0 | |||
| 9fa73771ee | |||
| fe279754ac | |||
| 7e063283ba | |||
| 28eeb29b88 | |||
| 85511cb1fd | |||
| a3eeb5de32 | |||
| 1160b58b6e | |||
| 61790ebe15 | |||
| bc3288d390 | |||
| 4e5f92f01b | |||
| 7d8e0602aa | |||
| 03cbbf7784 | |||
| b7a7413419 | |||
| 321e9f3719 | |||
| 76cd23eecf | |||
| d030b4a680 | |||
| a9fd6066d2 | |||
| c373dba0bc | |||
| cf62230548 | |||
| 8d73cf6f02 | |||
| b635002666 | |||
| 4abc144d3d | |||
| a4bccc1ae7 | |||
| 8f070c3d56 | |||
| 31d67c850e | |||
| 2cbe064080 | |||
| cac7851fc5 | |||
| 96da618b6a | |||
| f13f503952 | |||
| cb45431412 | |||
| 85083ad400 | |||
| 35580af875 | |||
| a0dc9e1bdf | |||
| 6379a934ff | |||
| 10a62115c7 | |||
| e38e3bcc3b | |||
| 8dcf99611b | |||
| 213218a094 | |||
| 478da3118c | |||
| 101b8ff813 | |||
| d8fca43017 | |||
| b35e811fe7 | |||
| 7474348394 | |||
| 8939206531 | |||
| 57c99dd811 | |||
| 561eeabfa4 | |||
| 5fb9136251 | |||
| 044bb0334b | |||
| a5cf6fc546 | |||
| 57fe5d0864 | |||
| bfdc4944a3 | |||
| a45ba3a91e | |||
| e513ad2f16 | |||
| 1fdad50dac | |||
| 4764ca5ef7 | |||
| 85f3d92816 | |||
| 742eef028f | |||
| dfbdeaddaf | |||
| 50c2b9d562 | |||
| f8cef73244 | |||
| f8c9ec4d56 | |||
| db74a3ef34 | |||
| 00f99ecbd5 | |||
| 0a3c6fff7c | |||
| 79e435fc2e | |||
| 163c2a70fc | |||
| bedc09f69c | |||
| 251592eeeb | |||
| 09436f6c60 | |||
| e8b4e8b3d7 | |||
| 000cd6d615 | |||
| 1d65299791 | |||
| bcccaccc2b | |||
| fddac1345d | |||
| 4a95349492 | |||
| 0fcb564261 | |||
| 96667696d2 | |||
| ce1e855328 | |||
| b5e4a5563c | |||
| 1053ef5551 | |||
| cb6e9ce164 | |||
| 8ea631a2a0 | |||
| 7fb67c4f67 | |||
| 44ac87aef4 | |||
| 7ddccbb952 | |||
| 4a7bc4df92 | |||
| 3b7d182720 | |||
| 78527acd88 | |||
| e5c3083826 | |||
| 9b9039de92 | |||
| 9b2ef62aee | |||
| 86507af770 | |||
| 93635674c3 | |||
| 1defe0b19b | |||
| 0bca46ac3a | |||
| 1ecb687c51 | |||
| 68d46b2a1e | |||
| 7559bbd46d | |||
| 275b5d14f2 | |||
| 9ae81b42a3 | |||
| d6c74ff131 | |||
| e8d74108a5 | |||
| c8b1a564aa | |||
| 301f95837c | |||
| 835fd7abcd | |||
| bb8f97c9cd | |||
| 6d19294ddc | |||
| f61c276f74 | |||
| 409acf0d9f | |||
| 74c6b21f3b | |||
| beeacd3e3f | |||
| 95259af68f | |||
| 855455006b | |||
| b844ad6e06 | |||
| e0533f19e9 | |||
| 9a6d976252 | |||
| 3d76f10a91 | |||
| e9b8c30a38 | |||
| 601d74160b | |||
| fc4e644e5f | |||
| 03f00c9e6f | |||
| 87e46b4425 | |||
| d5a322a352 | |||
| 7d4f1c0645 | |||
| 927873bfa6 | |||
| 5fe0791684 | |||
| 3e134ac0ad | |||
| 7a6bf4326e | |||
| 41a0601735 | |||
| 60486ecde5 | |||
| 255f4ccffc | |||
| afe82feb57 | |||
| 044afa83d1 | |||
| 4b00be4173 | |||
| 215e9361ea | |||
| aaec630759 | |||
| 3d735dca87 | |||
| dcedfc5ec8 | |||
| 1254ecf445 | |||
| 0d68a6cd1b | |||
| e267a026f3 | |||
| 44d4686b20 | |||
| 95614175e6 | |||
| c817ff184b | |||
| f284578cea | |||
| e69e6b2274 | |||
| 8cdb805c0b | |||
| 885418f3b0 | |||
| b44321f9c3 | |||
| f54a8d7748 | |||
| 311a475b6f | |||
| 655b01a0a4 | |||
| d4ee082735 | |||
| 1f5a7c4b12 | |||
| dab58b9311 | |||
| e56a60b316 | |||
| f189452446 | |||
| f576c555e4 | |||
| d8eea624e2 | |||
| e64c7dfdf6 | |||
| c76e7b1e28 | |||
| 0d5486aa57 | |||
| 3a0e9f9263 | |||
| 1f0a153d0e | |||
| 8bdf1d98a3 | |||
| 8037dc7b76 | |||
| 56f473b680 | |||
| b502dc7399 | |||
| cfe23badb0 | |||
| 593ffc4067 | |||
| a88a1848ff | |||
| 5ae33184d5 | |||
| 78601ee1bd | |||
| 84afb4259c | |||
| 1b817a5b4c | |||
| 1b589609a4 | |||
| 289f4f1916 | |||
| cf37e2ef1a | |||
| 41e2dadea7 | |||
| f3318b2e49 | |||
| 3f3469130b | |||
| fc38afcec4 | |||
| efae7afd62 | |||
| 285bc58364 | |||
| 6657ca7cde | |||
| 87455d79e4 | |||
| 821fdf02b4 | |||
| 54980337e4 | |||
| 92ab7ef659 | |||
| 934dbc2e2b | |||
| 95da6de9e1 | |||
| ccdeeda9cc | |||
| 74b28ef1b0 | |||
| 7543047de3 | |||
| e66addc82d | |||
| 7b6a5ffaff | |||
| 19545282aa | |||
| 6a0583f5ad | |||
| ed7e46b6ca | |||
| 9654e64a0a | |||
| 8b650fc9ef | |||
| 69fb323581 | |||
| 9d093547e8 | |||
| c5f13629af | |||
| c4b6df350a | |||
| 976d112280 | |||
| 8fba5c4179 | |||
| d19f059f34 | |||
| deca6c1b72 | |||
| 3ee9ca749d | |||
| 7058ac0041 | |||
| a7efd3cac5 | |||
| 59a5813f1b | |||
| 08c1a5e1e8 | |||
| ea84cc2e33 | |||
| b5f643681f | |||
| 5497ea34b9 | |||
| e079656473 | |||
| d00297a763 | |||
| a19210daf1 | |||
| b2abc36baa | |||
| fadbe23bfe | |||
| ea8a59d0b0 | |||
| 381219aa41 | |||
| 0f08b0f053 | |||
| 0dafce31c4 | |||
| c93e0355c3 | |||
| 1e0fc76efa | |||
| d94386e00a | |||
| 0a62dd7a7e | |||
| 06a21d2031 | |||
| 9a3febb7c5 | |||
| 27cd765d6f | |||
| a0c0a957b4 | |||
| b89f7c69ad | |||
| fcdc6ad085 |
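Author and date columns were not captured for the commits above, but each abbreviated SHA1 can be inspected from a local clone. An illustrative sketch (substitute any SHA1 from the table):

```bash
# Clone the mirrored repository and look up one of the listed commits.
git clone https://github.com/infiniflow/ragflow.git
cd ragflow
git show --stat d44739283c   # prints author, date, message, and touched files
```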
118 .github/workflows/release.yml (vendored, new file)

@@ -0,0 +1,118 @@
name: release

on:
  schedule:
    - cron: '0 13 * * *'  # This schedule runs every 13:00:00Z(21:00:00+08:00)
  # The "create tags" trigger is specifically focused on the creation of new tags, while the "push tags" trigger is activated when tags are pushed, including both new tag creations and updates to existing tags.
  create:
    tags:
      - "v*.*.*"  # normal release
      - "nightly"  # the only one mutable tag

# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  release:
    runs-on: [ "self-hosted", "overseas" ]
    steps:
      - name: Ensure workspace ownership
        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

      # https://github.com/actions/checkout/blob/v3/README.md
      - name: Check out code
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
          fetch-depth: 0
          fetch-tags: true

      - name: Prepare release body
        run: |
          if [[ $GITHUB_EVENT_NAME == 'create' ]]; then
            RELEASE_TAG=${GITHUB_REF#refs/tags/}
            if [[ $RELEASE_TAG == 'nightly' ]]; then
              PRERELEASE=true
            else
              PRERELEASE=false
            fi
            echo "Workflow triggered by create tag: $RELEASE_TAG"
          else
            RELEASE_TAG=nightly
            PRERELEASE=true
            echo "Workflow triggered by schedule"
          fi
          echo "RELEASE_TAG=$RELEASE_TAG" >> $GITHUB_ENV
          echo "PRERELEASE=$PRERELEASE" >> $GITHUB_ENV
          RELEASE_DATETIME=$(date --rfc-3339=seconds)
          echo Release $RELEASE_TAG created from $GITHUB_SHA at $RELEASE_DATETIME > release_body.md

      - name: Move the existing mutable tag
        # https://github.com/softprops/action-gh-release/issues/171
        run: |
          git fetch --tags
          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
            # Determine if a given tag exists and matches a specific Git commit.
            # actions/checkout@v4 fetch-tags doesn't work when triggered by schedule
            if [ "$(git rev-parse -q --verify "refs/tags/$RELEASE_TAG")" = "$GITHUB_SHA" ]; then
              echo "mutable tag $RELEASE_TAG exists and matches $GITHUB_SHA"
            else
              git tag -f $RELEASE_TAG $GITHUB_SHA
              git push -f origin $RELEASE_TAG:refs/tags/$RELEASE_TAG
              echo "created/moved mutable tag $RELEASE_TAG to $GITHUB_SHA"
            fi
          fi

      - name: Create or overwrite a release
        # https://github.com/actions/upload-release-asset has been replaced by https://github.com/softprops/action-gh-release
        uses: softprops/action-gh-release@v2
        with:
          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
          prerelease: ${{ env.PRERELEASE }}
          tag_name: ${{ env.RELEASE_TAG }}
          # The body field does not support environment variable substitution directly.
          body_path: release_body.md

      # https://github.com/marketplace/actions/docker-login
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: infiniflow
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Build and push full image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}
          file: Dockerfile
          platforms: linux/amd64

      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Build and push slim image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
          file: Dockerfile
          build-args: LIGHTEN=1
          platforms: linux/amd64

      - name: Build ragflow-sdk
        if: startsWith(github.ref, 'refs/tags/v')
        run: |
          cd sdk/python && \
          uv build

      - name: Publish package distributions to PyPI
        if: startsWith(github.ref, 'refs/tags/v')
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          packages-dir: sdk/python/dist/
          password: ${{ secrets.PYPI_API_TOKEN }}
          verbose: true
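As the workflow above encodes, a release fires on one of two paths: a maintainer creating a `v*.*.*` tag, or the daily cron re-pointing the mutable `nightly` tag. A hedged sketch of both paths from a developer's shell (the version number is a made-up example):

```bash
# Path 1: cut a normal release; the "create" trigger matches "v*.*.*".
git tag v0.99.0
git push origin v0.99.0

# Path 2: roughly what the "Move the existing mutable tag" step does on the
# runner, expressed against the local HEAD instead of $GITHUB_SHA:
git tag -f nightly HEAD
git push -f origin nightly:refs/tags/nightly
```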
63 .github/workflows/tests.yml (vendored)

@@ -32,12 +32,9 @@ jobs:
       # https://github.com/hmarr/debug-action
       #- uses: hmarr/debug-action@v2

-      - name: Show PR labels
+      - name: Show who triggered this workflow
         run: |
           echo "Workflow triggered by ${{ github.event_name }}"
-          if [[ ${{ github.event_name }} == 'pull_request' ]]; then
-            echo "PR labels: ${{ join(github.event.pull_request.labels.*.name, ', ') }}"
-          fi

       - name: Ensure workspace ownership
         run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
@@ -49,29 +46,36 @@ jobs:
           fetch-depth: 0
           fetch-tags: true

-      - name: Build ragflow:dev-slim
+      # https://github.com/astral-sh/ruff-action
+      - name: Static check with Ruff
+        uses: astral-sh/ruff-action@v2
+        with:
+          version: ">=0.8.2"
+          args: "check --ignore E402"
+
+      - name: Build ragflow:nightly-slim
         run: |
           RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
           cp -r ${RUNNER_WORKSPACE_PREFIX}/huggingface.co ${RUNNER_WORKSPACE_PREFIX}/nltk_data ${RUNNER_WORKSPACE_PREFIX}/libssl*.deb ${RUNNER_WORKSPACE_PREFIX}/tika-server*.jar* ${RUNNER_WORKSPACE_PREFIX}/chrome* ${RUNNER_WORKSPACE_PREFIX}/cl100k_base.tiktoken .
           sudo docker pull ubuntu:22.04
-          sudo docker build --progress=plain -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
+          sudo docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .

-      - name: Build ragflow:dev
+      - name: Build ragflow:nightly
         run: |
-          sudo docker build --progress=plain -f Dockerfile -t infiniflow/ragflow:dev .
+          sudo docker build --progress=plain --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .

-      - name: Start ragflow:dev-slim
+      - name: Start ragflow:nightly-slim
         run: |
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

-      - name: Stop ragflow:dev-slim
+      - name: Stop ragflow:nightly-slim
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo docker compose -f docker/docker-compose.yml down -v

-      - name: Start ragflow:dev
+      - name: Start ragflow:nightly
         run: |
-          echo "RAGFLOW_IMAGE=infiniflow/ragflow:dev" >> docker/.env
+          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

       - name: Run sdk tests against Elasticsearch
@@ -82,7 +86,7 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-         cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+         cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

       - name: Run frontend api tests against Elasticsearch
         run: |
@@ -92,15 +96,24 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-         cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+         cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

+      - name: Run http api tests against Elasticsearch
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow"

-      - name: Stop ragflow:dev
+      - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo docker compose -f docker/docker-compose.yml down -v

-      - name: Start ragflow:dev
+      - name: Start ragflow:nightly
         run: |
           sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml up -d

@@ -112,7 +125,7 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-         cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+         cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py

       - name: Run frontend api tests against Infinity
         run: |
@@ -122,9 +135,19 @@ jobs:
            echo "Waiting for service to be available..."
            sleep 5
          done
-         cd sdk/python && poetry install && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+         cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py

-      - name: Stop ragflow:dev
+      - name: Run http api tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_http_api && pytest -s --tb=short -m "not slow"
+
+      - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed
         run: |
           sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml down -v
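The recurring `uv sync`/`uv pip install` invocation that replaces `poetry install` can be reproduced outside CI. A sketch, assuming uv and Python 3.10 are installed and a RAGFlow server is reachable at `HOST_ADDRESS`:

```bash
# Mirror of the CI test steps for local use (not the exact CI environment).
export HOST_ADDRESS=http://host.docker.internal:9380
cd sdk/python
uv sync --python 3.10 --frozen   # install locked dependencies into .venv
uv pip install .                 # install the ragflow-sdk package itself
source .venv/bin/activate
cd test/test_sdk_api
pytest -s --tb=short t_dataset.py t_chat.py
```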
7 .gitignore (vendored)

@@ -35,4 +35,9 @@ rag/res/deepdoc
 sdk/python/ragflow.egg-info/
 sdk/python/build/
 sdk/python/dist/
 sdk/python/ragflow_sdk.egg-info/
+huggingface.co/
+nltk_data/
+
+# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
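The new glob is aggressive: it ignores any path containing a run of ten or more hex characters. An illustrative check of what it catches, using the tiktoken cache name referenced in the Dockerfile:

```bash
# Create a hash-named file and ask git which ignore rule matches it.
touch 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
git check-ignore -v 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
# Expected: the matching .gitignore line is the new hex-run pattern.
```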
@@ -1,6 +1,6 @@
 # Contribution guidelines

-This document offers guidlines and major considerations for submitting your contributions to RAGFlow.
+This document offers guidelines and major considerations for submitting your contributions to RAGFlow.

 - To report a bug, file a [GitHub issue](https://github.com/infiniflow/ragflow/issues/new/choose) with us.
 - For further questions, you can explore existing discussions or initiate a new one in [Discussions](https://github.com/orgs/infiniflow/discussions).
285 Dockerfile

@@ -3,55 +3,140 @@ FROM ubuntu:22.04 AS base
 USER root
 SHELL ["/bin/bash", "-c"]

-ENV LIGHTEN=0
+ARG NEED_MIRROR=0
+ARG LIGHTEN=0
+ENV LIGHTEN=${LIGHTEN}

 WORKDIR /ragflow

-RUN rm -f /etc/apt/apt.conf.d/docker-clean \
-    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
-
-RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt-get --no-install-recommends install -y ca-certificates
-
-# Setup apt mirror site
-RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
-
-RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
-    apt update && DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
-    libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip3 config set global.trusted-host "pypi.tuna.tsinghua.edu.cn mirrors.pku.edu.cn" && pip3 config set global.extra-index-url "https://mirrors.pku.edu.cn/pypi/web/simple" \
-    && pipx install poetry \
-    && /root/.local/bin/poetry self add poetry-plugin-pypi-mirror
-
-# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
-# aspose-slides on linux/arm64 is unavailable
-RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
-    --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_arm64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb \
-    if [ "$(uname -m)" = "x86_64" ]; then \
-        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
-    elif [ "$(uname -m)" = "aarch64" ]; then \
-        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
-    fi
+# Copy models downloaded via download_deps.py
+RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
+    cp /huggingface.co/InfiniFlow/huqie/huqie.txt.trie /ragflow/rag/res/ && \
+    tar --exclude='.*' -cf - \
+        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
+        /huggingface.co/InfiniFlow/deepdoc \
+        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
+    if [ "$LIGHTEN" != "1" ]; then \
+        (tar -cf - \
+            /huggingface.co/BAAI/bge-large-zh-v1.5 \
+            /huggingface.co/BAAI/bge-reranker-v2-m3 \
+            /huggingface.co/maidalun1020/bce-embedding-base_v1 \
+            /huggingface.co/maidalun1020/bce-reranker-base_v1 \
+        | tar -xf - --strip-components=2 -C /root/.ragflow) \
+    fi
+
+# https://github.com/chrismattmann/tika-python
+# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
+    cp -r /deps/nltk_data /root/ && \
+    cp /deps/tika-server-standard-3.0.0.jar /deps/tika-server-standard-3.0.0.jar.md5 /ragflow/ && \
+    cp /deps/cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
+
+ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard-3.0.0.jar"
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Setup apt
+# Python package and implicit dependencies:
+# opencv-python: libglib2.0-0 libglx-mesa0 libgl1
+# aspose-slides: pkg-config libicu-dev libgdiplus libssl1.1_1.1.1f-1ubuntu2_amd64.deb
+# python-pptx: default-jdk tika-server-standard-3.0.0.jar
+# selenium: libatk-bridge2.0-0 chrome-linux64-121-0-6167-85
+# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
+    fi; \
+    rm -f /etc/apt/apt.conf.d/docker-clean && \
+    echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
+    chmod 1777 /tmp && \
+    apt update && \
+    apt --no-install-recommends install -y ca-certificates && \
+    apt update && \
+    apt install -y libglib2.0-0 libglx-mesa0 libgl1 && \
+    apt install -y pkg-config libicu-dev libgdiplus && \
+    apt install -y default-jdk && \
+    apt install -y libatk-bridge2.0-0 && \
+    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
+    apt install -y libjemalloc-dev && \
+    apt install -y python3-pip pipx nginx unzip curl wget git vim less

+RUN if [ "$NEED_MIRROR" == "1" ]; then \
+        pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
+        pip3 config set global.trusted-host mirrors.aliyun.com; \
+        mkdir -p /etc/uv && \
+        echo "[[index]]" > /etc/uv/uv.toml && \
+        echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
+        echo "default = true" >> /etc/uv/uv.toml; \
+    fi; \
+    pipx install uv

 ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
 ENV PATH=/root/.local/bin:$PATH
-# Configure Poetry
-ENV POETRY_NO_INTERACTION=1
-ENV POETRY_VIRTUALENVS_IN_PROJECT=true
-ENV POETRY_VIRTUALENVS_CREATE=true
-ENV POETRY_REQUESTS_TIMEOUT=15
-ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/

 # nodejs 12.22 on Ubuntu 22.04 is too old
-RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
     curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
-    apt purge -y nodejs npm && \
-    apt autoremove && \
+    apt purge -y nodejs npm cargo && \
+    apt autoremove -y && \
     apt update && \
-    apt install -y nodejs cargo && \
-    rm -rf /var/lib/apt/lists/*
+    apt install -y nodejs

+# A modern version of cargo is needed for the latest version of the Rust compiler.
+RUN apt update && apt install -y curl build-essential \
+    && if [ "$NEED_MIRROR" == "1" ]; then \
+        # Use TUNA mirrors for rustup/rust dist files
+        export RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup"; \
+        export RUSTUP_UPDATE_ROOT="https://mirrors.tuna.tsinghua.edu.cn/rustup/rustup"; \
+        echo "Using TUNA mirrors for Rustup."; \
+    fi; \
+    # Force curl to use HTTP/1.1
+    curl --proto '=https' --tlsv1.2 --http1.1 -sSf https://sh.rustup.rs | bash -s -- -y --profile minimal \
+    && echo 'export PATH="/root/.cargo/bin:${PATH}"' >> /root/.bashrc
+
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+RUN cargo --version && rustc --version
+
+# Add msssql ODBC driver
+# macOS ARM64 environment, install msodbcsql18.
+# general x86_64 environment, install msodbcsql17.
+RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
+    curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - && \
+    curl https://packages.microsoft.com/config/ubuntu/22.04/prod.list > /etc/apt/sources.list.d/mssql-release.list && \
+    apt update && \
+    arch="$(uname -m)"; \
+    if [ "$arch" = "arm64" ] || [ "$arch" = "aarch64" ]; then \
+        # ARM64 (macOS/Apple Silicon or Linux aarch64)
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql18; \
+    else \
+        # x86_64 or others
+        ACCEPT_EULA=Y apt install -y unixodbc-dev msodbcsql17; \
+    fi || \
+    { echo "Failed to install ODBC driver"; exit 1; }
+
+# Add dependencies of selenium
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
+    unzip /chrome-linux64.zip && \
+    mv chrome-linux64 /opt/chrome && \
+    ln -s /opt/chrome/chrome /usr/local/bin/
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
+    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
+    mv chromedriver /usr/local/bin/ && \
+    rm -f /usr/bin/google-chrome
+
+# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
+# aspose-slides on linux/arm64 is unavailable
+RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
+    if [ "$(uname -m)" = "x86_64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
+    elif [ "$(uname -m)" = "aarch64" ]; then \
+        dpkg -i /deps/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
+    fi

@@ -59,17 +144,31 @@ USER root

 WORKDIR /ragflow

+# install dependencies from uv.lock file
+COPY pyproject.toml uv.lock ./
+
+# https://github.com/astral-sh/uv/issues/10462
+# uv records index url into uv.lock but doesn't failover among multiple indexes
+RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
+    if [ "$NEED_MIRROR" == "1" ]; then \
+        sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
+    else \
+        sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
+    fi; \
+    if [ "$LIGHTEN" == "1" ]; then \
+        uv sync --python 3.10 --frozen; \
+    else \
+        uv sync --python 3.10 --frozen --all-extras; \
+    fi
+
+COPY web web
+COPY docs docs
+RUN --mount=type=cache,id=ragflow_npm,target=/root/.npm,sharing=locked \
+    cd web && npm install && npm run build
+
 COPY .git /ragflow/.git

-RUN current_commit=$(git rev-parse --short HEAD); \
-    last_tag=$(git describe --tags --abbrev=0); \
-    commit_count=$(git rev-list --count "$last_tag..HEAD"); \
-    version_info=""; \
-    if [ "$commit_count" -eq 0 ]; then \
-        version_info=$last_tag; \
-    else \
-        version_info="$current_commit($last_tag~$commit_count)"; \
-    fi; \
+RUN version_info=$(git describe --tags --match=v* --first-parent --always); \
     if [ "$LIGHTEN" == "1" ]; then \
         version_info="$version_info slim"; \
     else \

@@ -78,84 +177,12 @@ RUN current_commit=$(git rev-parse --short HEAD); \
     echo "RAGFlow version: $version_info"; \
     echo $version_info > /ragflow/VERSION

-COPY web web
-COPY docs docs
-RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
-    cd web && npm install --force && npm run build
-
-# install dependencies from poetry.lock file
-COPY pyproject.toml poetry.toml poetry.lock ./
-
-RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
-    if [ "$LIGHTEN" == "1" ]; then \
-        poetry install --no-root; \
-    else \
-        poetry install --no-root --with=full; \
-    fi
-
 # production stage
 FROM base AS production
 USER root

 WORKDIR /ragflow

-COPY --from=builder /ragflow/VERSION /ragflow/VERSION
-
-# Install python packages' dependencies
-# cv2 requires libGL.so.1
-RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
-    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
-    rm -rf /var/lib/apt/lists/*
-
-COPY web web
-COPY api api
-COPY conf conf
-COPY deepdoc deepdoc
-COPY rag rag
-COPY agent agent
-COPY graphrag graphrag
-COPY pyproject.toml poetry.toml poetry.lock ./
-
-# Copy models downloaded via download_deps.py
-RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar --exclude='.*' -cf - \
-        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
-        /huggingface.co/InfiniFlow/deepdoc \
-        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
-RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
-    tar -cf - \
-        /huggingface.co/BAAI/bge-large-zh-v1.5 \
-        /huggingface.co/BAAI/bge-reranker-v2-m3 \
-        /huggingface.co/maidalun1020/bce-embedding-base_v1 \
-        /huggingface.co/maidalun1020/bce-reranker-base_v1 \
-        | tar -xf - --strip-components=2 -C /root/.ragflow
-
-# Copy nltk data downloaded via download_deps.py
-COPY nltk_data /root/nltk_data
-
-# https://github.com/chrismattmann/tika-python
-# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
-COPY tika-server-standard-3.0.0.jar /ragflow/tika-server-standard.jar
-COPY tika-server-standard-3.0.0.jar.md5 /ragflow/tika-server-standard.jar.md5
-ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard.jar"
-
-# Copy cl100k_base
-COPY cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
-
-# Add dependencies of selenium
-RUN --mount=type=bind,source=chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
-    unzip /chrome-linux64.zip && \
-    mv chrome-linux64 /opt/chrome && \
-    ln -s /opt/chrome/chrome /usr/local/bin/
-RUN --mount=type=bind,source=chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
-    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
-    mv chromedriver /usr/local/bin/ && \
-    rm -f /usr/bin/google-chrome
-
-# Copy compiled web pages
-COPY --from=builder /ragflow/web/dist /ragflow/web/dist
-
 # Copy Python environment and packages
 ENV VIRTUAL_ENV=/ragflow/.venv
 COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}

@@ -163,8 +190,22 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

 ENV PYTHONPATH=/ragflow/

-COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
-COPY docker/entrypoint.sh ./entrypoint.sh
-RUN chmod +x ./entrypoint.sh
+COPY web web
+COPY api api
+COPY conf conf
+COPY deepdoc deepdoc
+COPY rag rag
+COPY agent agent
+COPY graphrag graphrag
+COPY agentic_reasoning agentic_reasoning
+COPY pyproject.toml uv.lock ./
+
+COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
+COPY docker/entrypoint.sh docker/entrypoint-parser.sh ./
+RUN chmod +x ./entrypoint*.sh
+
+# Copy compiled web pages
+COPY --from=builder /ragflow/web/dist /ragflow/web/dist
+
+COPY --from=builder /ragflow/VERSION /ragflow/VERSION
 ENTRYPOINT ["./entrypoint.sh"]
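The two build arguments introduced here, `LIGHTEN` and `NEED_MIRROR`, select the image flavor and the package mirrors respectively. The commands below are the ones the tests workflow above runs; outside CI, `NEED_MIRROR=1` only helps when the Aliyun/TUNA mirrors are closer than the defaults:

```bash
# Full image, embedding models baked in:
docker build --progress=plain --build-arg NEED_MIRROR=1 \
    -f Dockerfile -t infiniflow/ragflow:nightly .
# Slim image, no embedding models:
docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 \
    -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```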
10 Dockerfile.deps (new file)

@@ -0,0 +1,10 @@
# This builds an image that contains the resources needed by Dockerfile
#
FROM scratch

# Copy resources downloaded via download_deps.py
COPY chromedriver-linux64-121-0-6167-85 chrome-linux64-121-0-6167-85 cl100k_base.tiktoken libssl1.1_1.1.1f-1ubuntu2_amd64.deb libssl1.1_1.1.1f-1ubuntu2_arm64.deb tika-server-standard-3.0.0.jar tika-server-standard-3.0.0.jar.md5 libssl*.deb /

COPY nltk_data /nltk_data

COPY huggingface.co /huggingface.co
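The main Dockerfile's `--mount=type=bind,from=infiniflow/ragflow_deps:latest,...` mounts assume this image exists locally or in a registry. A plausible local bootstrap, assuming `download_deps.py` fetches all the files the `COPY` above names (the tag must match what the Dockerfile references):

```bash
# Fetch the resources first (per the README), then build the deps image.
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile.deps -t infiniflow/ragflow_deps:latest .
```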
163 Dockerfile.slim (deleted)

@@ -1,163 +0,0 @@
# base stage
FROM ubuntu:22.04 AS base
USER root
SHELL ["/bin/bash", "-c"]

ENV LIGHTEN=1

WORKDIR /ragflow

RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache

RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt-get --no-install-recommends install -y ca-certificates

# Setup apt mirror site
RUN sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list

RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    apt update && DEBIAN_FRONTEND=noninteractive apt install -y curl libpython3-dev nginx libglib2.0-0 libglx-mesa0 pkg-config libicu-dev libgdiplus default-jdk python3-pip pipx \
    libatk-bridge2.0-0 libgtk-4-1 libnss3 xdg-utils unzip libgbm-dev wget git \
    && rm -rf /var/lib/apt/lists/*

RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && pip3 config set global.trusted-host "pypi.tuna.tsinghua.edu.cn mirrors.pku.edu.cn" && pip3 config set global.extra-index-url "https://mirrors.pku.edu.cn/pypi/web/simple" \
    && pipx install poetry \
    && /root/.local/bin/poetry self add poetry-plugin-pypi-mirror

# https://forum.aspose.com/t/aspose-slides-for-net-no-usable-version-of-libssl-found-with-linux-server/271344/13
# aspose-slides on linux/arm64 is unavailable
RUN --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_amd64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb \
    --mount=type=bind,source=libssl1.1_1.1.1f-1ubuntu2_arm64.deb,target=/root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb \
    if [ "$(uname -m)" = "x86_64" ]; then \
        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_amd64.deb; \
    elif [ "$(uname -m)" = "aarch64" ]; then \
        dpkg -i /root/libssl1.1_1.1.1f-1ubuntu2_arm64.deb; \
    fi

ENV PYTHONDONTWRITEBYTECODE=1 DOTNET_SYSTEM_GLOBALIZATION_INVARIANT=1
ENV PATH=/root/.local/bin:$PATH
# Configure Poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_IN_PROJECT=true
ENV POETRY_VIRTUALENVS_CREATE=true
ENV POETRY_REQUESTS_TIMEOUT=15
ENV POETRY_PYPI_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple/

# nodejs 12.22 on Ubuntu 22.04 is too old
RUN --mount=type=cache,id=ragflow_base_apt,target=/var/cache/apt,sharing=locked \
    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
    apt purge -y nodejs npm && \
    apt autoremove && \
    apt update && \
    apt install -y nodejs cargo && \
    rm -rf /var/lib/apt/lists/*

# builder stage
FROM base AS builder
USER root

WORKDIR /ragflow

COPY .git /ragflow/.git

RUN current_commit=$(git rev-parse --short HEAD); \
    last_tag=$(git describe --tags --abbrev=0); \
    commit_count=$(git rev-list --count "$last_tag..HEAD"); \
    version_info=""; \
    if [ "$commit_count" -eq 0 ]; then \
        version_info=$last_tag; \
    else \
        version_info="$current_commit($last_tag~$commit_count)"; \
    fi; \
    if [ "$LIGHTEN" == "1" ]; then \
        version_info="$version_info slim"; \
    else \
        version_info="$version_info full"; \
    fi; \
    echo "RAGFlow version: $version_info"; \
    echo $version_info > /ragflow/VERSION

COPY web web
COPY docs docs
RUN --mount=type=cache,id=ragflow_builder_npm,target=/root/.npm,sharing=locked \
    cd web && npm install --force && npm run build

# install dependencies from poetry.lock file
COPY pyproject.toml poetry.toml poetry.lock ./

RUN --mount=type=cache,id=ragflow_builder_poetry,target=/root/.cache/pypoetry,sharing=locked \
    if [ "$LIGHTEN" == "1" ]; then \
        poetry install --no-root; \
    else \
        poetry install --no-root --with=full; \
    fi

# production stage
FROM base AS production
USER root

WORKDIR /ragflow

COPY --from=builder /ragflow/VERSION /ragflow/VERSION

# Install python packages' dependencies
# cv2 requires libGL.so.1
RUN --mount=type=cache,id=ragflow_production_apt,target=/var/cache/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends nginx libgl1 vim less && \
    rm -rf /var/lib/apt/lists/*

COPY web web
COPY api api
COPY conf conf
COPY deepdoc deepdoc
COPY rag rag
COPY agent agent
COPY graphrag graphrag
COPY pyproject.toml poetry.toml poetry.lock ./

# Copy models downloaded via download_deps.py
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
RUN --mount=type=bind,source=huggingface.co,target=/huggingface.co \
    tar --exclude='.*' -cf - \
        /huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
        /huggingface.co/InfiniFlow/deepdoc \
        | tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc

# Copy nltk data downloaded via download_deps.py
COPY nltk_data /root/nltk_data

# https://github.com/chrismattmann/tika-python
# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
COPY tika-server-standard-3.0.0.jar /ragflow/tika-server-standard.jar
COPY tika-server-standard-3.0.0.jar.md5 /ragflow/tika-server-standard.jar.md5
ENV TIKA_SERVER_JAR="file:///ragflow/tika-server-standard.jar"

# Copy cl100k_base
COPY cl100k_base.tiktoken /ragflow/9b5ad71b2ce5302211f9c61530b329a4922fc6a4

# Add dependencies of selenium
RUN --mount=type=bind,source=chrome-linux64-121-0-6167-85,target=/chrome-linux64.zip \
    unzip /chrome-linux64.zip && \
    mv chrome-linux64 /opt/chrome && \
    ln -s /opt/chrome/chrome /usr/local/bin/
RUN --mount=type=bind,source=chromedriver-linux64-121-0-6167-85,target=/chromedriver-linux64.zip \
    unzip -j /chromedriver-linux64.zip chromedriver-linux64/chromedriver && \
    mv chromedriver /usr/local/bin/ && \
    rm -f /usr/bin/google-chrome

# Copy compiled web pages
COPY --from=builder /ragflow/web/dist /ragflow/web/dist

# Copy Python environment and packages
ENV VIRTUAL_ENV=/ragflow/.venv
COPY --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

ENV PYTHONPATH=/ragflow/

COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
COPY docker/entrypoint.sh ./entrypoint.sh
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
110
README.md
110
README.md
@ -7,9 +7,11 @@
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a>
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -20,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -32,14 +34,14 @@
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
<details open>
|
||||
<summary></b>📕 Table of Contents</b></summary>
|
||||
<summary><b>📕 Table of Contents</b></summary>
|
||||
|
||||
- 💡 [What is RAGFlow?](#-what-is-ragflow)
|
||||
- 🎮 [Demo](#-demo)
|
||||
@ -68,6 +70,7 @@ data.
|
||||
## 🎮 Demo
|
||||
|
||||
Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
@ -75,16 +78,20 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
## 🔥 Latest Updates
|
||||
|
||||
- 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
|
||||
- 2025-02-05 Updates the model list of 'SILICONFLOW' and adds support for Deepseek-R1/DeepSeek-V3.
|
||||
- 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
|
||||
- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
|
||||
- 2024-12-04 Adds support for pagerank score in knowledge base.
|
||||
- 2024-11-22 Adds more variables to Agent.
|
||||
- 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
|
||||
- 2024-09-13 Adds search mode for knowledge base Q&A.
|
||||
- 2024-08-22 Support text to SQL statements through RAG.
|
||||
- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
|
||||
|
||||
## 🎉 Stay Tuned
|
||||
|
||||
⭐️ Star our repository to stay up-to-date with exciting new features and improvements! Get instant notifications for new
|
||||
releases! 🌟
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
|
||||
</div>
|
||||
@ -133,7 +140,7 @@ releases! 🌟
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> If you have not installed Docker on your local machine (Windows, Mac, or Linux),
|
||||
see [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||
> see [Install Docker Engine](https://docs.docker.com/engine/install/).
|
||||
|
||||
### 🚀 Start up the server
|
||||
|
||||
@ -153,7 +160,7 @@ releases! 🌟
|
||||
> ```
|
||||
>
|
||||
> This change will be reset after a system reboot. To ensure your change remains permanent, add or update the
|
||||
`vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
> `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
|
||||
>
|
||||
> ```bash
|
||||
> vm.max_map_count=262144
|
||||
@ -165,29 +172,25 @@ releases! 🌟
|
||||
$ git clone https://github.com/infiniflow/ragflow.git
|
||||
```
|
||||
|
||||
3. Start up the server using the pre-built Docker images:

   > [!CAUTION]
   > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
   > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

   > The command below downloads the `v0.17.1-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.17.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` for the full edition `v0.17.1`.

   ```bash
   $ cd ragflow/docker
   $ docker compose -f docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.17.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
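   To pin a different edition before starting the server, you can edit the `RAGFLOW_IMAGE` line in **docker/.env** by hand or script the change. A minimal sketch (assumes GNU `sed` and that you run it from the repository root):

   ```bash
   # Hypothetical helper: point docker compose at the full v0.17.1 edition.
   # Substitute any tag from the table above.
   sed -i 's|^RAGFLOW_IMAGE=.*|RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1|' docker/.env
   grep '^RAGFLOW_IMAGE' docker/.env  # verify before rerunning `docker compose up -d`
   ```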
4. Check the server status after having the server up and running:
   ```bash
   $ docker logs -f ragflow-server
   ```

   _The following output confirms a successful launch of the system:_

   ```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```
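   If you would rather script this check than watch the logs, a minimal sketch (assumes the default HTTP port `80` on the host running RAGFlow; adjust the URL if you changed the port mapping):

   ```bash
   # Poll until the web server answers, then proceed to log in.
   until curl -sSf -o /dev/null http://127.0.0.1:80; do
     echo "RAGFlow is not ready yet; retrying in 5 seconds..."
     sleep 5
   done
   echo "RAGFlow is up."
   ```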
   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
   > error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.

   > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default
   > HTTP serving port `80` can be omitted when using the default configurations.

6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update
   the `API_KEY` field with the corresponding API key.
To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80`
to `<YOUR_SERVING_PORT>:80`.

Updates to the above configurations require a reboot of all containers to take effect:

> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```
### Switch doc engine from Elasticsearch to Infinity

RAGFlow uses Elasticsearch by default for storing full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:

1. Stop all running containers:

   ```bash
   $ docker compose -f docker/docker-compose.yml down -v
   ```

   > [!WARNING]
   > `-v` will delete the docker container volumes, and the existing data will be cleared.

2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

3. Start the containers:

   ```bash
   $ docker compose -f docker-compose.yml up -d
   ```

   > [!WARNING]
   > Switching to Infinity on a Linux/arm64 machine is not yet officially supported.
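Taken together, a minimal end-to-end switch might look like the sketch below. The `sed` edit is just one way to flip the flag and assumes a `DOC_ENGINE=` line already exists in **docker/.env**; editing the file by hand works equally well:

```bash
cd ragflow
docker compose -f docker/docker-compose.yml down -v   # WARNING: -v clears existing data
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' docker/.env
docker compose -f docker/docker-compose.yml up -d
```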
## 🔧 Build a Docker image without embedding models

This image is approximately 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```
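After the build finishes, you can confirm the tag and its size before using it (a quick check; the size should be roughly the 2 GB noted above):

```bash
docker images infiniflow/ragflow
```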
## 🔧 Build a Docker image including embedding models

This image is approximately 9 GB in size. As it includes embedding models, it relies on external LLM services only.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```
## 🔨 Launch service from source for development

1. Install uv, or skip this step if it is already installed:

   ```bash
   pipx install uv
   ```

2. Clone the source code and install Python dependencies:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   ```
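   To confirm the environment uv created, activate it and check the interpreter version (a quick sanity check; `.venv` is the project-local environment uv creates by default):

   ```bash
   source .venv/bin/activate
   python --version   # expect Python 3.10.x, matching the --python flag above
   ```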
3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

   ```
   127.0.0.1       es01 infinity mysql minio redis
   ```

   In **docker/service_conf.yaml.template**, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```
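   The `export` only affects the current shell. To persist the mirror setting across sessions, a minimal sketch (assumes bash as your login shell):

   ```bash
   echo 'export HF_ENDPOINT=https://hf-mirror.com' >> ~/.bashrc
   ```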
5. Launch backend service:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```
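   Once the script is running, you can verify the backend is listening before moving on to the frontend (a quick check; port `9380` is the default shown in the startup banner above):

   ```bash
   curl -s -o /dev/null http://127.0.0.1:9380 && echo "backend is listening on 9380"
   ```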
6. Install frontend dependencies:

   ```bash
   cd web
   npm install
   ```

7. Launch frontend service:

   ```bash
   npm run dev
   ```
   _The following output confirms a successful launch of the system:_

   

## 📜 Roadmap

See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Community

---

**README_id.md**
<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>
<p align="center">
  <a href="https://demo.ragflow.io" target="_blank">
    <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
  </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary><b>📕 Daftar Isi</b></summary>
- 💡 [Apa Itu RAGFlow?](#-apa-itu-ragflow)
- 🎮 [Demo](#-demo)

## 🎮 Demo

Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Pembaruan Terbaru

- 2025-02-28 Dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
- 2025-02-05 Memperbarui daftar model 'SILICONFLOW' dan menambahkan dukungan untuk Deepseek-R1/DeepSeek-V3.
- 2025-01-26 Mengoptimalkan ekstraksi dan penerapan grafik pengetahuan serta menyediakan berbagai opsi konfigurasi.
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
- 2024-12-04 Mendukung skor pagerank untuk basis pengetahuan.
- 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
- 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.

## 🎉 Tetap Terkini

⭐️ Star repositori kami untuk tetap mendapat informasi tentang fitur baru dan peningkatan menarik! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>
> ```
>
> Perubahan ini akan hilang setelah sistem direboot. Untuk membuat perubahan ini permanen, tambahkan atau perbarui nilai
> `vm.max_map_count` di **/etc/sysctl.conf**:
>
> ```bash
> vm.max_map_count=262144
> ```
3. Jalankan server menggunakan image Docker pre-built:

   > [!CAUTION]
   > Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
   > Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).

   > Perintah di bawah ini mengunduh edisi `v0.17.1-slim` dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari `v0.17.1-slim`, perbarui variabel `RAGFLOW_IMAGE` di **docker/.env** sebelum menggunakan `docker compose` untuk memulai server. Misalnya, atur `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` untuk edisi lengkap `v0.17.1`.

   ```bash
   $ cd ragflow/docker
   $ docker compose -f docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.17.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

4. Periksa status server setelah server aktif dan berjalan:
   ```bash
   $ docker logs -f ragflow-server
   ```

   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   ```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```
   > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
   > karena RAGFlow mungkin belum sepenuhnya siap.

5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.

   > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
   > port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.

6. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
   bidang `API_KEY` dengan kunci API yang sesuai.

   > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.
Untuk konfigurasi sistem, Anda perlu mengelola file-file berikut:

- [.env](./docker/.env): Menyimpan pengaturan dasar sistem, seperti `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, dan
  `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Mengonfigurasi aplikasi backend.
- [docker-compose.yml](./docker/docker-compose.yml): Sistem ini bergantung pada [docker-compose.yml](./docker/docker-compose.yml) untuk memulai.

Untuk memperbarui port HTTP default (80), buka [docker-compose.yml](./docker/docker-compose.yml) dan ubah `80:80`
menjadi `<YOUR_SERVING_PORT>:80`.

Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:

> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```
## 🔧 Membangun Docker Image tanpa Model Embedding

Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan embedding.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```
## 🔧 Membangun Docker Image Termasuk Model Embedding

Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hanya memerlukan layanan LLM eksternal.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Menjalankan Aplikasi dari Kode Sumber untuk Pengembangan

1. Instal uv, atau lewati langkah ini jika sudah terinstal:

   ```bash
   pipx install uv
   ```

2. Clone kode sumber dan instal dependensi Python:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install modul python dependensi RAGFlow
   ```
3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:

   ```
   127.0.0.1       es01 infinity mysql minio redis
   ```

   Di **docker/service_conf.yaml**, perbarui port mysql ke `5455` dan port es ke `1200`, sesuai dengan yang ditentukan di **docker/.env**.

4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:
   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Jalankan aplikasi backend:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```
6. Instal dependensi frontend:

   ```bash
   cd web
   npm install
   ```

7. Jalankan aplikasi frontend:

   ```bash
   npm run dev
   ```
   _Output berikut menandakan bahwa sistem berhasil diluncurkan:_

   

## 📜 Roadmap

Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Komunitas

## 🙌 Kontribusi

RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).

---

**README_ja.md**
<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>
<p align="center">
  <a href="https://demo.ragflow.io" target="_blank">
    <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
  </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Document</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
## 🎮 Demo

デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 最新情報

- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
- 2025-02-05 SILICONFLOW のモデルリストを更新し、Deepseek-R1/DeepSeek-V3 のサポートを追加しました。
- 2025-01-26 ナレッジグラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
- 2024-12-18 DeepDoc のドキュメントレイアウト分析モデルをアップグレードしました。
- 2024-12-04 ナレッジベースへのページランクスコアをサポートしました。
- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
- 2024-09-13 ナレッジベース Q&A の検索モードを追加しました。
- 2024-08-22 RAG を介したテキストから SQL ステートメントへの変換をサポートします。
- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインドマップをサポートします。

## 🎉 続きを楽しみに

⭐️ リポジトリをスター登録して、エキサイティングな新機能やアップデートを最新の状態に保ちましょう!すべての新しいリリースに関する即時通知を受け取れます! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>
3. ビルド済みの Docker イメージを使ってサーバーを起動する:

   > [!CAUTION]
   > 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
   > ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。

   > 以下のコマンドは、RAGFlow Docker イメージの v0.17.1-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.17.1-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.17.1 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1 と設定します。

   ```bash
   $ cd ragflow/docker
   $ docker compose -f docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.17.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

4. サーバーを立ち上げた後、サーバーの状態を確認する:
   ```bash
   $ docker logs -f ragflow-server
   ```

   _以下の出力は、システムが正常に起動したことを確認するものです:_

   ```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```

   > もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。

   > デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。

6. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

   > 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
システムコンフィグに関しては、以下のファイルを管理する必要がある:

- [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
- [service_conf.yaml.template](./docker/service_conf.yaml.template): バックエンドのサービスを設定します。
- [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存している。

[.env](./docker/.env) ファイルの変更が [service_conf.yaml.template](./docker/service_conf.yaml.template) ファイルの内容と一致していることを確認する必要があります。

> [./docker/README](./docker/README.md) には、service_conf.yaml.template ファイルで ${ENV_VARS} として使用できる環境設定とサービス構成の詳細な説明が含まれています。

デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。

> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
>
> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```
### Elasticsearch から Infinity にドキュメントエンジンを切り替えます

RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトルを保存します。[Infinity](https://github.com/infiniflow/infinity/) に切り替えるには、次の手順に従ってください:

1. 実行中のすべてのコンテナを停止します:

   ```bash
   $ docker compose -f docker/docker-compose.yml down -v
   ```

   Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。

2. **docker/.env** の `DOC_ENGINE` を `infinity` に設定します。

3. 起動コンテナ:

   ```bash
   $ docker compose -f docker-compose.yml up -d
   ```

   > [!WARNING]
   > Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
## 🔧 ソースコードで Docker イメージを作成(埋め込みモデルなし)

この Docker イメージのサイズは約 2GB で、外部の大モデルと埋め込みサービスに依存しています。

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)

この Docker のサイズは約 9GB で、埋め込みモデルを含むため、外部の大モデルサービスのみが必要です。

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```
## 🔨 ソースコードからサービスを起動する方法

1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:

   ```bash
   pipx install uv
   ```

2. ソースコードをクローンし、Python の依存関係をインストールする:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   ```
3. Docker Compose を使用して依存サービス(MinIO、Elasticsearch、Redis、MySQL)を起動する:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   `/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:

   ```
   127.0.0.1       es01 infinity mysql minio redis
   ```

   **docker/service_conf.yaml** で mysql のポートを `5455` に、es のポートを `1200` に更新します(**docker/.env** に指定された通り)。

4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:

   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```
5. バックエンドサービスを起動する:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```

6. フロントエンドの依存関係をインストールする:

   ```bash
   cd web
   npm install
   ```

7. フロントエンドサービスを起動する:

   ```bash
   npm run dev
   ```

   _以下の画面で、システムが正常に起動したことを示します:_

   
## 📜 ロードマップ

[RAGFlow ロードマップ 2025](https://github.com/infiniflow/ragflow/issues/4214) を参照

## 🏄 コミュニティ

---

**README_ko.md**
<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>
<p align="center">
  <a href="https://demo.ragflow.io" target="_blank">
    <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
  </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Document</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow란?

[RAGFlow](https://ragflow.io/)는 심층 문서 이해에 기반한 오픈소스 RAG (Retrieval-Augmented Generation) 엔진입니다. 이 엔진은 대규모 언어 모델(LLM)과 결합하여 정확한 질문 응답 기능을 제공하며, 다양한 복잡한 형식의 데이터에서 신뢰할 수 있는 출처를 바탕으로 한 인용을 통해 이를 뒷받침합니다. RAGFlow는 규모에 상관없이 모든 기업에 최적화된 RAG 워크플로우를 제공합니다.

## 🎮 데모

데모를 [https://demo.ragflow.io](https://demo.ragflow.io)에서 실행해 보세요.

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>
## 🔥 업데이트

- 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
- 2025-02-05 'SILICONFLOW' 모델 목록을 업데이트하고 Deepseek-R1/DeepSeek-V3에 대한 지원을 추가합니다.
- 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
- 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.
- 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.

## 🎉 계속 지켜봐 주세요

⭐️ 우리의 저장소를 즐겨찾기에 등록하여 흥미로운 새로운 기능과 업데이트를 최신 상태로 유지하세요! 모든 새로운 릴리스에 대한 즉시 알림을 받으세요! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>
## 🌟 주요 기능

### 🍭 **"Quality in, quality out"**

- [심층 문서 이해](./deepdoc/README.md)를 기반으로 복잡한 형식의 비정형 데이터에서 지식을 추출합니다.
- 문자 그대로 무한한 토큰에서 "데이터 속의 바늘"을 찾아냅니다.

### 🍱 **템플릿 기반의 chunking**

- 똑똑하고 설명 가능한 방식.
- 다양한 템플릿 옵션을 제공합니다.

### 🌱 **할루시네이션을 줄인 신뢰할 수 있는 인용**

- 텍스트 청킹을 시각화하여 사용자가 개입할 수 있도록 합니다.
- 중요한 참고 자료와 추적 가능한 인용을 빠르게 확인하여 신뢰할 수 있는 답변을 지원합니다.

### 🍔 **다른 종류의 데이터 소스와의 호환성**

- 워드, 슬라이드, 엑셀, 텍스트 파일, 이미지, 스캔본, 구조화된 데이터, 웹 페이지 등을 지원합니다.

### 🛀 **자동화되고 손쉬운 RAG 워크플로우**

- 개인 및 대규모 비즈니스에 맞춘 효율적인 RAG 오케스트레이션.
- 구성 가능한 LLM 및 임베딩 모델.
- 다중 검색과 결합된 re-ranking.
- 비즈니스와 원활하게 통합할 수 있는 직관적인 API.

## 🔎 시스템 아키텍처

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 시작하기

### 📝 사전 준비 사항

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1

> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.

### 🚀 서버 시작하기

1. `vm.max_map_count`가 262144 이상인지 확인하세요:

   > `vm.max_map_count`의 값을 아래 명령어를 통해 확인하세요:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
3. 미리 빌드된 Docker 이미지로 서버를 시작하세요:

   > [!CAUTION]
   > 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
   > ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).

   > 아래 명령어는 RAGFlow Docker 이미지의 v0.17.1-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.17.1-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.17.1을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1로 설정합니다.

   ```bash
   $ cd ragflow/docker
   $ docker compose -f docker-compose.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.17.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

4. 서버가 시작된 후 서버 상태를 확인하세요:
   ```bash
   $ docker logs -f ragflow-server
   ```

   _다음 출력 결과로 시스템이 성공적으로 시작되었음을 확인합니다:_

   ```bash
        ____   ___    ______ ______ __
       / __ \ /   |  / ____// ____// /____  _      __
      / /_/ // /| | / / __ / /_   / // __ \| | /| / /
     / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

    * Running on all addresses (0.0.0.0)
    * Running on http://127.0.0.1:9380
    * Running on http://x.x.x.x:9380
    INFO:werkzeug:Press CTRL+C to quit
   ```

   > 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.

5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.

   > 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.

6. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.

   > 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.
_이제 쇼가 시작됩니다!_

시스템 설정과 관련하여 다음 파일들을 관리해야 합니다:

- [.env](./docker/.env): `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, `MINIO_PASSWORD`와 같은 시스템의 기본 설정을 포함합니다.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): 백엔드 서비스를 구성합니다.
- [docker-compose.yml](./docker/docker-compose.yml): 시스템은 [docker-compose.yml](./docker/docker-compose.yml)을 사용하여 시작됩니다.

[.env](./docker/.env) 파일의 변경 사항이 [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일의 내용과 일치하도록 해야 합니다.

> [./docker/README](./docker/README.md) 파일은 service_conf.yaml.template 파일에서 ${ENV_VARS}로 사용할 수 있는 환경 설정과 서비스 구성에 대한 자세한 설명을 제공합니다.

기본 HTTP 서비스 포트(80)를 업데이트하려면 [docker-compose.yml](./docker/docker-compose.yml) 파일에서 `80:80`을 `<YOUR_SERVING_PORT>:80`으로 변경하세요.

> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
>
> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```
### Elasticsearch 에서 Infinity 로 문서 엔진 전환

RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및 벡터를 저장합니다. [Infinity](https://github.com/infiniflow/infinity/)로 전환하려면 다음 절차를 따르십시오.

1. 실행 중인 모든 컨테이너를 중지합니다:

   ```bash
   $ docker compose -f docker/docker-compose.yml down -v
   ```

   Note: `-v` 는 docker 컨테이너의 볼륨을 삭제하고 기존 데이터를 지웁니다.

2. **docker/.env**의 `DOC_ENGINE` 을 `infinity` 로 설정합니다.

3. 컨테이너 부팅:

   ```bash
   $ docker compose -f docker/docker-compose.yml up -d
   ```

   > [!WARNING]
   > Linux/arm64 시스템에서 Infinity로 전환하는 것은 공식적으로 지원되지 않습니다.
## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함하지 않음)

이 Docker 이미지의 크기는 약 2GB이며, 외부 대형 모델과 임베딩 서비스에 의존합니다.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)

이 Docker 이미지의 크기는 약 9GB이며, 임베딩 모델을 포함하므로 외부 대형 모델 서비스만 필요합니다.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```
## 🔨 소스 코드로 서비스를 시작합니다.

1. uv를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:

   ```bash
   pipx install uv
   ```

2. 소스 코드를 클론하고 Python 의존성을 설치합니다:

   ```bash
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   ```
3. Docker Compose를 사용하여 의존 서비스(MinIO, Elasticsearch, Redis 및 MySQL)를 시작합니다:

   ```bash
   docker compose -f docker/docker-compose-base.yml up -d
   ```

   `/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:

   ```
   127.0.0.1       es01 infinity mysql minio redis
   ```

   **docker/service_conf.yaml** 에서 mysql 포트를 `5455` 로, es 포트를 `1200` 으로 업데이트합니다(**docker/.env** 에 지정된 대로).

4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:

   ```bash
   export HF_ENDPOINT=https://hf-mirror.com
   ```
5. 백엔드 서비스를 시작합니다:

   ```bash
   source .venv/bin/activate
   export PYTHONPATH=$(pwd)
   bash docker/launch_backend_service.sh
   ```

6. 프론트엔드 의존성을 설치합니다:

   ```bash
   cd web
   npm install
   ```

7. 프론트엔드 서비스를 시작합니다:

   ```bash
   npm run dev
   ```

   _다음 인터페이스는 시스템이 성공적으로 시작되었음을 나타냅니다:_

   
## 📜 로드맵

[RAGFlow 로드맵 2025](https://github.com/infiniflow/ragflow/issues/4214)을 확인하세요.

## 🏄 커뮤니티

---

**README_pt_br.md** (new file)
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>
<p align="center">
  <a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
    <img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="seguir no X(Twitter)">
  </a>
  <a href="https://demo.ragflow.io" target="_blank">
    <img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
  </a>
  <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
  </a>
  <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Release" alt="Última Versão">
  </a>
  <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
    <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="licença">
  </a>
</p>
<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Documentação</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary><b>📕 Índice</b></summary>

- 💡 [O que é o RAGFlow?](#-o-que-é-o-ragflow)
- 🎮 [Demo](#-demo)
- 📌 [Últimas Atualizações](#-últimas-atualizações)
- 🌟 [Principais Funcionalidades](#-principais-funcionalidades)
- 🔎 [Arquitetura do Sistema](#-arquitetura-do-sistema)
- 🎬 [Primeiros Passos](#-primeiros-passos)
- 🔧 [Configurações](#-configurações)
- 🔧 [Construir uma imagem docker sem incorporar modelos](#-construir-uma-imagem-docker-sem-incorporar-modelos)
- 🔧 [Construir uma imagem docker incluindo modelos](#-construir-uma-imagem-docker-incluindo-modelos)
- 🔨 [Lançar serviço a partir do código-fonte para desenvolvimento](#-lançar-serviço-a-partir-do-código-fonte-para-desenvolvimento)
- 📚 [Documentação](#-documentação)
- 📜 [Roadmap](#-roadmap)
- 🏄 [Comunidade](#-comunidade)
- 🙌 [Contribuindo](#-contribuindo)

</details>
## 💡 O que é o RAGFlow?

[RAGFlow](https://ragflow.io/) é um mecanismo RAG (Geração Aumentada por Recuperação) de código aberto baseado em entendimento profundo de documentos. Ele oferece um fluxo de trabalho RAG simplificado para empresas de qualquer porte, combinando LLMs (Modelos de Linguagem de Grande Escala) para fornecer capacidades de perguntas e respostas verídicas, respaldadas por citações bem fundamentadas de diversos dados complexos formatados.

## 🎮 Demo

Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
  <img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Últimas Atualizações

- 28-02-2025 Combinado com a pesquisa na Internet (TAVILY), suporta pesquisas profundas para qualquer LLM.
- 05-02-2025 Atualiza a lista de modelos de 'SILICONFLOW' e adiciona suporte para Deepseek-R1/DeepSeek-V3.
- 26-01-2025 Otimiza a extração e aplicação de grafos de conhecimento e fornece uma variedade de opções de configuração.
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
- 04-12-2024 Adiciona suporte para pontuação de pagerank na base de conhecimento.
- 22-11-2024 Adiciona mais variáveis para o Agente.
- 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
- 22-08-2024 Suporta conversão de texto para comandos SQL via RAG.

## 🎉 Fique Ligado

⭐️ Dê uma estrela no nosso repositório para se manter atualizado com novas funcionalidades e melhorias empolgantes! Receba notificações instantâneas sobre novos lançamentos! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>
## 🌟 Principais Funcionalidades

### 🍭 **"Qualidade entra, qualidade sai"**

- Extração de conhecimento baseada em [entendimento profundo de documentos](./deepdoc/README.md) a partir de dados não estruturados com formatos complicados.
- Encontra a "agulha no palheiro de dados" de literalmente tokens ilimitados.

### 🍱 **Fragmentação baseada em templates**

- Inteligente e explicável.
- Muitas opções de templates para escolher.

### 🌱 **Citações fundamentadas com menos alucinações**

- Visualização da fragmentação de texto para permitir intervenção humana.
- Visualização rápida das referências chave e citações rastreáveis para apoiar respostas fundamentadas.

### 🍔 **Compatibilidade com fontes de dados heterogêneas**

- Suporta Word, apresentações, excel, txt, imagens, cópias digitalizadas, dados estruturados, páginas da web e mais.

### 🛀 **Fluxo de trabalho RAG automatizado e sem esforço**

- Orquestração RAG simplificada voltada tanto para negócios pessoais quanto grandes empresas.
- Modelos LLM e de incorporação configuráveis.
- Múltiplas recuperações emparelhadas com reclassificação fundida.
- APIs intuitivas para integração sem problemas com os negócios.

## 🔎 Arquitetura do Sistema

<div align="center" style="margin-top:20px;margin-bottom:20px;">
  <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>
## 🎬 Primeiros Passos

### 📝 Pré-requisitos

- CPU >= 4 núcleos
- RAM >= 16 GB
- Disco >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1

> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Iniciar o servidor

1. Certifique-se de que `vm.max_map_count` >= 262144:

   > Para verificar o valor de `vm.max_map_count`:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
   >
   > Se necessário, redefina `vm.max_map_count` para um valor de pelo menos 262144:
   >
   > ```bash
   > # Neste caso, defina para 262144:
   > $ sudo sysctl -w vm.max_map_count=262144
   > ```
   >
   > Essa mudança será resetada após a reinicialização do sistema. Para garantir que a alteração permaneça permanente, adicione ou atualize o valor de `vm.max_map_count` em **/etc/sysctl.conf**:
   >
   > ```bash
   > vm.max_map_count=262144
   > ```

2. Clone o repositório:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```
3. Inicie o servidor usando as imagens Docker pré-compiladas:
|
||||
|
||||
> [!CAUTION]
|
||||
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
|
||||
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.
|
||||
|
||||
> O comando abaixo baixa a edição `v0.17.1-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.17.1-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` para a edição completa `v0.17.1`.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
|
||||
| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
|
||||
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
|
||||
| v0.17.1 | ~9 | :heavy_check_mark: | Lançamento estável |
|
||||
| v0.17.1-slim | ~2 | ❌ | Lançamento estável |
|
||||
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
|
||||
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |
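
If you prefer to script the edition switch described above, a sketch like the following edits **docker/.env** in place before starting the stack; it assumes GNU sed and that the file already defines `RAGFLOW_IMAGE` on its own line.

```bash
# Point RAGFLOW_IMAGE at the full v0.17.1 edition, then start the server
cd ragflow/docker
sed -i 's|^RAGFLOW_IMAGE=.*|RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1|' .env
docker compose -f docker-compose.yml up -d
```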

4. Check the server status after having the server up and running:

```bash
$ docker logs -f ragflow-server
```

_The following output confirms a successful launch of the system:_

```bash
     ____   ___    ______ ______ __
    / __ \ /   |  / ____// ____// /____  _      __
   / /_/ // /| | / / __ / /_   / // __ \| | /| / /
  / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
 /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

 * Running on all addresses (0.0.0.0)
```

> If you skip this confirmation step and log in to RAGFlow directly, your browser may prompt a `network anormal` error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.

> With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**without** the port number), as the default HTTP serving port `80` can be omitted when using the default configurations.

6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.

_The show is on!_

## 🔧 Configurations

When it comes to system configurations, you will need to manage the following files:

- [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services. The environment variables in this file are automatically populated when the Docker container starts. Any environment variables set within the Docker container are available for use, allowing you to customize service behavior based on the deployment environment; see the sketch after the note below.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.

> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations, which can be used as `${ENV_VARS}` in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file.
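
As a sketch of that templating mechanism: values placed in **docker/.env** become container environment variables and are substituted into the template at startup. `MYSQL_PASSWORD` is one of the variables named above; consult [./docker/README](./docker/README.md) for the full list before relying on any particular name.

```bash
# Override a templated service setting at startup (MYSQL_PASSWORD is cited in the list above;
# see ./docker/README.md for the variables the template actually reads).
# If the variable already exists in .env, edit that line instead of appending.
cd ragflow/docker
echo 'MYSQL_PASSWORD=my_stronger_password' >> .env
docker compose -f docker-compose.yml up -d   # the template is re-rendered when the container starts
```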

To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.
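
For example, the following sketch remaps the published port to 8080 (an arbitrary choice) using GNU sed; editing the file by hand works just as well.

```bash
# Publish RAGFlow on host port 8080 instead of 80 (GNU sed; 8080 is just an example)
sed -i 's/80:80/8080:80/' docker/docker-compose.yml
```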

Updates to the above configurations require a restart of all containers to take effect:

> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```

### Switch the document engine from Elasticsearch to Infinity

RAGFlow uses Elasticsearch by default for storing full text and vectors. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:

1. Stop all running containers:

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` will delete the container volumes, and the existing data will be erased.
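
If you want to stop the stack without erasing data in other situations, simply omit the `-v` flag; note, though, that switching document engines expects a clean state, which is why the command above includes it.

```bash
# Stop the stack but keep the named volumes (existing data survives)
docker compose -f docker/docker-compose.yml down
```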

2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
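
A one-line way to make that edit, assuming GNU sed and that **docker/.env** already defines `DOC_ENGINE`:

```bash
# Flip the document engine setting in docker/.env (GNU sed)
sed -i 's/^DOC_ENGINE=.*/DOC_ENGINE=infinity/' docker/.env
```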

3. Start the containers:

```bash
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.

## 🔧 Build a Docker image without embedding models

This image is approximately 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Build a Docker image including embedding models

This image is approximately 9 GB in size. As it includes embedding models, it relies on external LLM services only.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Launch the service from source for development

1. Install `uv`, or skip this step if it is already installed:

```bash
pipx install uv
```

2. Clone the source code and install Python dependencies:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

```
127.0.0.1       es01 infinity mysql minio redis
```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. Launch the backend service:

```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```
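
To check that the backend is actually listening, you can probe its HTTP port; the `9380` port below comes from the sample server log shown elsewhere in this repository's READMEs (`Running on http://127.0.0.1:9380`), so adjust it if your configuration differs.

```bash
# Probe the backend port; any HTTP status code means the service is answering
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:9380/
```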

6. Install frontend dependencies:

```bash
cd web
npm install
```

7. Launch the frontend service:

```bash
npm run dev
```

_The following output confirms a successful launch of the system:_



## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214).

## 🏄 Community

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contributing

RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community.
If you would like to be a part of it, review our [Contribution Guidelines](./CONTRIBUTING.md) first.
356  README_tzh.md  (new file)
@@ -0,0 +1,356 @@

<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
</a>
</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>

<p align="center">
    <a href="https://x.com/intent/follow?screen_name=infiniflowai" target="_blank">
        <img src="https://img.shields.io/twitter/follow/infiniflow?logo=X&color=%20%23f5f5f5" alt="follow on X(Twitter)">
    </a>
    <a href="https://demo.ragflow.io" target="_blank">
        <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
    </a>
    <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
    </a>
    <a href="https://github.com/infiniflow/ragflow/releases/latest">
        <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
    </a>
    <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
        <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
    </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Document</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine built on deep document understanding. RAGFlow offers a streamlined RAG workflow for enterprises of any scale as well as individuals, combining LLMs to provide reliable question answering with well-grounded citations over all kinds of complex-format user data.

## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Latest Updates

- 2025-02-28 Combined Internet search (Tavily) to implement Deep Research-like reasoning with any LLM.
- 2025-02-05 Updated the "SILICONFLOW" model list and added support for Deepseek-R1/DeepSeek-V3.
- 2025-01-26 Optimized knowledge graph extraction and application, offering multiple configuration options.
- 2024-12-18 Upgraded the document layout analysis model in DeepDoc.
- 2024-12-04 Added support for PageRank scores in knowledge bases.
- 2024-11-22 Improved the definition and usage of variables in Agent.
- 2024-11-01 Added keyword extraction and related-question generation to parsed chunks to improve recall accuracy.
- 2024-08-22 Supported text-to-SQL conversion with RAG.

## 🎉 Stay Tuned

⭐️ Star our repository by clicking the Star button at the top right to get instant notifications of new releases! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 Key Features

### 🍭 **"Quality in, quality out"**

- Based on [deep document understanding](./deepdoc/README.md), able to extract real insight from unstructured data with complicated formats.
- Quickly completes needle-in-a-haystack tests in scenarios with effectively unlimited context (tokens).

### 🍱 **Template-based chunking**

- Not just intelligent, but, more importantly, controllable and explainable.
- Multiple text templates to choose from.

### 🌱 **Grounded citations with reduced hallucination**

- Visualized text chunking with support for manual adjustment.
- Well-grounded answers: responses provide snapshots of the key citations and support tracing back to the source.

### 🍔 **Compatibility with heterogeneous data sources**

- Supports a rich set of file types, including Word documents, PPT, excel sheets, txt files, images, PDFs, scanned copies, structured data, web pages, and more.

### 🛀 **Automated and effortless RAG workflow**

- Fully optimized RAG workflows that support everything from personal applications to very large enterprise ecosystems.
- Both LLMs and embedding models are configurable.
- Based on multiple recall with fused re-ranking.
- Easy-to-use APIs for straightforward integration into all kinds of enterprise systems.

## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Get Started

### 📝 Prerequisites

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1

> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/) to install it yourself.

### 🚀 Start up the server

1. Ensure `vm.max_map_count` is no less than 262144:

> To check the value of `vm.max_map_count`:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> If `vm.max_map_count` is less than 262144, reset it:
>
> ```bash
> # Here we set it to 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> Your change will be reset at the next system reboot. To make the change permanent, also update the `vm.max_map_count` value in the **/etc/sysctl.conf** file accordingly:
>
> ```bash
> vm.max_map_count=262144
> ```

2. Clone the repo:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. Enter the **docker** folder and start the server using the pre-built Docker images:

> [!CAUTION]
> All Docker images are built for x86 platforms; we do not currently offer ARM64 Docker images.
> If you are on an ARM64 platform, use [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

> Running the command below automatically downloads the RAGFlow slim Docker image `v0.17.1-slim`. Refer to the table below for descriptions of the different Docker releases. To download a Docker image different from `v0.17.1-slim`, update the `RAGFLOW_IMAGE` variable in the **docker/.env** file before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` to download the full `v0.17.1` release of the RAGFlow image.

```bash
$ cd ragflow/docker
$ docker compose -f docker-compose.yml up -d
```

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.17.1           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.17.1-slim      | ≈2              | ❌                    | Stable release           |
| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

> [!TIP]
> If you run into problems pulling the Docker image, you can pick the corresponding Huawei Cloud or Alibaba Cloud image in the **docker/.env** file, following the comments next to the `RAGFLOW_IMAGE` variable; a sketch follows this list.
>
> - Huawei Cloud image name: `swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
> - Alibaba Cloud image name: `registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`
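
A sketch of that switch, using the Huawei Cloud name from the list above; it assumes GNU sed and keeps the same `v0.17.1-slim` tag.

```bash
# Pull RAGFlow from the Huawei Cloud mirror instead of Docker Hub (GNU sed)
sed -i 's|^RAGFLOW_IMAGE=.*|RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:v0.17.1-slim|' docker/.env
docker compose -f docker/docker-compose.yml up -d
```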

4. Check the server status after it is up and running:

```bash
$ docker logs -f ragflow-server
```

_The following output confirms a successful launch of the system:_

```bash
     ____   ___    ______ ______ __
    / __ \ /   |  / ____// ____// /____  _      __
   / /_/ // /| | / / __ / /_   / // __ \| | /| / /
  / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
 /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

 * Running on all addresses (0.0.0.0)
```

> If you skip this system confirmation step and log in to RAGFlow right away, your browser may prompt a `network anormal` error because RAGFlow may not have fully started.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.

> In the example above, you only need to enter http://IP_OF_YOUR_MACHINE: the port can be omitted if you have not changed the configuration (the default HTTP serving port is 80).

6. In the `user_default_llm` field of [service_conf.yaml.template](./docker/service_conf.yaml.template), set the LLM factory, and fill the `API_KEY` field with the API key that corresponds to your chosen model.

> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.

_The show is on!_

## 🔧 Configurations

System configuration involves the following three files:

- [.env](./docker/.env): Keeps some system environment variables, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on this file to start up.

Make sure that the variable settings in the [.env](./docker/.env) file are consistent with the configurations in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file!

If you cannot access the image site hub.docker.com or the model site huggingface.co, update `RAGFLOW_IMAGE` and `HF_ENDPOINT` following the comments in [.env](./docker/.env).

> [./docker/README](./docker/README.md) explains the environment variable settings and service configurations used by [service_conf.yaml.template](./docker/service_conf.yaml.template).

To update the default HTTP serving port (80), change `80:80` in [docker-compose.yml](./docker/docker-compose.yml) to `<YOUR_SERVING_PORT>:80`.

> All system configuration updates take effect after a system restart:
>
> ```bash
> $ docker compose -f docker-compose.yml up -d
> ```

### Switch the document engine from Elasticsearch to Infinity

RAGFlow uses Elasticsearch by default for storing text and vector data. To switch to [Infinity](https://github.com/infiniflow/infinity/), follow these steps:

1. Stop all running containers:

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` will delete the Docker container volumes, and the existing data will be erased.

2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

3. Start the containers:

```bash
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> Running Infinity on a Linux/arm64 machine is not yet officially supported.

## 🔧 Build a Docker image from source (without embedding models)

This Docker image is about 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Build a Docker image from source (including embedding models)

This Docker image is about 9 GB in size. As it includes embedding models, it only needs to rely on external LLM services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Launch the service from source

1. Install uv, or skip this step if it is already installed:

```bash
pipx install uv
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
```

2. Clone the source code and install Python dependencies:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

Add the following line to `/etc/hosts` to resolve all host addresses in the **conf/service_conf.yaml** file to `127.0.0.1`:

```
127.0.0.1       es01 infinity mysql minio redis
```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to the corresponding mirror site:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. Launch the backend service:

```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

6. Install frontend dependencies:

```bash
cd web
npm install
```

7. Launch the frontend service:

```bash
npm run dev
```

_The following output confirms a successful launch of the system:_



## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214).

## 🏄 Community

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contributing

RAGFlow flourishes only through open-source collaboration. In this spirit, we welcome all kinds of contributions from the community. If you would like to take part, review our [Contribution Guidelines](./CONTRIBUTING.md) first.

## 🤝 Business Inquiries

- [Book a consultation](https://aao615odquw.feishu.cn/share/base/form/shrcnjw7QleretCLqh1nuPo1xxh)

## 👥 Join the Community

Scan the QR code to add the RAGFlow assistant and join the RAGFlow chat group.

<p align="center">
  <img src="https://github.com/infiniflow/ragflow/assets/7248/bccf284f-46f2-4445-9809-8f1030fb7585" width=50% height=50%>
</p>
133  README_zh.md
@@ -7,9 +7,11 @@
<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_tzh.md">繁体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a> |
  <a href="./README_id.md">Bahasa Indonesia</a>
  <a href="./README_id.md">Bahasa Indonesia</a> |
  <a href="/README_pt_br.md">Português (Brasil)</a>
</p>

<p align="center">
@@ -20,7 +22,7 @@
    <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
    </a>
    <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.14.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.14.1">
    <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.1-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.1">
    </a>
    <a href="https://github.com/infiniflow/ragflow/releases/latest">
    <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,10 +32,9 @@
    </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Document</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
@@ -46,27 +47,31 @@
## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
</div>

## 🔥 Latest Updates

- 2025-02-28 Combined Internet search (Tavily) to implement Deep Research-like reasoning with any LLM.
- 2025-02-05 Updated the SiliconFlow model list and added support for Deepseek-R1/DeepSeek-V3.
- 2025-01-26 Optimized knowledge graph extraction and application, offering multiple configuration options.
- 2024-12-18 Upgraded the document layout analysis model in DeepDoc.
- 2024-12-04 Added support for PageRank scores in knowledge bases.
- 2024-11-22 Improved the definition and usage of variables in Agent.
- 2024-11-01 Added keyword extraction and related-question generation to parsed chunks to improve recall accuracy.
- 2024-09-13 Added a knowledge-base Q&A search mode.
- 2024-08-22 Supported text-to-SQL conversion with RAG.
- 2024-08-02 Supported GraphRAG, inspired by [graphrag](https://github.com/microsoft/graphrag), and mind maps.

## 🎉 Stay Tuned

⭐️ Star our repository by clicking the Star button at the top right to get instant notifications of new releases! 🌟

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/user-attachments/assets/18c9707e-b8aa-4caf-a154-037089c105ba" width="1200"/>
</div>

## 🌟 Key Features

### 🍭 **"Quality in, quality out"**
@@ -142,18 +147,30 @@

3. Enter the **docker** folder and start the server using the pre-built Docker images:

> Running the following command automatically downloads the dev RAGFlow slim Docker image (`dev-slim`), which does not include embedding models or some Python libraries and is therefore about 1 GB in size.
> [!CAUTION]
> Note that all officially provided Docker images are built for x86; no ARM64-based Docker images are provided.
> If your operating system runs on ARM64, refer to [this document](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image yourself.

> Running the following command automatically downloads the RAGFlow slim Docker image `v0.17.1-slim`. Refer to the table below for descriptions of the different Docker releases. To download a Docker image other than `v0.17.1-slim`, update the `RAGFLOW_IMAGE` variable in the **docker/.env** file before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.1` to download the full `v0.17.1` release of the RAGFlow image.

```bash
$ cd ragflow/docker
$ docker compose -f docker-compose.yml up -d
```

> - To download and run a specific version of the RAGFlow slim Docker image, find the `RAGFLOW_IMAGE` variable in the **docker/.env** file, change it to the corresponding version, for example `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1-slim`, and then run the command above.
> - To install the dev Docker image with built-in embedding models and Python libraries, set the `RAGFLOW_IMAGE` variable in **docker/.env** to `RAGFLOW_IMAGE=infiniflow/ragflow:dev`.
> - To install a specific release of the RAGFlow Docker image with built-in embedding models and Python libraries, set the `RAGFLOW_IMAGE` variable in **docker/.env** to, for example, `RAGFLOW_IMAGE=infiniflow/ragflow:v0.14.1`, and then run the command above.
> **Note:** a RAGFlow Docker image with built-in embedding models and Python libraries is about 9 GB in size and may take considerably longer to download; please be patient.

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
| ----------------- | --------------- | --------------------- | ------------------------ |
| v0.17.1           | ≈9              | :heavy_check_mark:    | Stable release           |
| v0.17.1-slim      | ≈2              | ❌                    | Stable release           |
| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
| nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

> [!TIP]
> If you run into problems pulling the Docker image, choose the corresponding Huawei Cloud or Alibaba Cloud image in the **docker/.env** file, following the comments next to the `RAGFLOW_IMAGE` variable.
>
> - Huawei Cloud image name: `swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
> - Alibaba Cloud image name: `registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`

4. Check the server status after it is up and running:

```bash
$ docker logs -f ragflow-server
```

@@ -163,22 +180,20 @@
_The following output confirms a successful launch of the system:_

```bash
 ____   ___    ______ ______ __
     ____   ___    ______ ______ __
    / __ \ /   |  / ____// ____// /____  _      __
   / /_/ // /| | / / __ / /_   / // __ \| | /| / /
/ _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
/_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/
  / _, _// ___ |/ /_/ // __/  / // /_/ /| |/ |/ /
 /_/ |_|/_/  |_|\____//_/    /_/ \____/ |__/|__/

 * Running on all addresses (0.0.0.0)
 * Running on http://127.0.0.1:9380
 * Running on http://x.x.x.x:9380
 INFO:werkzeug:Press CTRL+C to quit
```
> If you skip this system confirmation step and log in to RAGFlow right away, your browser may prompt a `network anormal` error because RAGFlow may not have fully started.

> If you try to log in to RAGFlow before the message above appears, your browser may prompt a `network anormal` error.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.
> In the example above, you only need to enter http://IP_OF_YOUR_MACHINE: the port can be omitted if you have not changed the configuration (the default HTTP serving port is 80).
6. In the `user_default_llm` field of [service_conf.yaml](./docker/service_conf.yaml), configure the LLM factory, and fill the `API_KEY` field with the API key for your chosen model.
6. In the `user_default_llm` field of [service_conf.yaml.template](./docker/service_conf.yaml.template), configure the LLM factory, and fill the `API_KEY` field with the API key for your chosen model.

> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.

@@ -189,14 +204,14 @@
System configuration involves the following three files:

- [.env](./docker/.env): Keeps basic system environment variables, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
- [service_conf.yaml.template](./docker/service_conf.yaml.template): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on this file to start up.

Make sure that the variable settings in the [.env](./docker/.env) file are consistent with the configurations in the [service_conf.yaml](./docker/service_conf.yaml) file!
Make sure that the variable settings in the [.env](./docker/.env) file are consistent with the configurations in the [service_conf.yaml.template](./docker/service_conf.yaml.template) file!

If you cannot access the image site hub.docker.com or the model site huggingface.co, update `RAGFLOW_IMAGE` and `HF_ENDPOINT` following the comments in [.env](./docker/.env).

> [./docker/README](./docker/README.md) provides details on environment settings and service configurations. Be **absolutely sure** that the environment variable values listed in [./docker/README](./docker/README.md) are consistent with the system configuration in the [service_conf.yaml](./docker/service_conf.yaml) file.
> [./docker/README](./docker/README.md) explains the environment variable settings and service configurations used by [service_conf.yaml.template](./docker/service_conf.yaml.template).

To update the default HTTP serving port (80), change `80:80` in the [docker-compose.yml](./docker/docker-compose.yml) file to `<YOUR_SERVING_PORT>:80`.

@@ -215,29 +230,27 @@ RAGFlow uses Elasticsearch by default for storing text and vector data. To switch
```bash
$ docker compose -f docker/docker-compose.yml down -v
```
Note: `-v` will delete the Docker container volumes, and the existing data will be erased.

2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

3. Start the containers:

```bash
$ docker compose -f docker/docker-compose.yml up -d
$ docker compose -f docker-compose.yml up -d
```

> [!WARNING]
> Running Infinity on a Linux/arm64 machine is not yet officially supported.


## 🔧 Build a Docker image from source (without embedding models)

This Docker image is about 1 GB in size and relies on external LLM and embedding services.
This Docker image is about 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Build a Docker image from source (including embedding models)
@@ -247,61 +260,64 @@ docker build -f Dockerfile.slim -t infiniflow/ragflow:dev-slim .
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
pip3 install huggingface-hub nltk
python3 download_deps.py
docker build -f Dockerfile -t infiniflow/ragflow:dev .
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Launch the service from source

1. Install Poetry, or skip this step if it is already installed:
1. Install uv, or skip this step if it is already installed:

```bash
curl -sSL https://install.python-poetry.org | python3 -
pipx install uv
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
```

2. Clone the source code and install Python dependencies:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
export POETRY_VIRTUALENVS_CREATE=true POETRY_VIRTUALENVS_IN_PROJECT=true
~/.local/bin/poetry install --sync --no-root # install RAGFlow dependent python modules
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

```bash
docker compose -f docker/docker-compose-base.yml up -d
```

Add the following line to `/etc/hosts` to resolve all host addresses in the **docker/service_conf.yaml** file to `127.0.0.1`:
Add the following line to `/etc/hosts` to resolve all host addresses in the **conf/service_conf.yaml** file to `127.0.0.1`:

```
127.0.0.1       es01 infinity mysql minio redis
```
In the **docker/service_conf.yaml** file, update the mysql port to `5455` and the es port to `1200`, as specified in **docker/.env**.

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to the corresponding mirror site:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. Launch the backend service:

```bash
source .venv/bin/activate
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

6. Install frontend dependencies:

```bash
cd web
npm install --force
```
7. Launch the frontend service:
```bash
npm run dev
```
npm install
```
7. Launch the frontend service:

_The following output confirms a successful launch of the system:_
```bash
npm run dev
```

_The following output confirms a successful launch of the system:_



@@ -314,7 +330,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:dev .

## 📜 Roadmap

See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162).
See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214).

## 🏄 Community

@@ -337,4 +353,3 @@ RAGFlow flourishes only through open-source collaboration. In this spirit, we welcome
<p align="center">
  <img src="https://github.com/infiniflow/ragflow/assets/7248/bccf284f-46f2-4445-9809-8f1030fb7585" width=50% height=50%>
</p>
@@ -10,7 +10,7 @@ It is used to compose a complex work flow or agent.
And this graph is beyond the DAG that we can use circles to describe our agent or work flow.
Under this folder, we propose a test tool ./test/client.py which can test the DSLs such as json files in folder ./test/dsl_examples.
Please use this client at the same folder you start RAGFlow. If it's run by Docker, please go into the container before running the client.
Otherwise, correct configurations in conf/service_conf.yaml is essential.
Otherwise, correct configurations in service_conf.yaml is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
```

@@ -11,7 +11,7 @@
Under this folder, we provide a test tool ./test/client.py,
which can test DSL files such as those under the ./test/dsl_examples folder.
Please use this client in the same folder where you start RAGFlow. If RAGFlow is run via Docker, enter the container before running the client.
Otherwise, a correct configuration of the conf/service_conf.yaml file is essential.
Otherwise, a correct configuration of the service_conf.yaml file is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
```
@@ -0,0 +1,18 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from beartype.claw import beartype_this_package
beartype_this_package()
119  agent/canvas.py
@@ -15,13 +15,16 @@
#
import logging
import json
from abc import ABC
from copy import deepcopy
from functools import partial

import pandas as pd

from agent.component import component_class
from agent.component.base import ComponentBase


class Canvas(ABC):
class Canvas:
    """
    dsl = {
        "components": {
@@ -82,7 +85,8 @@ class Canvas(ABC):
                }
            },
            "downstream": [],
            "upstream": []
            "upstream": [],
            "parent_id": ""
        }
    },
    "history": [],
@@ -133,7 +137,8 @@ class Canvas(ABC):
            "components": {}
        }
        for k in self.dsl.keys():
            if k in ["components"]:continue
            if k in ["components"]:
                continue
            dsl[k] = deepcopy(self.dsl[k])

        for k, cpn in self.components.items():
@@ -156,9 +161,10 @@ class Canvas(ABC):
            self.components[k]["obj"].reset()
        self._embed_id = ""

    def get_compnent_name(self, cid):
    def get_component_name(self, cid):
        for n in self.dsl["graph"]["nodes"]:
            if cid == n["id"]: return n["data"]["name"]
            if cid == n["id"]:
                return n["data"]["name"]
        return ""

    def run(self, **kwargs):
@@ -173,7 +179,8 @@ class Canvas(ABC):
            if kwargs.get("stream"):
                for an in ans():
                    yield an
            else: yield ans
            else:
                yield ans
            return

        if not self.path:
@@ -181,6 +188,7 @@ class Canvas(ABC):
            self.path.append(["begin"])

        self.path.append([])

        ran = -1
        waiting = []
        without_dependent_checking = []
@@ -188,7 +196,8 @@ class Canvas(ABC):
        def prepare2run(cpns):
            nonlocal ran, ans
            for c in cpns:
                if self.path[-1] and c == self.path[-1][-1]: continue
                if self.path[-1] and c == self.path[-1][-1]:
                    continue
                cpn = self.components[c]["obj"]
                if cpn.component_name == "Answer":
                    self.answer.append(c)
@@ -197,49 +206,77 @@ class Canvas(ABC):
                if c not in without_dependent_checking:
                    cpids = cpn.get_dependent_components()
                    if any([cc not in self.path[-1] for cc in cpids]):
                        if c not in waiting: waiting.append(c)
                        if c not in waiting:
                            waiting.append(c)
                        continue
                yield "*'{}'* is running...🕞".format(self.get_compnent_name(c))
                ans = cpn.run(self.history, **kwargs)
                yield "*'{}'* is running...🕞".format(self.get_component_name(c))

                if cpn.component_name.lower() == "iteration":
                    st_cpn = cpn.get_start()
                    assert st_cpn, "Start component not found for Iteration."
                    if not st_cpn["obj"].end():
                        cpn = st_cpn["obj"]
                        c = cpn._id

                try:
                    ans = cpn.run(self.history, **kwargs)
                except Exception as e:
                    logging.exception(f"Canvas.run got exception: {e}")
                    self.path[-1].append(c)
                    ran += 1
                    raise e
                self.path[-1].append(c)

                ran += 1

        for m in prepare2run(self.components[self.path[-2][-1]]["downstream"]):
        downstream = self.components[self.path[-2][-1]]["downstream"]
        if not downstream and self.components[self.path[-2][-1]].get("parent_id"):
            cid = self.path[-2][-1]
            pid = self.components[cid]["parent_id"]
            o, _ = self.components[cid]["obj"].output(allow_partial=False)
            oo, _ = self.components[pid]["obj"].output(allow_partial=False)
            self.components[pid]["obj"].set(pd.concat([oo, o], ignore_index=True))
            downstream = [pid]

        for m in prepare2run(downstream):
            yield {"content": m, "running_status": True}

        while 0 <= ran < len(self.path[-1]):
            logging.debug(f"Canvas.run: {ran} {self.path}")
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
            if not cpn["downstream"]: break
            if not any([cpn["downstream"], cpn.get("parent_id"), waiting]):
                break

            loop = self._find_loop()
            if loop: raise OverflowError(f"Too much loops: {loop}")
            if loop:
                raise OverflowError(f"Too much loops: {loop}")

            if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                switch_out = cpn["obj"].output()[1].iloc[0, 0]
                assert switch_out in self.components, \
                    "{}'s output: {} not valid.".format(cpn_id, switch_out)
                try:
                    for m in prepare2run([switch_out]):
                        yield {"content": m, "running_status": True}
                except Exception as e:
                    yield {"content": "*Exception*: {}".format(e), "running_status": True}
                    logging.exception("Canvas.run got exception")
                for m in prepare2run([switch_out]):
                    yield {"content": m, "running_status": True}
                continue

            try:
                for m in prepare2run(cpn["downstream"]):
                    yield {"content": m, "running_status": True}
            except Exception as e:
                yield {"content": "*Exception*: {}".format(e), "running_status": True}
                logging.exception("Canvas.run got exception")
            downstream = cpn["downstream"]
            if not downstream and cpn.get("parent_id"):
                pid = cpn["parent_id"]
                _, o = cpn["obj"].output(allow_partial=False)
                _, oo = self.components[pid]["obj"].output(allow_partial=False)
                self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True))
                downstream = [pid]

            for m in prepare2run(downstream):
                yield {"content": m, "running_status": True}

            if ran >= len(self.path[-1]) and waiting:
                without_dependent_checking = waiting
                waiting = []
                for m in prepare2run(without_dependent_checking):
                    yield {"content": m, "running_status": True}
                without_dependent_checking = []
                ran -= 1

        if self.answer:
@@ -283,19 +320,22 @@ class Canvas(ABC):

    def _find_loop(self, max_loops=6):
        path = self.path[-1][::-1]
        if len(path) < 2: return False
        if len(path) < 2:
            return False

        for i in range(len(path)):
            if path[i].lower().find("answer") >= 0:
            if path[i].lower().find("answer") == 0 or path[i].lower().find("iterationitem") == 0:
                path = path[:i]
                break

        if len(path) < 2: return False
        if len(path) < 2:
            return False

        for l in range(2, len(path) // 2):
            pat = ",".join(path[0:l])
        for loc in range(2, len(path) // 2):
            pat = ",".join(path[0:loc])
            path_str = ",".join(path)
            if len(pat) >= len(path_str): return False
            if len(pat) >= len(path_str):
                return False
            loop = max_loops
            while path_str.find(pat) == 0 and loop >= 0:
                loop -= 1
@@ -303,10 +343,23 @@ class Canvas(ABC):
                    return False
                path_str = path_str[len(pat)+1:]
            if loop < 0:
                pat = " => ".join([p.split(":")[0] for p in path[0:l]])
                pat = " => ".join([p.split(":")[0] for p in path[0:loc]])
                return pat + " => " + pat

        return False

    def get_prologue(self):
        return self.components["begin"]["obj"]._param.prologue

    def set_global_param(self, **kwargs):
        for k, v in kwargs.items():
            for q in self.components["begin"]["obj"]._param.query:
                if k != q["key"]:
                    continue
                q["value"] = v

    def get_preset_param(self):
        return self.components["begin"]["obj"]._param.query

    def get_component_input_elements(self, cpnnm):
        return self.components[cpnnm]["obj"].get_input_elements()
@@ -1,3 +1,19 @@
#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import importlib
from .begin import Begin, BeginParam
from .generate import Generate, GenerateParam
@@ -31,9 +47,87 @@ from .akshare import AkShare, AkShareParam
from .crawler import Crawler, CrawlerParam
from .invoke import Invoke, InvokeParam
from .template import Template, TemplateParam
from .email import Email, EmailParam
from .iteration import Iteration, IterationParam
from .iterationitem import IterationItem, IterationItemParam


def component_class(class_name):
    m = importlib.import_module("agent.component")
    c = getattr(m, class_name)
    return c


__all__ = [
    "Begin",
    "BeginParam",
    "Generate",
    "GenerateParam",
    "Retrieval",
    "RetrievalParam",
    "Answer",
    "AnswerParam",
    "Categorize",
    "CategorizeParam",
    "Switch",
    "SwitchParam",
    "Relevant",
    "RelevantParam",
    "Message",
    "MessageParam",
    "RewriteQuestion",
    "RewriteQuestionParam",
    "KeywordExtract",
    "KeywordExtractParam",
    "Concentrator",
    "ConcentratorParam",
    "Baidu",
    "BaiduParam",
    "DuckDuckGo",
    "DuckDuckGoParam",
    "Wikipedia",
    "WikipediaParam",
    "PubMed",
    "PubMedParam",
    "ArXiv",
    "ArXivParam",
    "Google",
    "GoogleParam",
    "Bing",
    "BingParam",
    "GoogleScholar",
    "GoogleScholarParam",
    "DeepL",
    "DeepLParam",
    "GitHub",
    "GitHubParam",
    "BaiduFanyi",
    "BaiduFanyiParam",
    "QWeather",
    "QWeatherParam",
    "ExeSQL",
    "ExeSQLParam",
    "YahooFinance",
    "YahooFinanceParam",
    "WenCai",
    "WenCaiParam",
    "Jin10",
    "Jin10Param",
    "TuShare",
    "TuShareParam",
    "AkShare",
    "AkShareParam",
    "Crawler",
    "CrawlerParam",
    "Invoke",
    "InvokeParam",
    "Iteration",
    "IterationParam",
    "IterationItem",
    "IterationItemParam",
    "Template",
    "TemplateParam",
    "Email",
    "EmailParam",
    "component_class"
]
@@ -1,56 +1,56 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import pandas as pd
import akshare as ak
from agent.component.base import ComponentBase, ComponentParamBase


class AkShareParam(ComponentParamBase):
    """
    Define the AkShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class AkShare(ComponentBase, ABC):
    component_name = "AkShare"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return AkShare.be_output("")

        try:
            ak_res = []
            stock_news_em_df = ak.stock_news_em(symbol=ans)
            stock_news_em_df = stock_news_em_df.head(self._param.top_n)
            ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
                "新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
        except Exception as e:
            return AkShare.be_output("**ERROR**: " + str(e))

        if not ak_res:
            return AkShare.be_output("")

        return pd.DataFrame(ak_res)
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class AkShareParam(ComponentParamBase):
    """
    Define the AkShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class AkShare(ComponentBase, ABC):
    component_name = "AkShare"

    def _run(self, history, **kwargs):
        import akshare as ak
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return AkShare.be_output("")

        try:
            ak_res = []
            stock_news_em_df = ak.stock_news_em(symbol=ans)
            stock_news_em_df = stock_news_em_df.head(self._param.top_n)
            ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
                "新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
        except Exception as e:
            return AkShare.be_output("**ERROR**: " + str(e))

        if not ak_res:
            return AkShare.be_output("")

        return pd.DataFrame(ak_res)
@ -16,6 +16,7 @@
|
||||
import random
|
||||
from abc import ABC
|
||||
from functools import partial
|
||||
from typing import Tuple, Union
|
||||
|
||||
import pandas as pd
|
||||
|
||||
@ -76,4 +77,13 @@ class Answer(ComponentBase, ABC):
|
||||
def set_exception(self, e):
|
||||
self.exception = e
|
||||
|
||||
def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
|
||||
if allow_partial:
|
||||
return super.output()
|
||||
|
||||
for r, c in self._canvas.history[::-1]:
|
||||
if r == "user":
|
||||
return self._param.output_var_name, pd.DataFrame([{"content": c}])
|
||||
|
||||
self._param.output_var_name, pd.DataFrame([])
|
||||
|
||||
|
||||
@ -44,7 +44,7 @@ class Baidu(ComponentBase, ABC):
|
||||
return Baidu.be_output("")
|
||||
|
||||
try:
|
||||
url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
|
||||
url = 'http://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
|
||||
response = requests.get(url=url, headers=headers)
|
||||
|
||||
@ -39,9 +39,6 @@ class BaiduFanyiParam(ComponentParamBase):
|
||||
self.check_empty(self.appid, "BaiduFanyi APPID")
|
||||
self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
|
||||
self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
|
||||
self.check_valid_value(self.trans_type, "Translate domain",
|
||||
['it', 'finance', 'machinery', 'senimed', 'novel', 'academic', 'aerospace', 'wiki',
|
||||
'news', 'law', 'contract'])
|
||||
self.check_valid_value(self.source_lang, "Source language",
|
||||
['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
|
||||
'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
|
||||
@ -96,3 +93,4 @@ class BaiduFanyi(ComponentBase, ABC):
|
||||
|
||||
except Exception as e:
|
||||
BaiduFanyi.be_output("**Error**:" + str(e))
|
||||
|
||||
|
||||
@ -37,6 +37,7 @@ class ComponentParamBase(ABC):
|
||||
self.message_history_window_size = 22
|
||||
self.query = []
|
||||
self.inputs = []
|
||||
self.debug_inputs = []
|
||||
|
||||
def set_name(self, name: str):
|
||||
self._name = name
|
||||
@ -410,6 +411,7 @@ class ComponentBase(ABC):
|
||||
def run(self, history, **kwargs):
|
||||
logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
|
||||
json.dumps(kwargs, ensure_ascii=False)))
|
||||
self._param.debug_inputs = []
|
||||
try:
|
||||
res = self._run(history, **kwargs)
|
||||
self.set_output(res)
|
||||
@ -424,9 +426,14 @@ class ComponentBase(ABC):
|
||||
|
||||
def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
|
||||
o = getattr(self._param, self._param.output_var_name)
|
||||
if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
|
||||
if not isinstance(o, list): o = [o]
|
||||
o = pd.DataFrame(o)
|
||||
if not isinstance(o, partial):
|
||||
if not isinstance(o, pd.DataFrame):
|
||||
if isinstance(o, list):
|
||||
return self._param.output_var_name, pd.DataFrame(o)
|
||||
if o is None:
|
||||
return self._param.output_var_name, pd.DataFrame()
|
||||
return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
|
||||
return self._param.output_var_name, o
|
||||
|
||||
if allow_partial or not isinstance(o, partial):
|
||||
if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
|
||||
@ -437,17 +444,21 @@ class ComponentBase(ABC):
|
||||
for oo in o():
|
||||
if not isinstance(oo, pd.DataFrame):
|
||||
outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
|
||||
else: outs = oo
|
||||
else:
|
||||
outs = oo
|
||||
return self._param.output_var_name, outs
|
||||
|
||||
def reset(self):
|
||||
setattr(self._param, self._param.output_var_name, None)
|
||||
self._param.inputs = []
|
||||
|
||||
    def set_output(self, v: partial | pd.DataFrame):
    def set_output(self, v):
        setattr(self._param, self._param.output_var_name, v)

    def get_input(self):
        if self._param.debug_inputs:
            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])

        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
@@ -457,7 +468,7 @@ class ComponentBase(ABC):
        self._param.inputs = []
        outs = []
        for q in self._param.query:
            if q["component_id"]:
            if q.get("component_id"):
                if q["component_id"].split("@")[0].lower().find("begin") >= 0:
                    cpn_id, key = q["component_id"].split("@")
                    for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
@@ -470,22 +481,33 @@ class ComponentBase(ABC):
                        assert False, f"Can't find parameter '{key}' for {cpn_id}"
                    continue

                if q["component_id"].lower().find("answer") == 0:
                    txt = []
                    for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
                        txt.append(f"{r.upper()}: {c}")
                    txt = "\n".join(txt)
                    self._param.inputs.append({"content": txt, "component_id": q["component_id"]})
                    outs.append(pd.DataFrame([{"content": txt}]))
                    continue

                outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
                self._param.inputs.append({"component_id": q["component_id"],
                                           "content": "\n".join(
                                               [str(d["content"]) for d in outs[-1].to_dict('records')])})
            elif q["value"]:
            elif q.get("value"):
                self._param.inputs.append({"component_id": None, "content": q["value"]})
                outs.append(pd.DataFrame([{"content": q["value"]}]))
        if outs:
            df = pd.concat(outs, ignore_index=True)
            if "content" in df: df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
            if "content" in df:
                df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
            return df

        upstream_outs = []

        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch", "concentrator"]: continue
            if self.get_component_name(u) in ["switch", "concentrator"]:
                continue
            if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
                o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
                if o is not None:
@@ -522,6 +544,22 @@ class ComponentBase(ABC):

        return df

    def get_input_elements(self):
        assert self._param.query, "Please identify input parameters firstly."
        eles = []
        for q in self._param.query:
            if q.get("component_id"):
                cpn_id = q["component_id"]
                if cpn_id.split("@")[0].lower().find("begin") >= 0:
                    cpn_id, key = cpn_id.split("@")
                    eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
                    continue

                eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
            else:
                eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
        return eles

    def get_stream_input(self):
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
@@ -529,7 +567,8 @@ class ComponentBase(ABC):
            reversed_cpnts.extend(self._canvas.path[-1])

        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch", "answer"]: continue
            if self.get_component_name(u) in ["switch", "answer"]:
                continue
            return self._canvas.get_component(u)["obj"].output()[1]

    @staticmethod
@@ -538,3 +577,10 @@ class ComponentBase(ABC):

    def get_component_name(self, cpn_id):
        return self._canvas.get_component(cpn_id)["obj"].component_name.lower()

    def debug(self, **kwargs):
        return self._run([], **kwargs)

    def get_parent(self):
        pid = self._canvas.get_component(self._id)["parent_id"]
        return self._canvas.get_component(pid)["obj"]
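For orientation, the `query` entries that `get_input` and `get_input_elements` walk are plain dicts carrying either a `component_id` or a literal `value`. A minimal sketch of the two shapes (the component IDs here are hypothetical, not taken from the diff):

# Hypothetical sketch of the query-entry shapes handled above.
query = [
    {"component_id": "retrieval:0"},      # pull another component's output
    {"component_id": "begin@user_name"},  # pull a Begin-form parameter by key
    {"value": "a literal string input"},  # inline constant, no upstream lookup
]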
@@ -26,6 +26,7 @@ class BeginParam(ComponentParamBase):
    def __init__(self):
        super().__init__()
        self.prologue = "Hi! I'm your smart assistant. What can I do for you?"
        self.query = []

    def check(self):
        return True
@@ -42,7 +43,7 @@ class Begin(ComponentBase):
    def stream_output(self):
        res = {"content": self._param.prologue}
        yield res
        self.set_output(res)
        self.set_output(self.be_output(res))
@@ -34,15 +34,18 @@ class CategorizeParam(GenerateParam):
        super().check()
        self.check_empty(self.category_description, "[Categorize] Category examples")
        for k, v in self.category_description.items():
            if not k: raise ValueError("[Categorize] Category name can not be empty!")
            if not v.get("to"): raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")
            if not k:
                raise ValueError("[Categorize] Category name can not be empty!")
            if not v.get("to"):
                raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

    def get_prompt(self):
    def get_prompt(self, chat_hist):
        cate_lines = []
        for c, desc in self.category_description.items():
            for l in desc.get("examples", "").split("\n"):
                if not l: continue
                cate_lines.append("Question: {}\tCategory: {}".format(l, c))
            for line in desc.get("examples", "").split("\n"):
                if not line:
                    continue
                cate_lines.append("USER: {}\nCategory: {}".format(line, c))
        descriptions = []
        for c, desc in self.category_description.items():
            if desc.get("description"):
@@ -59,11 +62,15 @@ class CategorizeParam(GenerateParam):
        {}
        You could learn from the above examples.
        Just mention the category names, no need for any additional words.

        ---- Real Data ----
        {}
        """.format(
            len(self.category_description.keys()),
            "/".join(list(self.category_description.keys())),
            "\n".join(descriptions),
            "- ".join(cate_lines)
            "- ".join(cate_lines),
            chat_hist
        )
        return self.prompt
@@ -73,9 +80,9 @@ class Categorize(Generate, ABC):

    def _run(self, history, **kwargs):
        input = self.get_input()
        input = "Question: " + (list(input["content"])[-1] if "content" in input else "") + "\tCategory: "
        input = " - ".join(input["content"]) if "content" in input else ""
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
        ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
                            self._param.gen_conf())
        logging.debug(f"input: {input}, answer: {str(ans)}")
        for c in self._param.category_description.keys():
@@ -84,4 +91,8 @@ class Categorize(Generate, ABC):

        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])

    def debug(self, **kwargs):
        df = self._run([], **kwargs)
        cpn_id = df.iloc[0, 0]
        return Categorize.be_output(self._canvas.get_component_name(cpn_id))
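To make the structure checked above concrete: `category_description` maps a category name to a description, newline-separated examples, and the downstream component to jump to. A hedged illustration (names and IDs invented for the example):

# Hypothetical category_description; names and "to" IDs are invented.
category_description = {
    "Refund": {
        "description": "Questions about refunds or cancellations.",
        "examples": "I want my money back\nCancel my order",
        "to": "generate:refund",   # component to route to on a match
    },
    "Other": {
        "description": "Anything else.",
        "examples": "",
        "to": "generate:general",
    },
}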
@@ -1,36 +1,36 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class ConcentratorParam(ComponentParamBase):
    """
    Define the Concentrator component parameters.
    """

    def __init__(self):
        super().__init__()

    def check(self):
        return True


class Concentrator(ComponentBase, ABC):
    component_name = "Concentrator"

    def _run(self, history, **kwargs):
        return Concentrator.be_output("")
@@ -41,7 +41,7 @@ class Crawler(ComponentBase, ABC):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not is_valid_url(ans):
            return Crawler.be_output("")
            return Crawler.be_output("URL not valid")
        try:
            result = asyncio.run(self.get_web(ans))
@@ -14,7 +14,6 @@
# limitations under the License.
#
from abc import ABC
import re
from agent.component.base import ComponentBase, ComponentParamBase
import deepl
agent/component/email.py (new file, 138 lines)
@@ -0,0 +1,138 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import ABC
import json
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import formataddr
from agent.component.base import ComponentBase, ComponentParamBase


class EmailParam(ComponentParamBase):
    """
    Define the Email component parameters.
    """
    def __init__(self):
        super().__init__()
        # Fixed configuration parameters
        self.smtp_server = ""  # SMTP server address
        self.smtp_port = 465  # SMTP port
        self.email = ""  # Sender email
        self.password = ""  # Email authorization code
        self.sender_name = ""  # Sender name

    def check(self):
        # Check required parameters
        self.check_empty(self.smtp_server, "SMTP Server")
        self.check_empty(self.email, "Email")
        self.check_empty(self.password, "Password")
        self.check_empty(self.sender_name, "Sender Name")


class Email(ComponentBase, ABC):
    component_name = "Email"

    def _run(self, history, **kwargs):
        # Get upstream component output and parse JSON
        ans = self.get_input()
        content = "".join(ans["content"]) if "content" in ans else ""
        if not content:
            return Email.be_output("No content to send")

        success = False
        try:
            # Parse JSON string passed from upstream
            email_data = json.loads(content)

            # Validate required fields
            if "to_email" not in email_data:
                return Email.be_output("Missing required field: to_email")

            # Create email object
            msg = MIMEMultipart('alternative')

            # Properly handle sender name encoding
            msg['From'] = formataddr((str(Header(self._param.sender_name, 'utf-8')), self._param.email))
            msg['To'] = email_data["to_email"]
            if "cc_email" in email_data and email_data["cc_email"]:
                msg['Cc'] = email_data["cc_email"]
            msg['Subject'] = Header(email_data.get("subject", "No Subject"), 'utf-8').encode()

            # Use content from email_data or default content
            email_content = email_data.get("content", "No content provided")
            # msg.attach(MIMEText(email_content, 'plain', 'utf-8'))
            msg.attach(MIMEText(email_content, 'html', 'utf-8'))

            # Connect to SMTP server and send
            logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

            context = smtplib.ssl.create_default_context()
            with smtplib.SMTP_SSL(self._param.smtp_server, self._param.smtp_port, context=context) as server:
                # Login
                logging.info(f"Attempting to login with email: {self._param.email}")
                server.login(self._param.email, self._param.password)

                # Gather the full recipient list
                recipients = [email_data["to_email"]]
                if "cc_email" in email_data and email_data["cc_email"]:
                    recipients.extend(email_data["cc_email"].split(','))

                # Send email
                logging.info(f"Sending email to recipients: {recipients}")
                try:
                    server.send_message(msg, self._param.email, recipients)
                    success = True
                except Exception as e:
                    logging.error(f"Error during send_message: {str(e)}")
                    # Try alternative method
                    server.sendmail(self._param.email, recipients, msg.as_string())
                    success = True

                try:
                    server.quit()
                except Exception as e:
                    # Ignore errors when closing connection
                    logging.warning(f"Non-fatal error during connection close: {str(e)}")

            if success:
                return Email.be_output("Email sent successfully")

        except json.JSONDecodeError:
            error_msg = "Invalid JSON format in input"
            logging.error(error_msg)
            return Email.be_output(error_msg)

        except smtplib.SMTPAuthenticationError:
            error_msg = "SMTP Authentication failed. Please check your email and authorization code."
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except smtplib.SMTPConnectError:
            error_msg = f"Failed to connect to SMTP server {self._param.smtp_server}:{self._param.smtp_port}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except smtplib.SMTPException as e:
            error_msg = f"SMTP error occurred: {str(e)}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")

        except Exception as e:
            error_msg = f"Unexpected error: {str(e)}"
            logging.error(error_msg)
            return Email.be_output(f"Failed to send email: {error_msg}")
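As the `_run` body shows, the Email component expects its entire upstream input to be a single JSON document. A hedged example of a payload that would pass the validation above (all addresses invented):

import json

# Invented addresses; only "to_email" is required by the validation above.
payload = json.dumps({
    "to_email": "alice@example.com",
    "cc_email": "bob@example.com,carol@example.com",  # comma-separated, split for the envelope
    "subject": "Weekly report",
    "content": "<h1>Done</h1><p>All tasks finished.</p>",  # attached as HTML per the MIMEText call above
})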
@@ -15,13 +15,17 @@
#
from abc import ABC
import re
from copy import deepcopy

import pandas as pd
import pymysql
import psycopg2
from agent.component.base import ComponentBase, ComponentParamBase
from agent.component import GenerateParam, Generate
import pyodbc
import logging


class ExeSQLParam(ComponentParamBase):
class ExeSQLParam(GenerateParam):
    """
    Define the ExeSQL component parameters.
    """
@@ -38,7 +42,8 @@ class ExeSQLParam(ComponentParamBase):
        self.top_n = 30

    def check(self):
        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb'])
        super().check()
        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
        self.check_empty(self.database, "Database name")
        self.check_empty(self.username, "database username")
        self.check_empty(self.host, "IP Address")
@@ -46,58 +51,104 @@ class ExeSQLParam(ComponentParamBase):
        self.check_empty(self.password, "Database password")
        self.check_positive_integer(self.top_n, "Number of records")
        if self.database == "rag_flow":
            if self.host == "ragflow-mysql": raise ValueError("The host is not accessible.")
            if self.password == "infini_rag_flow": raise ValueError("The host is not accessible.")
            if self.host == "ragflow-mysql":
                raise ValueError("For security reasons, the database named rag_flow is not supported.")
            if self.password == "infini_rag_flow":
                raise ValueError("For security reasons, the database named rag_flow is not supported.")


class ExeSQL(ComponentBase, ABC):
class ExeSQL(Generate, ABC):
    component_name = "ExeSQL"

    def _run(self, history, **kwargs):
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        if self._loop >= self._param.loop:
            self._loop = 0
            raise Exception("Maximum loop time exceeds. Can't query the correct data via SQL statement.")
        self._loop += 1

        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
    def _refactor(self, ans):
        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
        match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
        if match:
            ans = match.group(1)  # Query content
            return ans
        else:
            print("no markdown")
        ans = re.sub(r'^.*?SELECT ', 'SELECT ', (ans), flags=re.IGNORECASE)
        ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
        ans = re.sub(r';[^;]*$', r';', ans)
        if not ans:
            raise Exception("SQL statement not found!")
        return ans

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
        ans = self._refactor(ans)
        if self._param.db_type in ["mysql", "mariadb"]:
            db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                 port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'postgresql':
            db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)

        elif self._param.db_type == 'mssql':
            conn_str = (
                r'DRIVER={ODBC Driver 17 for SQL Server};'
                r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
                r'DATABASE=' + self._param.database + ';'
                r'UID=' + self._param.username + ';'
                r'PWD=' + self._param.password
            )
            db = pyodbc.connect(conn_str)
        try:
            cursor = db.cursor()
        except Exception as e:
            raise Exception("Database Connection Failed! \n" + str(e))
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        self._loop += 1
        input_list = re.split(r';', ans.replace(r"\n", " "))
        sql_res = []
        for single_sql in re.split(r';', ans.replace(r"\n", " ")):
            if not single_sql:
                continue
            try:
                cursor.execute(single_sql)
                if cursor.rowcount == 0:
                    sql_res.append({"content": "\nTotal: 0\n No record in the database!"})
                    continue
                single_res = pd.DataFrame([i for i in cursor.fetchmany(size=self._param.top_n)])
                single_res.columns = [i[0] for i in cursor.description]
                sql_res.append({"content": "\nTotal: " + str(cursor.rowcount) + "\n" + single_res.to_markdown()})
            except Exception as e:
                sql_res.append({"content": "**Error**:" + str(e) + "\nError SQL Statement:" + single_sql})
                pass
        for i in range(len(input_list)):
            single_sql = input_list[i]
            while self._loop <= self._param.loop:
                self._loop += 1
                if not single_sql:
                    break
                try:
                    cursor.execute(single_sql)
                    if cursor.rowcount == 0:
                        sql_res.append({"content": "No record in the database!"})
                        break
                    if self._param.db_type == 'mssql':
                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
                                                               columns=[desc[0] for desc in cursor.description])
                    else:
                        single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
                        single_res.columns = [i[0] for i in cursor.description]
                    sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
                    break
                except Exception as e:
                    single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
                    single_sql = self._refactor(single_sql)
                    if self._loop > self._param.loop:
                        sql_res.append({"content": "Can't query the correct data via SQL statement."})
        db.close()

        if not sql_res:
            return ExeSQL.be_output("")

        return pd.DataFrame(sql_res)

    def _regenerate_sql(self, failed_sql, error_message, **kwargs):
        prompt = f'''
        ## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
        ## The original SQL statement is as follows:{failed_sql}.
        ## The contents of the SQL query error report is as follows:{error_message}.
        ## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
        '''
        self._param.prompt = prompt
        kwargs_ = deepcopy(kwargs)
        kwargs_["stream"] = False
        response = Generate._run(self, [], **kwargs_)
        try:
            regenerated_sql = response.loc[0, "content"]
            return regenerated_sql
        except Exception as e:
            logging.error(f"Failed to regenerate SQL: {e}")
            return None

    def debug(self, **kwargs):
        return self._run([], **kwargs)
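A minimal sketch of what `_refactor` extracts, assuming an LLM reply that wraps the statement in a fenced sql block (the reply text is invented); `_refactor` first strips any <think> block, then prefers the fenced section over the bare-SELECT fallback:

import re

# Invented LLM reply; same fenced-block regex as _refactor above.
reply = "Sure, here you go:\n```sql\nSELECT name FROM users LIMIT 3;\n```"
match = re.search(r"```sql\s*(.*?)\s*```", reply, re.DOTALL)
print(match.group(1))  # SELECT name FROM users LIMIT 3;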
@@ -17,10 +17,11 @@ import re
from functools import partial
import pandas as pd
from api.db import LLMType
from api.db.services.dialog_service import message_fit_in
from api.db.services.conversation_service import structure_answer
from api.db.services.llm_service import LLMBundle
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase
from rag.prompts import message_fit_in


class GenerateParam(ComponentParamBase):
@@ -51,11 +52,16 @@ class GenerateParam(ComponentParamBase):

    def gen_conf(self):
        conf = {}
        if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
        if self.temperature > 0: conf["temperature"] = self.temperature
        if self.top_p > 0: conf["top_p"] = self.top_p
        if self.presence_penalty > 0: conf["presence_penalty"] = self.presence_penalty
        if self.frequency_penalty > 0: conf["frequency_penalty"] = self.frequency_penalty
        if self.max_tokens > 0:
            conf["max_tokens"] = self.max_tokens
        if self.temperature > 0:
            conf["temperature"] = self.temperature
        if self.top_p > 0:
            conf["top_p"] = self.top_p
        if self.presence_penalty > 0:
            conf["presence_penalty"] = self.presence_penalty
        if self.frequency_penalty > 0:
            conf["frequency_penalty"] = self.frequency_penalty
        return conf


@@ -63,10 +69,8 @@ class Generate(ComponentBase):
    component_name = "Generate"

    def get_dependent_components(self):
        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
                     if para.get("component_id") \
                     and para["component_id"].lower().find("answer") < 0 \
                     and para["component_id"].lower().find("begin") < 0])
        inputs = self.get_input_elements()
        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
        return list(cpnts)

    def set_cite(self, retrieval_res, answer):
@@ -83,7 +87,8 @@ class Generate(ComponentBase):
        recall_docs = []
        for i in idx:
            did = retrieval_res.loc[int(i), "doc_id"]
            if did in doc_ids: continue
            if did in doc_ids:
                continue
            doc_ids.add(did)
            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

@@ -96,32 +101,54 @@ class Generate(ComponentBase):
        }

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        res = {"content": answer, "reference": reference}
        res = structure_answer(None, res, "", "")

        return res

    def get_input_elements(self):
        key_set = set([])
        res = [{"key": "user", "name": "Input your question here:"}]
        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
            cpn_id = r.group(1)
            if cpn_id in key_set:
                continue
            if cpn_id.lower().find("begin@") == 0:
                cpn_id, key = cpn_id.split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] != key:
                        continue
                    res.append({"key": r.group(1), "name": p["name"]})
                    key_set.add(r.group(1))
                continue
            cpn_nm = self._canvas.get_component_name(cpn_id)
            if not cpn_nm:
                continue
            res.append({"key": cpn_id, "name": cpn_nm})
            key_set.add(cpn_id)
        return res

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = []
        self._param.inputs = []
        for para in self._param.parameters:
            if not para.get("component_id"): continue
            component_id = para["component_id"].split("@")[0]
            if para["component_id"].lower().find("@") >= 0:
                cpn_id, key = para["component_id"].split("@")
        for para in self.get_input_elements()[1:]:
            if para["key"].lower().find("begin@") == 0:
                cpn_id, key = para["key"].split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] == key:
                        kwargs[para["key"]] = p.get("value", "")
                        self._param.inputs.append(
                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
                            {"component_id": para["key"], "content": kwargs[para["key"]]})
                        break
                else:
                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
                continue

            component_id = para["key"]
            cpn = self._canvas.get_component(component_id)["obj"]
            if cpn.component_name.lower() == "answer":
                hist = self._canvas.get_history(1)
@@ -137,12 +164,13 @@ class Generate(ComponentBase):
            else:
                if cpn.component_name.lower() == "retrieval":
                    retrieval_res.append(out)
                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
                self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})
                kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
                self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})

        if retrieval_res:
            retrieval_res = pd.concat(retrieval_res, ignore_index=True)
        else: retrieval_res = pd.DataFrame([])
        else:
            retrieval_res = pd.DataFrame([])

        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)
@@ -159,15 +187,18 @@ class Generate(ComponentBase):
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            return pd.DataFrame([res])

        msg = self._canvas.get_history(self._param.message_history_window_size)
        if len(msg) < 1: msg.append({"role": "user", "content": ""})
        if len(msg) < 1:
            msg.append({"role": "user", "content": "Output: "})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
        if len(msg) < 2: msg.append({"role": "user", "content": ""})
        if len(msg) < 2:
            msg.append({"role": "user", "content": "Output: "})
        ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            res = self.set_cite(retrieval_res, ans)
@@ -178,16 +209,20 @@ class Generate(ComponentBase):
    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            res = {"content": "\n- ".join(retrieval_res["empty_response"]) if "\n- ".join(
                retrieval_res["empty_response"]) else "Nothing found in knowledgebase!", "reference": []}
            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            yield res
            self.set_output(res)
            return

        msg = self._canvas.get_history(self._param.message_history_window_size)
        if len(msg) < 1: msg.append({"role": "user", "content": ""})
        if msg and msg[0]['role'] == 'assistant':
            msg.pop(0)
        if len(msg) < 1:
            msg.append({"role": "user", "content": "Output: "})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
        if len(msg) < 2: msg.append({"role": "user", "content": ""})
        if len(msg) < 2:
            msg.append({"role": "user", "content": "Output: "})
        answer = ""
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
            res = {"content": ans, "reference": []}
@@ -198,4 +233,18 @@ class Generate(ComponentBase):
            res = self.set_cite(retrieval_res, answer)
            yield res

        self.set_output(res)
        self.set_output(Generate.be_output(res))

    def debug(self, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        for para in self._param.debug_inputs:
            kwargs[para["key"]] = para.get("value", "")

        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

        u = kwargs.get("user")
        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
        return pd.DataFrame([ans])
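The `{component:id}` / `{begin@key}` placeholders that `get_input_elements` scans for are ordinary brace tokens inside the prompt text. A small sketch of the same regex on an invented prompt:

import re

# Invented prompt; the pattern is the one used by get_input_elements above.
prompt = "Answer using {retrieval:0} and greet {begin@user_name}."
tokens = re.findall(r"\{([a-z]+[:@][a-z0-9_-]+)\}", prompt, flags=re.IGNORECASE)
print(tokens)  # ['retrieval:0', 'begin@user_name']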
@@ -35,12 +35,14 @@ class InvokeParam(ComponentParamBase):
        self.url = ""
        self.timeout = 60
        self.clean_html = False
        self.datatype = "json"  # New parameter to determine data posting type

    def check(self):
        self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
        self.check_empty(self.url, "End point URL")
        self.check_positive_integer(self.timeout, "Timeout time in second")
        self.check_boolean(self.clean_html, "Clean HTML")
        self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata'])  # Check for valid datapost value


class Invoke(ComponentBase, ABC):
@@ -50,14 +52,24 @@ class Invoke(ComponentBase, ABC):
        args = {}
        for para in self._param.variables:
            if para.get("component_id"):
                cpn = self._canvas.get_component(para["component_id"])["obj"]
                if cpn.component_name.lower() == "answer":
                    args[para["key"]] = self._canvas.get_history(1)[0]["content"]
                    continue
                _, out = cpn.output(allow_partial=False)
                args[para["key"]] = "\n".join(out["content"])
                if '@' in para["component_id"]:
                    component = para["component_id"].split('@')[0]
                    field = para["component_id"].split('@')[1]
                    cpn = self._canvas.get_component(component)["obj"]
                    for param in cpn._param.query:
                        if param["key"] == field:
                            if "value" in param:
                                args[para["key"]] = param["value"]
                else:
                    cpn = self._canvas.get_component(para["component_id"])["obj"]
                    if cpn.component_name.lower() == "answer":
                        args[para["key"]] = self._canvas.get_history(1)[0]["content"]
                        continue
                    _, out = cpn.output(allow_partial=False)
                    if not out.empty:
                        args[para["key"]] = "\n".join(out["content"])
            else:
                args[para["key"]] = "\n".join(para["value"])
                args[para["key"]] = para["value"]

        url = self._param.url.strip()
        if url.find("http") != 0:
@@ -84,22 +96,36 @@ class Invoke(ComponentBase, ABC):
            return Invoke.be_output(response.text)

        if method == 'put':
            response = requests.put(url=url,
                                    data=args,
                                    headers=headers,
                                    proxies=proxies,
                                    timeout=self._param.timeout)
            if self._param.datatype.lower() == 'json':
                response = requests.put(url=url,
                                        json=args,
                                        headers=headers,
                                        proxies=proxies,
                                        timeout=self._param.timeout)
            else:
                response = requests.put(url=url,
                                        data=args,
                                        headers=headers,
                                        proxies=proxies,
                                        timeout=self._param.timeout)
            if self._param.clean_html:
                sections = HtmlParser()(None, response.content)
                return Invoke.be_output("\n".join(sections))
            return Invoke.be_output(response.text)

        if method == 'post':
            response = requests.post(url=url,
                                     json=args,
                                     headers=headers,
                                     proxies=proxies,
                                     timeout=self._param.timeout)
            if self._param.datatype.lower() == 'json':
                response = requests.post(url=url,
                                         json=args,
                                         headers=headers,
                                         proxies=proxies,
                                         timeout=self._param.timeout)
            else:
                response = requests.post(url=url,
                                         data=args,
                                         headers=headers,
                                         proxies=proxies,
                                         timeout=self._param.timeout)
            if self._param.clean_html:
                sections = HtmlParser()(None, response.content)
                return Invoke.be_output("\n".join(sections))
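The new `datatype` switch only changes how `requests` serializes the request body; a compact sketch of the difference (the endpoint URL is an example, not from the diff):

import requests

args = {"q": "hello"}
url = "https://httpbin.org/post"  # example endpoint, not from the diff

r_json = requests.post(url, json=args, timeout=10)  # Content-Type: application/json
r_form = requests.post(url, data=args, timeout=10)  # application/x-www-form-urlencoded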
agent/component/iteration.py (new file, 45 lines)
@@ -0,0 +1,45 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class IterationParam(ComponentParamBase):
    """
    Define the Iteration component parameters.
    """

    def __init__(self):
        super().__init__()
        self.delimiter = ","

    def check(self):
        self.check_empty(self.delimiter, "Delimiter")


class Iteration(ComponentBase, ABC):
    component_name = "Iteration"

    def get_start(self):
        for cid in self._canvas.components.keys():
            if self._canvas.get_component(cid)["obj"].component_name.lower() != "iterationitem":
                continue
            if self._canvas.get_component(cid)["parent_id"] == self._id:
                return self._canvas.get_component(cid)

    def _run(self, history, **kwargs):
        return self.output(allow_partial=False)[1]
agent/component/iterationitem.py (new file, 49 lines)
@@ -0,0 +1,49 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class IterationItemParam(ComponentParamBase):
    """
    Define the IterationItem component parameters.
    """
    def check(self):
        return True


class IterationItem(ComponentBase, ABC):
    component_name = "IterationItem"

    def __init__(self, canvas, id, param: ComponentParamBase):
        super().__init__(canvas, id, param)
        self._idx = 0

    def _run(self, history, **kwargs):
        parent = self.get_parent()
        ans = parent.get_input()
        ans = parent._param.delimiter.join(ans["content"]) if "content" in ans else ""
        ans = [a.strip() for a in ans.split(parent._param.delimiter)]
        df = pd.DataFrame([{"content": ans[self._idx]}])
        self._idx += 1
        if self._idx >= len(ans):
            self._idx = -1
        return df

    def end(self):
        return self._idx == -1
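In other words, each `_run` call hands out the next delimiter-separated item, and `_idx` drops to -1 after the last one so `end()` can stop the loop. A plain-Python sketch of that cursor behavior (the input string is invented):

# Invented input; mirrors the split/strip/advance logic of IterationItem._run.
delimiter = ","
items = [a.strip() for a in "alpha, beta, gamma".split(delimiter)]

idx = 0
while idx != -1:
    current = items[idx]                             # what _run wraps in a one-row DataFrame
    idx = idx + 1 if idx + 1 < len(items) else -1    # -1 is the end() signal
    print(current)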
@@ -1,130 +1,130 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
import pandas as pd
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class Jin10Param(ComponentParamBase):
    """
    Define the Jin10 component parameters.
    """

    def __init__(self):
        super().__init__()
        self.type = "flash"
        self.secret_key = "xxx"
        self.flash_type = '1'
        self.calendar_type = 'cj'
        self.calendar_datatype = 'data'
        self.symbols_type = 'GOODS'
        self.symbols_datatype = 'symbols'
        self.contain = ""
        self.filter = ""

    def check(self):
        self.check_valid_value(self.type, "Type", ['flash', 'calendar', 'symbols', 'news'])
        self.check_valid_value(self.flash_type, "Flash Type", ['1', '2', '3', '4', '5'])
        self.check_valid_value(self.calendar_type, "Calendar Type", ['cj', 'qh', 'hk', 'us'])
        self.check_valid_value(self.calendar_datatype, "Calendar DataType", ['data', 'event', 'holiday'])
        self.check_valid_value(self.symbols_type, "Symbols Type", ['GOODS', 'FOREX', 'FUTURE', 'CRYPTO'])
        self.check_valid_value(self.symbols_datatype, 'Symbols DataType', ['symbols', 'quotes'])


class Jin10(ComponentBase, ABC):
    component_name = "Jin10"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Jin10.be_output("")

        jin10_res = []
        headers = {'secret-key': self._param.secret_key}
        try:
            if self._param.type == "flash":
                params = {
                    'category': self._param.flash_type,
                    'contain': self._param.contain,
                    'filter': self._param.filter
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/flash?category=' + self._param.flash_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                for i in response['data']:
                    jin10_res.append({"content": i['data']['content']})
            if self._param.type == "calendar":
                params = {
                    'category': self._param.calendar_type
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/calendar/' + self._param.calendar_datatype + '?category=' + self._param.calendar_type,
                    headers=headers, data=json.dumps(params))

                response = response.json()
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
            if self._param.type == "symbols":
                params = {
                    'type': self._param.symbols_type
                }
                if self._param.symbols_datatype == "quotes":
                    params['codes'] = 'BTCUSD'
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                if self._param.symbols_datatype == "symbols":
                    for i in response['data']:
                        i['Commodity Code'] = i['c']
                        i['Stock Exchange'] = i['e']
                        i['Commodity Name'] = i['n']
                        i['Commodity Type'] = i['t']
                        del i['c'], i['e'], i['n'], i['t']
                if self._param.symbols_datatype == "quotes":
                    for i in response['data']:
                        i['Selling Price'] = i['a']
                        i['Buying Price'] = i['b']
                        i['Commodity Code'] = i['c']
                        i['Stock Exchange'] = i['e']
                        i['Highest Price'] = i['h']
                        i['Yesterday’s Closing Price'] = i['hc']
                        i['Lowest Price'] = i['l']
                        i['Opening Price'] = i['o']
                        i['Latest Price'] = i['p']
                        i['Market Quote Time'] = i['t']
                        del i['a'], i['b'], i['c'], i['e'], i['h'], i['hc'], i['l'], i['o'], i['p'], i['t']
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
            if self._param.type == "news":
                params = {
                    'contain': self._param.contain,
                    'filter': self._param.filter
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/news',
                    headers=headers, data=json.dumps(params))
                response = response.json()
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
        except Exception as e:
            return Jin10.be_output("**ERROR**: " + str(e))

        if not jin10_res:
            return Jin10.be_output("")

        return pd.DataFrame(jin10_res)
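The long run of manual key renames in the quotes branch could equally be driven by a lookup table; a hedged sketch of that alternative design (same short field codes as above, but the record itself is invented):

# Same short->long field codes as the quotes branch above; the record is invented.
QUOTE_FIELDS = {"a": "Selling Price", "b": "Buying Price", "c": "Commodity Code",
                "e": "Stock Exchange", "h": "Highest Price", "hc": "Yesterday's Closing Price",
                "l": "Lowest Price", "o": "Opening Price", "p": "Latest Price",
                "t": "Market Quote Time"}

record = {"a": 9.1, "b": 9.0, "c": "XAUUSD", "e": "NYMEX", "h": 9.3,
          "hc": 8.9, "l": 8.8, "o": 8.9, "p": 9.05, "t": "2024-01-01 12:00:00"}
renamed = {QUOTE_FIELDS[k]: v for k, v in record.items()}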
@@ -60,3 +60,6 @@ class KeywordExtract(Generate, ABC):
        ans = re.sub(r".*keyword:", "", ans).strip()
        logging.debug(f"ans: {ans}")
        return KeywordExtract.be_output(ans)

    def debug(self, **kwargs):
        return self._run([], **kwargs)
@@ -78,4 +78,6 @@ class Relevant(Generate, ABC):
            return Relevant.be_output(self._param.no)
        assert False, f"Relevant component got: {ans}"

    def debug(self, **kwargs):
        return self._run([], **kwargs)
@@ -23,6 +23,7 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase
from rag.app.tag import label_question


class RetrievalParam(ComponentParamBase):
@@ -42,7 +43,7 @@ class RetrievalParam(ComponentParamBase):

    def check(self):
        self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
        self.check_positive_number(self.top_n, "[Retrieval] Top N")


@@ -52,7 +53,9 @@ class Retrieval(ComponentBase, ABC):
    def _run(self, history, **kwargs):
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""

        lines = query.split('\n')
        user_queries = [line.split("USER:", 1)[1] for line in lines if line.startswith("USER:")]
        query = user_queries[-1] if user_queries else ""
        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            return Retrieval.be_output("")
@@ -70,7 +73,8 @@ class Retrieval(ComponentBase, ABC):
        kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                                 1, self._param.top_n,
                                                 self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                                 aggs=False, rerank_mdl=rerank_mdl)
                                                 aggs=False, rerank_mdl=rerank_mdl,
                                                 rank_feature=label_question(query, kbs))

        if not kbinfos["chunks"]:
            df = Retrieval.be_output("")
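The newly added parsing keeps only the latest `USER:` turn from a transcript-style input. A tiny sketch with an invented transcript:

# Invented transcript; mirrors the last-USER-line extraction added above.
query = "USER: hi\nASSISTANT: hello\nUSER: what is RAGFlow?"
user_queries = [line.split("USER:", 1)[1] for line in query.split("\n") if line.startswith("USER:")]
print(user_queries[-1].strip())  # what is RAGFlow?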
@@ -13,99 +13,82 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from rag.prompts import full_question


class RewriteQuestionParam(GenerateParam):

    """
    Define the QuestionRewrite component parameters.
    """

    def __init__(self):
        super().__init__()
        self.temperature = 0.9
        self.prompt = ""
        self.loop = 1
        self.language = ""

    def check(self):
        super().check()

    def get_prompt(self, conv):
        self.prompt = """
        You are an expert at query expansion to generate a paraphrasing of a question.
        I can't retrieve relevant information from the knowledge base by using user's question directly.
        You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
        writing the abbreviation in its entirety, adding some extra descriptions or explanations,
        changing the way of expression, translating the original question into another language (English/Chinese), etc.
        And return 5 versions of question and one is from translation.
        Just list the question. No other words are needed.
        """
        return f"""
        Role: A helpful assistant
        Task: Generate a full user question that would follow the conversation.
        Requirements & Restrictions:
        - Text generated MUST be in the same language of the original user's question.
        - If the user's latest question is complete, don't do anything, just return the original question.
        - DON'T generate anything except a refined question.

        ######################
        -Examples-
        ######################
        # Example 1
        ## Conversation
        USER: What is the name of Donald Trump's father?
        ASSISTANT: Fred Trump.
        USER: And his mother?
        ###############
        Output: What's the name of Donald Trump's mother?
        ------------
        # Example 2
        ## Conversation
        USER: What is the name of Donald Trump's father?
        ASSISTANT: Fred Trump.
        USER: And his mother?
        ASSISTANT: Mary Trump.
        User: What's her full name?
        ###############
        Output: What's the full name of Donald Trump's mother Mary Trump?
        ######################
        # Real Data
        ## Conversation
        {conv}
        ###############
        """
        return self.prompt


class RewriteQuestion(Generate, ABC):
    component_name = "RewriteQuestion"

    def _run(self, history, **kwargs):
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        if self._loop >= self._param.loop:
            self._loop = 0
            raise Exception("Sorry! Nothing relevant found.")
        self._loop += 1

        hist = self._canvas.get_history(4)
        conv = []
        for m in hist:
            if m["role"] not in ["user", "assistant"]: continue
            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
        conv = "\n".join(conv)

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(conv), [{"role": "user", "content": "Output: "}],
                            self._param.gen_conf())
        hist = self._canvas.get_history(self._param.message_history_window_size)
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""
        messages = [h for h in hist if h["role"] != "system"]
        if messages[-1]["role"] != "user":
            messages.append({"role": "user", "content": query})
        ans = full_question(self._canvas.get_tenant_id(), self._param.llm_id, messages, self.gen_lang(self._param.language))
        self._canvas.history.pop()
        self._canvas.history.append(("user", ans))

        logging.debug(ans)
        return RewriteQuestion.be_output(ans)

    @staticmethod
    def gen_lang(language):
        # convert code lang to language word for the prompt
        language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
                         'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
                         'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
                         'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
                         'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
                         'zh-tw': 'Chinese', 'co': 'Corsican',
                         'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
                         'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
                         'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
                         'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
                         'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
                         'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
                         'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
                         'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
                         'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
                         'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
                         'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
                         'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
                         'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
                         'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
                         'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
                         'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
                         'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
                         'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
                         'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
                         'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
                         'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
                         'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
                         'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
                         'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
                         'su': 'Sundanese',
                         'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
                         'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
                         'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
                         'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
                         'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
                         'yo': 'Yoruba', 'zu': 'Zulu'}
        if language in language_dict:
            return language_dict[language]
        else:
            return ""
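A small standalone mirror of `gen_lang`'s behavior, so the fallback is explicit (only two table entries shown here; the full table is in the diff above):

# Minimal standalone mirror of gen_lang; "" means no language hint is added.
language_dict = {"de": "German", "fr": "French"}

def gen_lang(language: str) -> str:
    return language_dict.get(language, "")

print(gen_lang("de"))  # German
print(gen_lang("zz"))  # (empty string)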
@@ -41,7 +41,8 @@ class SwitchParam(ComponentParamBase):
     def check(self):
         self.check_empty(self.conditions, "[Switch] conditions")
         for cond in self.conditions:
-            if not cond["to"]: raise ValueError(f"[Switch] 'To' can not be empty!")
+            if not cond["to"]:
+                raise ValueError("[Switch] 'To' can not be empty!")


 class Switch(ComponentBase, ABC):
@@ -51,7 +52,8 @@ class Switch(ComponentBase, ABC):
         res = []
         for cond in self._param.conditions:
             for item in cond["items"]:
-                if not item["cpn_id"]: continue
+                if not item["cpn_id"]:
+                    continue
                 if item["cpn_id"].find("begin") >= 0:
                     continue
                 cid = item["cpn_id"].split("@")[0]
@@ -63,7 +65,8 @@ class Switch(ComponentBase, ABC):
         for cond in self._param.conditions:
             res = []
             for item in cond["items"]:
-                if not item["cpn_id"]:continue
+                if not item["cpn_id"]:
+                    continue
                 cid = item["cpn_id"].split("@")[0]
                 if item["cpn_id"].find("@") > 0:
                     cpn_id, key = item["cpn_id"].split("@")
@@ -107,22 +110,22 @@ class Switch(ComponentBase, ABC):
         elif operator == ">":
             try:
                 return True if float(input) > float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input > value else False
         elif operator == "<":
             try:
                 return True if float(input) < float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input < value else False
         elif operator == "≥":
             try:
                 return True if float(input) >= float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input >= value else False
         elif operator == "≤":
             try:
                 return True if float(input) <= float(value) else False
-            except Exception as e:
+            except Exception:
                 return True if input <= value else False

         raise ValueError('Not supported operator' + operator)
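The four operator branches above repeat the same numeric-first, string-fallback comparison. A hedged sketch of how that pattern could be factored into one helper — the use of the standard-library operator module is my addition, not code from this diff:

import operator as op

_OPS = {">": op.gt, "<": op.lt, "≥": op.ge, "≤": op.le}

def compare(lhs, rhs, symbol):
    # Try a numeric comparison first; fall back to comparing the raw values
    # (typically strings) when either side cannot be parsed as a number.
    fn = _OPS[symbol]
    try:
        return fn(float(lhs), float(rhs))
    except (TypeError, ValueError):
        return fn(lhs, rhs)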
@@ -13,8 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import re
 from agent.component.base import ComponentBase, ComponentParamBase
+from jinja2 import Template as Jinja2Template


 class TemplateParam(ComponentParamBase):
@@ -36,31 +38,49 @@ class Template(ComponentBase):
     component_name = "Template"

     def get_dependent_components(self):
-        cpnts = set([para["component_id"].split("@")[0] for para in self._param.parameters \
-                     if para.get("component_id") \
-                     and para["component_id"].lower().find("answer") < 0 \
-                     and para["component_id"].lower().find("begin") < 0])
+        inputs = self.get_input_elements()
+        cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
         return list(cpnts)

+    def get_input_elements(self):
+        key_set = set([])
+        res = []
+        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
+            cpn_id = r.group(1)
+            if cpn_id in key_set:
+                continue
+            if cpn_id.lower().find("begin@") == 0:
+                cpn_id, key = cpn_id.split("@")
+                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
+                    if p["key"] != key:
+                        continue
+                    res.append({"key": r.group(1), "name": p["name"]})
+                    key_set.add(r.group(1))
+                continue
+            cpn_nm = self._canvas.get_component_name(cpn_id)
+            if not cpn_nm:
+                continue
+            res.append({"key": cpn_id, "name": cpn_nm})
+            key_set.add(cpn_id)
+        return res
+
     def _run(self, history, **kwargs):
         content = self._param.content

         self._param.inputs = []
-        for para in self._param.parameters:
-            if not para.get("component_id"): continue
-            component_id = para["component_id"].split("@")[0]
-            if para["component_id"].lower().find("@") >= 0:
-                cpn_id, key = para["component_id"].split("@")
+        for para in self.get_input_elements():
+            if para["key"].lower().find("begin@") == 0:
+                cpn_id, key = para["key"].split("@")
                 for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                     if p["key"] == key:
-                        kwargs[para["key"]] = p.get("value", "")
-                        self._param.inputs.append(
-                            {"component_id": para["component_id"], "content": kwargs[para["key"]]})
+                        value = p.get("value", "")
+                        self.make_kwargs(para, kwargs, value)
                         break
                 else:
                     assert False, f"Can't find parameter '{key}' for {cpn_id}"
                 continue

+            component_id = para["key"]
             cpn = self._canvas.get_component(component_id)["obj"]
             if cpn.component_name.lower() == "answer":
                 hist = self._canvas.get_history(1)
@@ -68,18 +88,49 @@ class Template(ComponentBase):
                     hist = hist[0]["content"]
                 else:
                     hist = ""
-                kwargs[para["key"]] = hist
+                self.make_kwargs(para, kwargs, hist)
                 continue

             _, out = cpn.output(allow_partial=False)
-            if "content" not in out.columns:
-                kwargs[para["key"]] = ""
-            else:
-                kwargs[para["key"]] = " - "+"\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
-            self._param.inputs.append({"component_id": para["component_id"], "content": kwargs[para["key"]]})

+            result = ""
+            if "content" in out.columns:
+                result = "\n".join(
+                    [o if isinstance(o, str) else str(o) for o in out["content"]]
+                )
+
+            self.make_kwargs(para, kwargs, result)
+
+        template = Jinja2Template(content)
+
+        try:
+            content = template.render(kwargs)
+        except Exception:
+            pass
+
         for n, v in kwargs.items():
-            content = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), content)
+            try:
+                v = json.dumps(v, ensure_ascii=False)
+            except Exception:
+                pass
+            content = re.sub(
+                r"\{%s\}" % re.escape(n), v, content
+            )
+        content = re.sub(
+            r"(\\\"|\")", "", content
+        )
+        content = re.sub(
+            r"(#+)", r" \1 ", content
+        )

         return Template.be_output(content)

+    def make_kwargs(self, para, kwargs, value):
+        self._param.inputs.append(
+            {"component_id": para["key"], "content": value}
+        )
+        try:
+            value = json.loads(value)
+        except Exception:
+            pass
+        kwargs[para["key"]] = value
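The new Template flow renders the content twice: a Jinja2 pass for {{ key }} placeholders, then a literal {key} substitution with JSON-serialized values. A standalone sketch of that fallback logic, written so it runs outside the component (the function name and sample strings are mine):

import json
import re
from jinja2 import Template as Jinja2Template

def render(content: str, kwargs: dict) -> str:
    try:
        # First pass: Jinja2 handles {{ key }} style placeholders.
        content = Jinja2Template(content).render(kwargs)
    except Exception:
        pass
    # Second pass: literal {key} placeholders, with values JSON-serialized.
    for n, v in kwargs.items():
        try:
            v = json.dumps(v, ensure_ascii=False)
        except Exception:
            pass
        content = re.sub(r"\{%s\}" % re.escape(n), v, content)
    return content

# "Hi RAGFlow / "RAGFlow"" — the second placeholder keeps JSON quoting,
# which is why the component strips stray quotes afterwards.
print(render("Hi {{ name }} / {name}", {"name": "RAGFlow"}))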
@@ -1,72 +1,72 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
import pandas as pd
import time
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class TuShareParam(ComponentParamBase):
    """
    Define the TuShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.token = "xxx"
        self.src = "eastmoney"
        self.start_date = "2024-01-01 09:00:00"
        self.end_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.keyword = ""

    def check(self):
        self.check_valid_value(self.src, "Quick News Source",
                               ["sina", "wallstreetcn", "10jqka", "eastmoney", "yuncaijing", "fenghuang", "jinrongjie"])


class TuShare(ComponentBase, ABC):
    component_name = "TuShare"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return TuShare.be_output("")

        try:
            tus_res = []
            params = {
                "api_name": "news",
                "token": self._param.token,
                "params": {"src": self._param.src, "start_date": self._param.start_date,
                           "end_date": self._param.end_date}
            }
            response = requests.post(url="http://api.tushare.pro", data=json.dumps(params).encode('utf-8'))
            response = response.json()
            if response['code'] != 0:
                return TuShare.be_output(response['msg'])
            df = pd.DataFrame(response['data']['items'])
            df.columns = response['data']['fields']
            tus_res.append({"content": (df[df['content'].str.contains(self._param.keyword, case=False)]).to_markdown()})
        except Exception as e:
            return TuShare.be_output("**ERROR**: " + str(e))

        if not tus_res:
            return TuShare.be_output("")

        return pd.DataFrame(tus_res)
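For reference, a minimal sketch of the TuShare news call the component wraps. The endpoint and payload shape are taken from the code above; the token and dates are placeholders:

import json
import requests

payload = {
    "api_name": "news",
    "token": "YOUR_TUSHARE_TOKEN",  # placeholder — a real TuShare API token is required
    "params": {"src": "eastmoney", "start_date": "2024-01-01 09:00:00",
               "end_date": "2024-01-02 09:00:00"},
}
resp = requests.post("http://api.tushare.pro", data=json.dumps(payload).encode("utf-8")).json()
if resp["code"] == 0:
    # The API returns column names and rows separately.
    fields, items = resp["data"]["fields"], resp["data"]["items"]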
@@ -1,80 +1,80 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import pywencai
from agent.component.base import ComponentBase, ComponentParamBase


class WenCaiParam(ComponentParamBase):
    """
    Define the WenCai component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.query_type = "stock"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.query_type, "Query type",
                               ['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
                                'futures', 'lccp',
                                'foreign_exchange'])


class WenCai(ComponentBase, ABC):
    component_name = "WenCai"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return WenCai.be_output("")

        try:
            wencai_res = []
            res = pywencai.get(query=ans, query_type=self._param.query_type, perpage=self._param.top_n)
            if isinstance(res, pd.DataFrame):
                wencai_res.append({"content": res.to_markdown()})
            if isinstance(res, dict):
                for item in res.items():
                    if isinstance(item[1], list):
                        wencai_res.append({"content": item[0] + "\n" + pd.DataFrame(item[1]).to_markdown()})
                        continue
                    if isinstance(item[1], str):
                        wencai_res.append({"content": item[0] + "\n" + item[1]})
                        continue
                    if isinstance(item[1], dict):
                        if "meta" in item[1].keys():
                            continue
                        wencai_res.append({"content": pd.DataFrame.from_dict(item[1], orient='index').to_markdown()})
                        continue
                    if isinstance(item[1], pd.DataFrame):
                        if "image_url" in item[1].columns:
                            continue
                        wencai_res.append({"content": item[1].to_markdown()})
                        continue

                    wencai_res.append({"content": item[0] + "\n" + str(item[1])})
        except Exception as e:
            return WenCai.be_output("**ERROR**: " + str(e))

        if not wencai_res:
            return WenCai.be_output("")

        return pd.DataFrame(wencai_res)
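pywencai can return a DataFrame, a string, a list, or a nested dict, which is why the loop above dispatches on type. A condensed sketch of the same normalization rule, written against a plain dict so it runs without pywencai (the sample data is invented):

import pandas as pd

def to_markdown_blocks(res: dict) -> list:
    blocks = []
    for key, val in res.items():  # clearer than indexing item[0]/item[1]
        if isinstance(val, list):
            blocks.append(key + "\n" + pd.DataFrame(val).to_markdown())
        elif isinstance(val, str):
            blocks.append(key + "\n" + val)
        elif isinstance(val, dict):
            blocks.append(pd.DataFrame.from_dict(val, orient="index").to_markdown())
        elif isinstance(val, pd.DataFrame):
            blocks.append(val.to_markdown())
        else:
            blocks.append(key + "\n" + str(val))
    return blocks

print(to_markdown_blocks({"note": "hi", "rows": [{"a": 1}]}))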
@@ -1,84 +1,84 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
import yfinance as yf


class YahooFinanceParam(ComponentParamBase):
    """
    Define the YahooFinance component parameters.
    """

    def __init__(self):
        super().__init__()
        self.info = True
        self.history = False
        self.count = False
        self.financials = False
        self.income_stmt = False
        self.balance_sheet = False
        self.cash_flow_statement = False
        self.news = True

    def check(self):
        self.check_boolean(self.info, "get all stock info")
        self.check_boolean(self.history, "get historical market data")
        self.check_boolean(self.count, "show share count")
        self.check_boolean(self.financials, "show financials")
        self.check_boolean(self.income_stmt, "income statement")
        self.check_boolean(self.balance_sheet, "balance sheet")
        self.check_boolean(self.cash_flow_statement, "cash flow statement")
        self.check_boolean(self.news, "show news")


class YahooFinance(ComponentBase, ABC):
    component_name = "YahooFinance"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return YahooFinance.be_output("")

        yahoo_res = []
        try:
            msft = yf.Ticker(ans)
            if self._param.info:
                yahoo_res.append({"content": "info:\n" + pd.Series(msft.info).to_markdown() + "\n"})
            if self._param.history:
                yahoo_res.append({"content": "history:\n" + msft.history().to_markdown() + "\n"})
            if self._param.financials:
                yahoo_res.append({"content": "calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n"})
            if self._param.balance_sheet:
                yahoo_res.append({"content": "balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n"})
                yahoo_res.append(
                    {"content": "quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n"})
            if self._param.cash_flow_statement:
                yahoo_res.append({"content": "cash flow statement:\n" + msft.cashflow.to_markdown() + "\n"})
                yahoo_res.append(
                    {"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
            if self._param.news:
                yahoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
        except Exception:
            logging.exception("YahooFinance got exception")

        if not yahoo_res:
            return YahooFinance.be_output("")

        return pd.DataFrame(yahoo_res)
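A minimal standalone sketch of the yfinance calls the component relies on. The symbol is an example, and live network access is required:

import yfinance as yf

ticker = yf.Ticker("MSFT")            # example symbol
info = ticker.info                    # dict of company metadata
hist = ticker.history(period="1mo")   # OHLCV DataFrame for the last month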
@@ -1,5 +1,5 @@
 #
-# Copyright 2019 The FATE Authors. All Rights Reserved.
+# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
(Several file diffs are suppressed here: some are too large, others contain lines that are too long.)

1107  agent/templates/research_report.json  (new file; diff suppressed)
@@ -43,6 +43,7 @@ if __name__ == '__main__':
         else:
             print(ans["content"])

-        if DEBUG: print(canvas.path)
+        if DEBUG:
+            print(canvas.path)
         question = input("\n==================== User =====================\n> ")
         canvas.add_user_input(question)
@@ -1,113 +1,113 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?",
                            "to": "concentrator:0"
                        },
                        "others": {
                            "description": "The question is not about the product usage, appearance and how it works.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
                            "to": "concentrator:1"
                        }
                    }
                }
            },
            "downstream": ["concentrator:0", "concentrator:1"],
            "upstream": ["answer:0"]
        },
        "concentrator:0": {
            "obj": {
                "component_name": "Concentrator",
                "params": {}
            },
            "downstream": ["message:0"],
            "upstream": ["categorize:0"]
        },
        "concentrator:1": {
            "obj": {
                "component_name": "Concentrator",
                "params": {}
            },
            "downstream": ["message:1_0", "message:1_1", "message:1_2"],
            "upstream": ["categorize:0"]
        },
        "message:0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 0_0!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:0"]
        },
        "message:1_0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 1_0!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:1"]
        },
        "message:1_1": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 1_1!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:1"]
        },
        "message:1_2": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Message 1_2!!!!!!!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["concentrator:1"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
1  agentic_reasoning/__init__.py  (new file)
@@ -0,0 +1 @@
+from .deep_research import DeepResearcher as DeepResearcher

167  agentic_reasoning/deep_research.py  (new file)
@@ -0,0 +1,167 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from functools import partial
from agentic_reasoning.prompts import BEGIN_SEARCH_QUERY, BEGIN_SEARCH_RESULT, END_SEARCH_RESULT, MAX_SEARCH_LIMIT, \
    END_SEARCH_QUERY, REASON_PROMPT, RELEVANT_EXTRACTION_PROMPT
from api.db.services.llm_service import LLMBundle
from rag.nlp import extract_between
from rag.prompts import kb_prompt
from rag.utils.tavily_conn import Tavily


class DeepResearcher:
    def __init__(self,
                 chat_mdl: LLMBundle,
                 prompt_config: dict,
                 kb_retrieve: partial = None,
                 kg_retrieve: partial = None
                 ):
        self.chat_mdl = chat_mdl
        self.prompt_config = prompt_config
        self._kb_retrieve = kb_retrieve
        self._kg_retrieve = kg_retrieve

    def thinking(self, chunk_info: dict, question: str):
        def rm_query_tags(line):
            pattern = re.escape(BEGIN_SEARCH_QUERY) + r"(.*?)" + re.escape(END_SEARCH_QUERY)
            return re.sub(pattern, "", line)

        def rm_result_tags(line):
            pattern = re.escape(BEGIN_SEARCH_RESULT) + r"(.*?)" + re.escape(END_SEARCH_RESULT)
            return re.sub(pattern, "", line)

        executed_search_queries = []
        msg_history = [{"role": "user", "content": f'Question:\"{question}\"\n'}]
        all_reasoning_steps = []
        think = "<think>"
        for ii in range(MAX_SEARCH_LIMIT + 1):
            if ii == MAX_SEARCH_LIMIT - 1:
                summary_think = f"\n{BEGIN_SEARCH_RESULT}\nThe maximum search limit is exceeded. You are not allowed to search.\n{END_SEARCH_RESULT}\n"
                yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
                all_reasoning_steps.append(summary_think)
                msg_history.append({"role": "assistant", "content": summary_think})
                break

            query_think = ""
            if msg_history[-1]["role"] != "user":
                msg_history.append({"role": "user", "content": "Continue reasoning with the new information.\n"})
            else:
                msg_history[-1]["content"] += "\n\nContinue reasoning with the new information.\n"
            for ans in self.chat_mdl.chat_streamly(REASON_PROMPT, msg_history, {"temperature": 0.7}):
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
                if not ans:
                    continue
                query_think = ans
                yield {"answer": think + rm_query_tags(query_think) + "</think>", "reference": {}, "audio_binary": None}

            think += rm_query_tags(query_think)
            all_reasoning_steps.append(query_think)
            queries = extract_between(query_think, BEGIN_SEARCH_QUERY, END_SEARCH_QUERY)
            if not queries:
                if ii > 0:
                    break
                queries = [question]

            for search_query in queries:
                logging.info(f"[THINK]Query: {ii}. {search_query}")
                msg_history.append({"role": "assistant", "content": search_query})
                think += f"\n\n> {ii + 1}. {search_query}\n\n"
                yield {"answer": think + "</think>", "reference": {}, "audio_binary": None}

                summary_think = ""
                # The search query has been searched in previous steps.
                if search_query in executed_search_queries:
                    summary_think = f"\n{BEGIN_SEARCH_RESULT}\nYou have searched this query. Please refer to previous results.\n{END_SEARCH_RESULT}\n"
                    yield {"answer": think + summary_think + "</think>", "reference": {}, "audio_binary": None}
                    all_reasoning_steps.append(summary_think)
                    msg_history.append({"role": "user", "content": summary_think})
                    think += summary_think
                    continue
                # Record the query so that repeats are detected by the check above.
                executed_search_queries.append(search_query)

                truncated_prev_reasoning = ""
                for i, step in enumerate(all_reasoning_steps):
                    truncated_prev_reasoning += f"Step {i + 1}: {step}\n\n"

                prev_steps = truncated_prev_reasoning.split('\n\n')
                if len(prev_steps) <= 5:
                    truncated_prev_reasoning = '\n\n'.join(prev_steps)
                else:
                    truncated_prev_reasoning = ''
                    for i, step in enumerate(prev_steps):
                        if i == 0 or i >= len(prev_steps) - 4 or BEGIN_SEARCH_QUERY in step or BEGIN_SEARCH_RESULT in step:
                            truncated_prev_reasoning += step + '\n\n'
                        else:
                            if truncated_prev_reasoning[-len('\n\n...\n\n'):] != '\n\n...\n\n':
                                truncated_prev_reasoning += '...\n\n'
                    truncated_prev_reasoning = truncated_prev_reasoning.strip('\n')

                # Retrieval procedure:
                # 1. KB search
                # 2. Web search (optional)
                # 3. KG search (optional)
                kbinfos = self._kb_retrieve(question=search_query) if self._kb_retrieve else {"chunks": [], "doc_aggs": []}

                if self.prompt_config.get("tavily_api_key"):
                    tav = Tavily(self.prompt_config["tavily_api_key"])
                    tav_res = tav.retrieve_chunks(search_query)
                    kbinfos["chunks"].extend(tav_res["chunks"])
                    kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
                if self.prompt_config.get("use_kg") and self._kg_retrieve:
                    ck = self._kg_retrieve(question=search_query)
                    if ck["content_with_weight"]:
                        kbinfos["chunks"].insert(0, ck)

                # Merge chunk info for citations
                if not chunk_info["chunks"]:
                    for k in chunk_info.keys():
                        chunk_info[k] = kbinfos[k]
                else:
                    cids = [c["chunk_id"] for c in chunk_info["chunks"]]
                    for c in kbinfos["chunks"]:
                        if c["chunk_id"] in cids:
                            continue
                        chunk_info["chunks"].append(c)
                    dids = [d["doc_id"] for d in chunk_info["doc_aggs"]]
                    for d in kbinfos["doc_aggs"]:
                        if d["doc_id"] in dids:
                            continue
                        chunk_info["doc_aggs"].append(d)

                think += "\n\n"
                for ans in self.chat_mdl.chat_streamly(
                        RELEVANT_EXTRACTION_PROMPT.format(
                            prev_reasoning=truncated_prev_reasoning,
                            search_query=search_query,
                            document="\n".join(kb_prompt(kbinfos, 4096))
                        ),
                        [{"role": "user",
                          "content": f'Now you should analyze each web page and find helpful information based on the current search query "{search_query}" and previous reasoning steps.'}],
                        {"temperature": 0.7}):
                    ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
                    if not ans:
                        continue
                    summary_think = ans
                    yield {"answer": think + rm_result_tags(summary_think) + "</think>", "reference": {}, "audio_binary": None}

                all_reasoning_steps.append(summary_think)
                msg_history.append(
                    {"role": "user", "content": f"\n\n{BEGIN_SEARCH_RESULT}{summary_think}{END_SEARCH_RESULT}\n\n"})
                think += rm_result_tags(summary_think)
                logging.info(f"[THINK]Summary: {ii}. {summary_think}")

        yield think + "</think>"
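extract_between comes from rag.nlp; the loop above relies on it to pull the model's search queries out of the tagged spans. A hedged sketch of that behavior — an illustration, not the library's actual implementation:

import re

def extract_between(text: str, start_tag: str, end_tag: str) -> list:
    # Return the trimmed payload found between each start/end tag pair.
    pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
    return [m.strip() for m in re.findall(pattern, text, flags=re.DOTALL)]

extract_between("x <|begin_search_query|> who? <|end_search_query|> y",
                "<|begin_search_query|>", "<|end_search_query|>")   # -> ["who?"]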
112  agentic_reasoning/prompts.py  (new file)
@@ -0,0 +1,112 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

BEGIN_SEARCH_QUERY = "<|begin_search_query|>"
END_SEARCH_QUERY = "<|end_search_query|>"
BEGIN_SEARCH_RESULT = "<|begin_search_result|>"
END_SEARCH_RESULT = "<|end_search_result|>"
MAX_SEARCH_LIMIT = 6

REASON_PROMPT = (
    "You are a reasoning assistant with the ability to perform dataset searches to help "
    "you answer the user's question accurately. You have special tools:\n\n"
    f"- To perform a search: write {BEGIN_SEARCH_QUERY} your query here {END_SEARCH_QUERY}.\n"
    f"Then, the system will search and analyze relevant content, then provide you with helpful information in the format {BEGIN_SEARCH_RESULT} ...search results... {END_SEARCH_RESULT}.\n\n"
    f"You can repeat the search process multiple times if necessary. The maximum number of search attempts is limited to {MAX_SEARCH_LIMIT}.\n\n"
    "Once you have all the information you need, continue your reasoning.\n\n"
    "-- Example 1 --\n"  ########################################
    "Question: \"Are both the directors of Jaws and Casino Royale from the same country?\"\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Who is the director of Jaws?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nThe director of Jaws is Steven Spielberg...\n{END_SEARCH_RESULT}\n\n"
    "Continue reasoning with the new information.\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Where is Steven Spielberg from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nSteven Allan Spielberg is an American filmmaker...\n{END_SEARCH_RESULT}\n\n"
    "Continue reasoning with the new information...\n\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Who is the director of Casino Royale?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nCasino Royale is a 2006 spy film directed by Martin Campbell...\n{END_SEARCH_RESULT}\n\n"
    "Continue reasoning with the new information...\n\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Where is Martin Campbell from?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nMartin Campbell (born 24 October 1943) is a New Zealand film and television director...\n{END_SEARCH_RESULT}\n\n"
    "Continue reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"

    "-- Example 2 --\n"  #########################################
    "Question: \"When was the founder of craigslist born?\"\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}Who was the founder of craigslist?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nCraigslist was founded by Craig Newmark...\n{END_SEARCH_RESULT}\n\n"
    "Continue reasoning with the new information.\n"
    "Assistant:\n"
    f" {BEGIN_SEARCH_QUERY}When was Craig Newmark born?{END_SEARCH_QUERY}\n\n"
    "User:\n"
    f" {BEGIN_SEARCH_RESULT}\nCraig Newmark was born on December 6, 1952...\n{END_SEARCH_RESULT}\n\n"
    "Continue reasoning with the new information...\n\n"
    "Assistant:\nIt's enough to answer the question\n"
    "**Remember**:\n"
    "- You have a dataset to search, so you just provide a proper search query.\n"
    f"- Use {BEGIN_SEARCH_QUERY} to request a dataset search and end with {END_SEARCH_QUERY}.\n"
    "- The language of the query MUST be the same as that of the 'Question' or the 'search result'.\n"
    "- When done searching, continue your reasoning.\n\n"
    'Please answer the following question. You should think step by step to solve it.\n\n'
)

RELEVANT_EXTRACTION_PROMPT = """**Task Instruction:**

You are tasked with reading and analyzing web pages based on the following inputs: **Previous Reasoning Steps**, **Current Search Query**, and **Searched Web Pages**. Your objective is to extract relevant and helpful information for **Current Search Query** from the **Searched Web Pages** and seamlessly integrate this information into the **Previous Reasoning Steps** to continue reasoning for the original question.

**Guidelines:**

1. **Analyze the Searched Web Pages:**
- Carefully review the content of each searched web page.
- Identify factual information that is relevant to the **Current Search Query** and can aid in the reasoning process for the original question.

2. **Extract Relevant Information:**
- Select the information from the Searched Web Pages that directly contributes to advancing the **Previous Reasoning Steps**.
- Ensure that the extracted information is accurate and relevant.

3. **Output Format:**
- **If the web pages provide helpful information for current search query:** Present the information beginning with `**Final Information**` as shown below.
- The language of the reply **MUST BE** the same as that of the 'Search Query' or 'Web Pages'.
**Final Information**

[Helpful information]

- **If the web pages do not provide any helpful information for current search query:** Output the following text.

**Final Information**

No helpful information found.

**Inputs:**
- **Previous Reasoning Steps:**
{prev_reasoning}

- **Current Search Query:**
{search_query}

- **Searched Web Pages:**
{document}

"""
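For orientation, a small sketch showing how RELEVANT_EXTRACTION_PROMPT is meant to be filled before being sent as the system prompt; the argument values here are invented:

from agentic_reasoning.prompts import RELEVANT_EXTRACTION_PROMPT

prompt = RELEVANT_EXTRACTION_PROMPT.format(
    prev_reasoning="Step 1: ...",                                  # truncated reasoning trace
    search_query="Who directed Jaws?",                             # current query
    document="Jaws is a 1975 film directed by Steven Spielberg.",  # retrieved chunks
)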
@@ -0,0 +1,18 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from beartype.claw import beartype_this_package
beartype_this_package()
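beartype_this_package() installs an import hook that runtime-type-checks every annotated callable in the package. A standalone sketch of the effect using beartype's plain decorator form — the function here is invented for illustration:

from beartype import beartype

@beartype
def add(x: int, y: int) -> int:
    return x + y

add(1, 2)    # fine
add(1, "2")  # raises a beartype type-violation error at call time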
@@ -119,8 +119,9 @@ def register_page(page_path):
     sys.modules[module_name] = page
     spec.loader.exec_module(page)
     page_name = getattr(page, "page_name", page_name)
+    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
     url_prefix = (
-        f"/api/{API_VERSION}" if "/sdk/" in path else f"/{API_VERSION}/{page_name}"
+        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
     )

     app.register_blueprint(page.manager, url_prefix=url_prefix)
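This change makes the "/sdk/" substring test also match Windows backslash paths. An equivalent, arguably simpler platform-neutral form would lean on os.sep — a suggestion, not what the diff does:

import os

sdk_path = f"{os.sep}sdk{os.sep}"   # "/sdk/" on POSIX, "\\sdk\\" on Windows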
@@ -152,8 +153,8 @@ def load_user(web_request):
             return user[0]
         else:
             return None
-    except Exception:
-        logging.exception("load_user got exception")
+    except Exception as e:
+        logging.warning(f"load_user got exception {e}")
         return None
     else:
         return None
@@ -25,7 +25,7 @@ from api.db import FileType, LLMType, ParserType, FileSource
 from api.db.db_models import APIToken, Task, File
 from api.db.services import duplicate_name
 from api.db.services.api_service import APITokenService, API4ConversationService
-from api.db.services.dialog_service import DialogService, chat, keyword_extraction
+from api.db.services.dialog_service import DialogService, chat
 from api.db.services.document_service import DocumentService, doc_upload_and_parse
 from api.db.services.file2document_service import File2DocumentService
 from api.db.services.file_service import FileService
@@ -38,6 +38,8 @@ from api.utils.api_utils import server_error_response, get_data_error_result, ge
     generate_confirmation_token

 from api.utils.file_utils import filename_type, thumbnail
+from rag.app.tag import label_question
+from rag.prompts import keyword_extraction
 from rag.utils.storage_factory import STORAGE_IMPL

 from api.db.services.canvas_service import UserCanvasService
@@ -45,7 +47,7 @@ from agent.canvas import Canvas
 from functools import partial


-@manager.route('/new_token', methods=['POST'])
+@manager.route('/new_token', methods=['POST'])  # noqa: F821
 @login_required
 def new_token():
     req = request.json
@@ -75,7 +77,7 @@ def new_token():
     return server_error_response(e)


-@manager.route('/token_list', methods=['GET'])
+@manager.route('/token_list', methods=['GET'])  # noqa: F821
 @login_required
 def token_list():
     try:
@@ -90,7 +92,7 @@ def token_list():
     return server_error_response(e)


-@manager.route('/rm', methods=['POST'])
+@manager.route('/rm', methods=['POST'])  # noqa: F821
 @validate_request("tokens", "tenant_id")
 @login_required
 def rm():
@@ -104,7 +106,7 @@ def rm():
     return server_error_response(e)


-@manager.route('/stats', methods=['GET'])
+@manager.route('/stats', methods=['GET'])  # noqa: F821
 @login_required
 def stats():
     try:
@@ -135,14 +137,13 @@ def stats():
     return server_error_response(e)


-@manager.route('/new_conversation', methods=['GET'])
+@manager.route('/new_conversation', methods=['GET'])  # noqa: F821
 def set_conversation():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
-    req = request.json
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     try:
         if objs[0].source == "agent":
             e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
@@ -176,19 +177,20 @@ def set_conversation():
     return server_error_response(e)


-@manager.route('/completion', methods=['POST'])
+@manager.route('/completion', methods=['POST'])  # noqa: F821
 @validate_request("conversation_id", "messages")
 def completion():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
         return get_data_error_result(message="Conversation not found!")
-    if "quote" not in req: req["quote"] = False
+    if "quote" not in req:
+        req["quote"] = False

     msg = []
     for m in req["messages"]:
@@ -197,7 +199,8 @@ def completion():
         if m["role"] == "assistant" and not msg:
             continue
         msg.append(m)
-    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
     message_id = msg[-1]["id"]

     def fillin_conv(ans):
@@ -340,14 +343,14 @@ def completion():
     return server_error_response(e)


-@manager.route('/conversation/<conversation_id>', methods=['GET'])
+@manager.route('/conversation/<conversation_id>', methods=['GET'])  # noqa: F821
 # @login_required
 def get(conversation_id):
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     try:
         e, conv = API4ConversationService.get_by_id(conversation_id)
@@ -356,7 +359,7 @@ def get(conversation_id):

         conv = conv.to_dict()
         if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
-            return get_json_result(data=False, message='Token is not valid for this conversation_id!"',
+            return get_json_result(data=False, message='Authentication error: API key is invalid for this conversation_id!"',
                                    code=settings.RetCode.AUTHENTICATION_ERROR)

         for referenct_i in conv['reference']:
@@ -371,14 +374,14 @@ def get(conversation_id):
     return server_error_response(e)


-@manager.route('/document/upload', methods=['POST'])
+@manager.route('/document/upload', methods=['POST'])  # noqa: F821
 @validate_request("kb_name")
 def upload():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     kb_name = request.form.get("kb_name").strip()
     tenant_id = objs[0].tenant_id
@@ -483,14 +486,14 @@ def upload():
     return get_json_result(data=doc_result.to_json())


-@manager.route('/document/upload_and_parse', methods=['POST'])
+@manager.route('/document/upload_and_parse', methods=['POST'])  # noqa: F821
 @validate_request("conversation_id")
 def upload_parse():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     if 'file' not in request.files:
         return get_json_result(
@@ -506,14 +509,14 @@ def upload_parse():
     return get_json_result(data=doc_ids)


-@manager.route('/list_chunks', methods=['POST'])
+@manager.route('/list_chunks', methods=['POST'])  # noqa: F821
 # @login_required
 def list_chunks():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json

@@ -546,14 +549,14 @@ def list_chunks():
     return get_json_result(data=res)


-@manager.route('/list_kb_docs', methods=['POST'])
+@manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
 # @login_required
 def list_kb_docs():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json
     tenant_id = objs[0].tenant_id
@@ -586,28 +589,28 @@ def list_kb_docs():
     return server_error_response(e)


-@manager.route('/document/infos', methods=['POST'])
+@manager.route('/document/infos', methods=['POST'])  # noqa: F821
 @validate_request("doc_ids")
 def docinfos():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
     req = request.json
     doc_ids = req["doc_ids"]
     docs = DocumentService.get_by_ids(doc_ids)
     return get_json_result(data=list(docs.dicts()))


-@manager.route('/document', methods=['DELETE'])
+@manager.route('/document', methods=['DELETE'])  # noqa: F821
 # @login_required
 def document_rm():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     tenant_id = objs[0].tenant_id
     req = request.json
@@ -659,7 +662,7 @@ def document_rm():
     return get_json_result(data=True)


-@manager.route('/completion_aibotk', methods=['POST'])
+@manager.route('/completion_aibotk', methods=['POST'])  # noqa: F821
 @validate_request("Authorization", "conversation_id", "word")
 def completion_faq():
     import base64
@@ -669,16 +672,18 @@ def completion_faq():
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     e, conv = API4ConversationService.get_by_id(req["conversation_id"])
     if not e:
         return get_data_error_result(message="Conversation not found!")
-    if "quote" not in req: req["quote"] = True
+    if "quote" not in req:
+        req["quote"] = True

     msg = []
     msg.append({"role": "user", "content": req["word"]})
-    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
     message_id = msg[-1]["id"]

     def fillin_conv(ans):
@@ -799,14 +804,14 @@ def completion_faq():
     return server_error_response(e)


-@manager.route('/retrieval', methods=['POST'])
+@manager.route('/retrieval', methods=['POST'])  # noqa: F821
 @validate_request("kb_id", "question")
 def retrieval():
     token = request.headers.get('Authorization').split()[1]
     objs = APIToken.query(token=token)
     if not objs:
         return get_json_result(
-            data=False, message='Token is not valid!"', code=settings.RetCode.AUTHENTICATION_ERROR)
+            data=False, message='Authentication error: API key is invalid!"', code=settings.RetCode.AUTHENTICATION_ERROR)

     req = request.json
     kb_ids = req.get("kb_id", [])
@@ -837,7 +842,8 @@ def retrieval():
         question += keyword_extraction(chat_mdl, question)
         ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
                                                similarity_threshold, vector_similarity_weight, top,
-                                               doc_ids, rerank_mdl=rerank_mdl)
+                                               doc_ids, rerank_mdl=rerank_mdl,
+                                               rank_feature=label_question(question, kbs))
         for c in ranks["chunks"]:
             c.pop("vector", None)
         return get_json_result(data=ranks)
@@ -13,10 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import traceback
from functools import partial
from flask import request, Response
from flask_login import login_required, current_user
from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
@@ -25,15 +23,16 @@ from api.utils import get_uuid
from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
from agent.canvas import Canvas
from peewee import MySQLDatabase, PostgresqlDatabase
from api.db.db_models import APIToken


@manager.route('/templates', methods=['GET'])
@manager.route('/templates', methods=['GET'])  # noqa: F821
@login_required
def templates():
    return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.get_all()])


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def canvas_list():
    return get_json_result(data=sorted([c.to_dict() for c in \
@@ -41,7 +40,7 @@ def canvas_list():
    )


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@validate_request("canvas_ids")
@login_required
def rm():
@@ -54,18 +53,19 @@ def rm():
    return get_json_result(data=True)


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@validate_request("dsl", "title")
@login_required
def save():
    req = request.json
    req["user_id"] = current_user.id
    if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
    if not isinstance(req["dsl"], str):
        req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)

    req["dsl"] = json.loads(req["dsl"])
    if "id" not in req:
        if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
            return server_error_response(ValueError("Duplicated title."))
            return get_data_error_result(message=f"{req['title'].strip()} already exists.")
        req["id"] = get_uuid()
        if not UserCanvasService.save(**req):
            return get_data_error_result(message="Fail to save canvas.")
@@ -78,7 +78,7 @@ def save():
    return get_json_result(data=req)


@manager.route('/get/<canvas_id>', methods=['GET'])
@manager.route('/get/<canvas_id>', methods=['GET'])  # noqa: F821
@login_required
def get(canvas_id):
    e, c = UserCanvasService.get_by_id(canvas_id)
@@ -86,8 +86,22 @@ def get(canvas_id):
        return get_data_error_result(message="canvas not found.")
    return get_json_result(data=c.to_dict())

@manager.route('/getsse/<canvas_id>', methods=['GET'])  # type: ignore # noqa: F821
def getsse(canvas_id):
    token = request.headers.get('Authorization').split()
    if len(token) != 2:
        return get_data_error_result(message='Authorization is not valid!"')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_data_error_result(message='Authentication error: API key is invalid!"')
    e, c = UserCanvasService.get_by_id(canvas_id)
    if not e:
        return get_data_error_result(message="canvas not found.")
    return get_json_result(data=c.to_dict())

@manager.route('/completion', methods=['POST'])

@manager.route('/completion', methods=['POST'])  # noqa: F821
@validate_request("id")
@login_required
def run():
@@ -132,12 +146,16 @@ def run():

            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
            canvas.history.append(("assistant", final_ans["content"]))
            if not canvas.path[-1]:
                canvas.path.pop(-1)
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            cvs.dsl = json.loads(str(canvas))
            UserCanvasService.update_by_id(req["id"], cvs.to_dict())
        except Exception as e:
            cvs.dsl = json.loads(str(canvas))
            if not canvas.path[-1]:
                canvas.path.pop(-1)
            UserCanvasService.update_by_id(req["id"], cvs.to_dict())
            traceback.print_exc()
            yield "data:" + json.dumps({"code": 500, "message": str(e),
@@ -153,7 +171,8 @@ def run():
            return resp

        for answer in canvas.run(stream=False):
            if answer.get("running_status"): continue
            if answer.get("running_status"):
                continue
            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
            if final_ans.get("reference"):
@@ -163,7 +182,7 @@ def run():
        return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})


@manager.route('/reset', methods=['POST'])
@manager.route('/reset', methods=['POST'])  # noqa: F821
@validate_request("id")
@login_required
def reset():
@@ -186,7 +205,51 @@ def reset():
        return server_error_response(e)


@manager.route('/test_db_connect', methods=['POST'])
@manager.route('/input_elements', methods=['GET'])  # noqa: F821
@login_required
def input_elements():
    cvs_id = request.args.get("id")
    cpn_id = request.args.get("component_id")
    try:
        e, user_canvas = UserCanvasService.get_by_id(cvs_id)
        if not e:
            return get_data_error_result(message="canvas not found.")
        if not UserCanvasService.query(user_id=current_user.id, id=cvs_id):
            return get_json_result(
                data=False, message='Only owner of canvas authorized for this operation.',
                code=RetCode.OPERATING_ERROR)

        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
        return get_json_result(data=canvas.get_component_input_elements(cpn_id))
    except Exception as e:
        return server_error_response(e)


@manager.route('/debug', methods=['POST'])  # noqa: F821
@validate_request("id", "component_id", "params")
@login_required
def debug():
    req = request.json
    for p in req["params"]:
        assert p.get("key")
    try:
        e, user_canvas = UserCanvasService.get_by_id(req["id"])
        if not e:
            return get_data_error_result(message="canvas not found.")
        if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
            return get_json_result(
                data=False, message='Only owner of canvas authorized for this operation.',
                code=RetCode.OPERATING_ERROR)

        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
        canvas.get_component(req["component_id"])["obj"]._param.debug_inputs = req["params"]
        df = canvas.get_component(req["component_id"])["obj"].debug()
        return get_json_result(data=df.to_dict(orient="records"))
    except Exception as e:
        return server_error_response(e)


@manager.route('/test_db_connect', methods=['POST'])  # noqa: F821
@validate_request("db_type", "database", "username", "host", "port", "password")
@login_required
def test_db_connect():
@@ -198,8 +261,26 @@ def test_db_connect():
        elif req["db_type"] == 'postgresql':
            db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
                                    password=req["password"])
        db.connect()
        elif req["db_type"] == 'mssql':
            import pyodbc
            connection_string = (
                f"DRIVER={{ODBC Driver 17 for SQL Server}};"
                f"SERVER={req['host']},{req['port']};"
                f"DATABASE={req['database']};"
                f"UID={req['username']};"
                f"PWD={req['password']};"
            )
            db = pyodbc.connect(connection_string)
            cursor = db.cursor()
            cursor.execute("SELECT 1")
            cursor.close()
        else:
            return server_error_response("Unsupported database type.")
        if req["db_type"] != 'mssql':
            db.connect()
        db.close()

        return get_json_result(data="Database Connection Successful!")
    except Exception as e:
        return server_error_response(e)

@@ -19,9 +19,11 @@ import json
from flask import request
from flask_login import login_required, current_user

from api.db.services.dialog_service import keyword_extraction
from rag.app.qa import rmPrefix, beAdoc
from rag.app.tag import label_question
from rag.nlp import search, rag_tokenizer
from rag.prompts import keyword_extraction
from rag.settings import PAGERANK_FLD
from rag.utils import rmSpace
from api.db import LLMType, ParserType
from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -31,11 +33,11 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
from api.db.services.document_service import DocumentService
from api import settings
from api.utils.api_utils import get_json_result
import hashlib
import xxhash
import re


@manager.route('/list', methods=['POST'])
@manager.route('/list', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id")
def list_chunk():
@@ -68,9 +70,10 @@ def list_chunk():
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "question_kwd": sres.field[id].get("question_kwd", []),
                "image_id": sres.field[id].get("img_id", ""),
                "available_int": sres.field[id].get("available_int", 1),
                "positions": json.loads(sres.field[id].get("position_list", "[]")),
                "available_int": int(sres.field[id].get("available_int", 1)),
                "positions": sres.field[id].get("position_int", []),
            }
            assert isinstance(d["positions"], list)
            assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
@@ -83,7 +86,7 @@ def list_chunk():
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
    chunk_id = request.args["chunk_id"]
@@ -91,12 +94,14 @@ def get():
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")
        tenant_id = tenants[0].tenant_id

        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
        for tenant in tenants:
            kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
            chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
            if chunk:
                break
        if chunk is None:
            return server_error_response(Exception("Chunk not found"))

        k = []
        for n in chunk.keys():
            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
@@ -112,10 +117,9 @@ def get():
        return server_error_response(e)


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "chunk_id", "content_with_weight",
                  "important_kwd")
@validate_request("doc_id", "chunk_id", "content_with_weight")
def set():
    req = request.json
    d = {
@@ -123,8 +127,16 @@ def set():
        "content_with_weight": req["content_with_weight"]}
    d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req["important_kwd"]
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
    if "important_kwd" in req:
        d["important_kwd"] = req["important_kwd"]
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
    if "question_kwd" in req:
        d["question_kwd"] = req["question_kwd"]
        d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
    if "tag_kwd" in req:
        d["tag_kwd"] = req["tag_kwd"]
    if "tag_feas" in req:
        d["tag_feas"] = req["tag_feas"]
    if "available_int" in req:
        d["available_int"] = req["available_int"]

@@ -145,14 +157,11 @@ def set():
            t for t in re.split(
                r"[\n\t]",
                req["content_with_weight"]) if len(t) > 1]
        if len(arr) != 2:
            return get_data_error_result(
                message="Q&A must be separated by TAB/ENTER key.")
        q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
        d = beAdoc(d, arr[0], arr[1], not any(
        q, a = rmPrefix(arr[0]), rmPrefix("\n".join(arr[1:]))
        d = beAdoc(d, q, a, not any(
            [rag_tokenizer.is_chinese(t) for t in q + a]))

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
        v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        settings.docStoreConn.update({"id": req["chunk_id"]}, d, search.index_name(tenant_id), doc.kb_id)
@@ -161,7 +170,7 @@ def set():
        return server_error_response(e)


@manager.route('/switch', methods=['POST'])
@manager.route('/switch', methods=['POST'])  # noqa: F821
@login_required
@validate_request("chunk_ids", "available_int", "doc_id")
def switch():
@@ -181,7 +190,7 @@ def switch():
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("chunk_ids", "doc_id")
def rm():
@@ -200,19 +209,19 @@ def rm():
        return server_error_response(e)


@manager.route('/create', methods=['POST'])
@manager.route('/create', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "content_with_weight")
def create():
    req = request.json
    md5 = hashlib.md5()
    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
    chunck_id = md5.hexdigest()
    chunck_id = xxhash.xxh64((req["content_with_weight"] + req["doc_id"]).encode("utf-8")).hexdigest()
    d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
         "content_with_weight": req["content_with_weight"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_kwd", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
    d["question_kwd"] = req.get("question_kwd", [])
    d["question_tks"] = rag_tokenizer.tokenize("\n".join(req.get("question_kwd", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

@@ -222,16 +231,23 @@ def create():
        return get_data_error_result(message="Document not found!")
    d["kb_id"] = [doc.kb_id]
    d["docnm_kwd"] = doc.name
    d["title_tks"] = rag_tokenizer.tokenize(doc.name)
    d["doc_id"] = doc.id

    tenant_id = DocumentService.get_tenant_id(req["doc_id"])
    if not tenant_id:
        return get_data_error_result(message="Tenant not found!")

    e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
    if not e:
        return get_data_error_result(message="Knowledgebase not found!")
    if kb.pagerank:
        d[PAGERANK_FLD] = kb.pagerank

    embd_id = DocumentService.get_embd_id(req["doc_id"])
    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)

    v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
    v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)
@@ -243,7 +259,7 @@ def create():
        return server_error_response(e)


@manager.route('/retrieval_test', methods=['POST'])
@manager.route('/retrieval_test', methods=['POST'])  # noqa: F821
@login_required
@validate_request("kb_id", "question")
def retrieval_test():
@@ -257,6 +273,7 @@ def retrieval_test():
    doc_ids = req.get("doc_ids", [])
    similarity_threshold = float(req.get("similarity_threshold", 0.0))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    use_kg = req.get("use_kg", False)
    top = int(req.get("top_k", 1024))
    tenant_ids = []

@@ -287,12 +304,24 @@ def retrieval_test():
            chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)

        retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
        ranks = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
        labels = label_question(question, [kb])
        ranks = settings.retrievaler.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
                                               similarity_threshold, vector_similarity_weight, top,
                                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
                                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"),
                                               rank_feature=labels
                                               )
        if use_kg:
            ck = settings.kg_retrievaler.retrieval(question,
                                                   tenant_ids,
                                                   kb_ids,
                                                   embd_mdl,
                                                   LLMBundle(kb.tenant_id, LLMType.CHAT))
            if ck["content_with_weight"]:
                ranks["chunks"].insert(0, ck)

        for c in ranks["chunks"]:
            c.pop("vector", None)
        ranks["labels"] = labels

        return get_json_result(data=ranks)
    except Exception as e:
@@ -302,7 +331,7 @@ def retrieval_test():
        return server_error_response(e)


@manager.route('/knowledge_graph', methods=['GET'])
@manager.route('/knowledge_graph', methods=['GET'])  # noqa: F821
@login_required
def knowledge_graph():
    doc_id = request.args["doc_id"]

@@ -17,21 +17,26 @@ import json
import re
import traceback
from copy import deepcopy
import trio
from api.db.db_models import APIToken

from api.db.services.conversation_service import ConversationService, structure_answer
from api.db.services.user_service import UserTenantService
from flask import request, Response
from flask_login import login_required, current_user

from api.db import LLMType
from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
from api.db.services.dialog_service import DialogService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api.db.services.llm_service import LLMBundle, TenantService
from api import settings
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor
from graphrag.general.mind_map_extractor import MindMapExtractor
from rag.app.tag import label_question


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
def set_conversation():
    req = request.json
@@ -63,38 +68,76 @@ def set_conversation():
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(message="Fail to new a conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
    conv_id = request.args["conversation_id"]
    try:

        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(message="Conversation not found!")
        tenants = UserTenantService.query(user_id=current_user.id)
        avatar = None
        for tenant in tenants:
            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
            dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
            if dialog and len(dialog) > 0:
                avatar = dialog[0].icon
                break
        else:
            return get_json_result(
                data=False, message='Only owner of conversation authorized for this operation.',
                code=settings.RetCode.OPERATING_ERROR)

        def get_value(d, k1, k2):
            return d.get(k1, d.get(k2))

        for ref in conv.reference:
            if isinstance(ref, list):
                continue
            ref["chunks"] = [{
                "id": get_value(ck, "chunk_id", "id"),
                "content": get_value(ck, "content", "content_with_weight"),
                "document_id": get_value(ck, "doc_id", "document_id"),
                "document_name": get_value(ck, "docnm_kwd", "document_name"),
                "dataset_id": get_value(ck, "kb_id", "dataset_id"),
                "image_id": get_value(ck, "image_id", "img_id"),
                "positions": get_value(ck, "positions", "position_int"),
            } for ck in ref.get("chunks", [])]

        conv = conv.to_dict()
        conv["avatar"] = avatar
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)

@manager.route('/getsse/<dialog_id>', methods=['GET'])  # type: ignore # noqa: F821
def getsse(dialog_id):

    token = request.headers.get('Authorization').split()
    if len(token) != 2:
        return get_data_error_result(message='Authorization is not valid!"')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_data_error_result(message='Authentication error: API key is invalid!"')
    try:
        e, conv = DialogService.get_by_id(dialog_id)
        if not e:
            return get_data_error_result(message="Dialog not found!")
        conv = conv.to_dict()
        conv["avatar"] = conv["icon"]
        del conv["icon"]
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)

@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
def rm():
    conv_ids = request.json["conversation_ids"]
@@ -117,7 +160,7 @@ def rm():
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_convsersation():
    dialog_id = request.args["dialog_id"]
@@ -130,13 +173,14 @@ def list_convsersation():
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)

        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)


@manager.route('/completion', methods=['POST'])
@manager.route('/completion', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "messages")
def completion():
@@ -162,24 +206,31 @@ def completion():

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        else:
            def get_value(d, k1, k2):
                return d.get(k1, d.get(k2))

            for ref in conv.reference:
                if isinstance(ref, list):
                    continue
                ref["chunks"] = [{
                    "id": get_value(ck, "chunk_id", "id"),
                    "content": get_value(ck, "content", "content_with_weight"),
                    "document_id": get_value(ck, "doc_id", "document_id"),
                    "document_name": get_value(ck, "docnm_kwd", "document_name"),
                    "dataset_id": get_value(ck, "kb_id", "dataset_id"),
                    "image_id": get_value(ck, "image_id", "img_id"),
                    "positions": get_value(ck, "positions", "position_int"),
                } for ck in ref.get("chunks", [])]

        if not conv.reference:
            conv.reference = []
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            nonlocal conv, message_id
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                                "id": message_id, "prompt": ans.get("prompt", "")}
            ans["id"] = message_id

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    ans = structure_answer(conv, ans, message_id, conv.id)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
@@ -200,8 +251,7 @@ def completion():
        else:
            answer = None
            for ans in chat(dia, msg, **req):
                answer = ans
                fillin_conv(ans)
                answer = structure_answer(conv, ans, message_id, req["conversation_id"])
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)
@@ -209,7 +259,7 @@ def completion():
        return server_error_response(e)


@manager.route('/tts', methods=['POST'])
@manager.route('/tts', methods=['POST'])  # noqa: F821
@login_required
def tts():
    req = request.json
@@ -243,7 +293,7 @@ def tts():
    return resp


@manager.route('/delete_msg', methods=['POST'])
@manager.route('/delete_msg', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
@@ -266,7 +316,7 @@ def delete_msg():
    return get_json_result(data=conv)


@manager.route('/thumbup', methods=['POST'])
@manager.route('/thumbup', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
@@ -281,17 +331,19 @@ def thumbup():
        if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
            if up_down:
                msg["thumbup"] = True
                if "feedback" in msg: del msg["feedback"]
                if "feedback" in msg:
                    del msg["feedback"]
            else:
                msg["thumbup"] = False
                if feedback: msg["feedback"] = feedback
                if feedback:
                    msg["feedback"] = feedback
            break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)


@manager.route('/ask', methods=['POST'])
@manager.route('/ask', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question", "kb_ids")
def ask_about():
@@ -317,7 +369,7 @@ def ask_about():
    return resp


@manager.route('/mindmap', methods=['POST'])
@manager.route('/mindmap', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question", "kb_ids")
def mindmap():
@@ -327,19 +379,22 @@ def mindmap():
    if not e:
        return get_data_error_result(message="Knowledgebase not found!")

    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
    embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    ranks = settings.retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                           0.3, 0.3, aggs=False)
    question = req["question"]
    ranks = settings.retrievaler.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                           0.3, 0.3, aggs=False,
                                           rank_feature=label_question(question, [kb])
                                           )
    mindmap = MindMapExtractor(chat_mdl)
    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
    mind_map = trio.run(mindmap, [c["content_with_weight"] for c in ranks["chunks"]])
    mind_map = mind_map.output
    if "error" in mind_map:
        return server_error_response(Exception(mind_map["error"]))
    return get_json_result(data=mind_map)


@manager.route('/related_questions', methods=['POST'])
@manager.route('/related_questions', methods=['POST'])  # noqa: F821
@login_required
@validate_request("question")
def related_questions():

@@ -18,6 +18,7 @@ from flask import request
from flask_login import login_required, current_user
from api.db.services.dialog_service import DialogService
from api.db import StatusEnum
from api.db.services.llm_service import TenantLLMService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService, UserTenantService
from api import settings
@@ -26,21 +27,21 @@ from api.utils import get_uuid
from api.utils.api_utils import get_json_result


@manager.route('/set', methods=['POST'])
@manager.route('/set', methods=['POST'])  # noqa: F821
@login_required
def set_dialog():
    req = request.json
    dialog_id = req.get("dialog_id")
    name = req.get("name", "New Dialog")
    description = req.get("description", "A helpful Dialog")
    description = req.get("description", "A helpful dialog")
    icon = req.get("icon", "")
    top_n = req.get("top_n", 6)
    top_k = req.get("top_k", 1024)
    rerank_id = req.get("rerank_id", "")
    if not rerank_id: req["rerank_id"] = ""
    if not rerank_id:
        req["rerank_id"] = ""
    similarity_threshold = req.get("similarity_threshold", 0.1)
    vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
    if vector_similarity_weight is None: vector_similarity_weight = 0.3
    llm_setting = req.get("llm_setting", {})
    default_prompt = {
        "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括"知识库中未找到您要的答案!"这句话。回答需要考虑聊天历史。
@@ -57,11 +58,6 @@ def set_dialog():

    if not prompt_config["system"]:
        prompt_config["system"] = default_prompt["system"]
    # if len(prompt_config["parameters"]) < 1:
    #     prompt_config["parameters"] = default_prompt["parameters"]
    # for p in prompt_config["parameters"]:
    #     if p["key"] == "knowledge": break
    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])

    for p in prompt_config["parameters"]:
        if p["optional"]:
@@ -74,22 +70,19 @@ def set_dialog():
        e, tenant = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(message="Tenant not found!")
        kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids"))
        embd_count = len(set([kb.embd_id for kb in kbs]))
        if embd_count != 1:
        kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
        embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
        embd_count = len(set(embd_ids))
        if embd_count > 1:
            return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}"')

        llm_id = req.get("llm_id", tenant.llm_id)
        if not dialog_id:
            if not req.get("kb_ids"):
                return get_data_error_result(
                    message="Fail! Please select knowledgebase!")

            dia = {
                "id": get_uuid(),
                "tenant_id": current_user.id,
                "name": name,
                "kb_ids": req["kb_ids"],
                "kb_ids": req.get("kb_ids", []),
                "description": description,
                "llm_id": llm_id,
                "llm_setting": llm_setting,
@@ -103,10 +96,7 @@ def set_dialog():
            }
            if not DialogService.save(**dia):
                return get_data_error_result(message="Fail to new a dialog!")
            e, dia = DialogService.get_by_id(dia["id"])
            if not e:
                return get_data_error_result(message="Fail to new a dialog!")
            return get_json_result(data=dia.to_json())
            return get_json_result(data=dia)
        else:
            del req["dialog_id"]
            if "kb_names" in req:
@@ -117,13 +107,14 @@ def set_dialog():
            if not e:
                return get_data_error_result(message="Fail to update a dialog!")
            dia = dia.to_dict()
            dia.update(req)
            dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
            return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@manager.route('/get', methods=['GET'])  # noqa: F821
@login_required
def get():
    dialog_id = request.args["dialog_id"]
@@ -149,7 +140,7 @@ def get_kb_names(kb_ids):
    return ids, nms


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_dialogs():
    try:
@@ -166,7 +157,7 @@ def list_dialogs():
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("dialog_ids")
def rm():

@@ -22,19 +22,25 @@ import flask
from flask import request
from flask_login import login_required, current_user

from api.db.db_models import Task, File
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import UserTenantService
from deepdoc.parser.html_parser import RAGFlowHtmlParser
from rag.nlp import search

from api.db import FileType, TaskStatus, ParserType, FileSource
from api.db.db_models import File, Task
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import queue_tasks
from api.db.services.user_service import UserTenantService
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType, TaskStatus, ParserType, FileSource
from api.db.services.task_service import TaskService
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.utils.api_utils import (
    server_error_response,
    get_data_error_result,
    validate_request,
)
from api.utils import get_uuid
from api import settings
from api.utils.api_utils import get_json_result
from rag.utils.storage_factory import STORAGE_IMPL
@@ -43,7 +49,7 @@ from api.utils.web_utils import html2pdf, is_valid_url
from api.constants import IMG_BASE64_PREFIX


@manager.route('/upload', methods=['POST'])
@manager.route('/upload', methods=['POST'])  # noqa: F821
@login_required
@validate_request("kb_id")
def upload():
@@ -65,14 +71,16 @@ def upload():
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    err, _ = FileService.upload_document(kb, file_objs, current_user.id)
    err, files = FileService.upload_document(kb, file_objs, current_user.id)
    files = [f[0] for f in files]  # remove the blob

    if err:
        return get_json_result(
            data=False, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
    return get_json_result(data=True)
            data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
    return get_json_result(data=files)


@manager.route('/web_crawl', methods=['POST'])
@manager.route('/web_crawl', methods=['POST'])  # noqa: F821
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
@@ -90,7 +98,8 @@ def web_crawl():
        raise LookupError("Can't find this knowledgebase!")

    blob = html2pdf(url)
    if not blob: return server_error_response(ValueError("Download failure."))
    if not blob:
        return server_error_response(ValueError("Download failure."))

    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
@@ -138,7 +147,7 @@ def web_crawl():
    return get_json_result(data=True)


@manager.route('/create', methods=['POST'])
@manager.route('/create', methods=['POST'])  # noqa: F821
@login_required
@validate_request("name", "kb_id")
def create():
@@ -174,7 +183,7 @@ def create():
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_docs():
    kb_id = request.args.get("kb_id")
@@ -209,7 +218,7 @@ def list_docs():
        return server_error_response(e)


@manager.route('/infos', methods=['POST'])
@manager.route('/infos', methods=['POST'])  # noqa: F821
@login_required
def docinfos():
    req = request.json
@@ -225,7 +234,7 @@ def docinfos():
    return get_json_result(data=list(docs.dicts()))


@manager.route('/thumbnails', methods=['GET'])
@manager.route('/thumbnails', methods=['GET'])  # noqa: F821
# @login_required
def thumbnails():
    doc_ids = request.args.get("doc_ids").split(",")
@@ -245,7 +254,7 @@ def thumbnails():
        return server_error_response(e)


@manager.route('/change_status', methods=['POST'])
@manager.route('/change_status', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "status")
def change_status():
@@ -284,13 +293,14 @@ def change_status():
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id")
def rm():
    req = request.json
    doc_ids = req["doc_id"]
    if isinstance(doc_ids, str): doc_ids = [doc_ids]
    if isinstance(doc_ids, str):
        doc_ids = [doc_ids]

    for doc_id in doc_ids:
        if not DocumentService.accessible4deletion(doc_id, current_user.id):
@@ -315,6 +325,7 @@ def rm():

            b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

            TaskService.filter_delete([Task.doc_id == doc_id])
            if not DocumentService.remove_document(doc, tenant_id):
                return get_data_error_result(
                    message="Database error (Document removal)!")
@@ -333,7 +344,7 @@ def rm():
    return get_json_result(data=True)


@manager.route('/run', methods=['POST'])
@manager.route('/run', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_ids", "run")
def run():
@@ -348,23 +359,23 @@ def run():
    try:
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value:
            if str(req["run"]) == TaskStatus.RUNNING.value and req.get("delete", False):
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
            DocumentService.update_by_id(id, info)
            # if str(req["run"]) == TaskStatus.CANCEL.value:
            tenant_id = DocumentService.get_tenant_id(id)
            if not tenant_id:
                return get_data_error_result(message="Tenant not found!")
            e, doc = DocumentService.get_by_id(id)
            if not e:
                return get_data_error_result(message="Document not found!")
            if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
                settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)
            if req.get("delete", False):
                TaskService.filter_delete([Task.doc_id == id])
                if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
                    settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)

            if str(req["run"]) == TaskStatus.RUNNING.value:
                TaskService.filter_delete([Task.doc_id == id])
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
@@ -376,7 +387,7 @@ def run():
        return server_error_response(e)


@manager.route('/rename', methods=['POST'])
@manager.route('/rename', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "name")
def rename():
@@ -417,7 +428,7 @@ def rename():
        return server_error_response(e)


@manager.route('/get/<doc_id>', methods=['GET'])
@manager.route('/get/<doc_id>', methods=['GET'])  # noqa: F821
# @login_required
def get(doc_id):
    try:
@@ -442,7 +453,7 @@ def get(doc_id):
        return server_error_response(e)


@manager.route('/change_parser', methods=['POST'])
@manager.route('/change_parser', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "parser_id")
def change_parser():
@@ -493,10 +504,13 @@ def change_parser():
        return server_error_response(e)


@manager.route('/image/<image_id>', methods=['GET'])
@manager.route('/image/<image_id>', methods=['GET'])  # noqa: F821
# @login_required
def get_image(image_id):
    try:
        arr = image_id.split("-")
        if len(arr) != 2:
            return get_data_error_result(message="Image not found.")
        bkt, nm = image_id.split("-")
        response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
        response.headers.set('Content-Type', 'image/JPEG')
@@ -505,7 +519,7 @@ def get_image(image_id):
        return server_error_response(e)


@manager.route('/upload_and_parse', methods=['POST'])
@manager.route('/upload_and_parse', methods=['POST'])  # noqa: F821
@login_required
@validate_request("conversation_id")
def upload_and_parse():
@@ -524,7 +538,7 @@ def upload_and_parse():
    return get_json_result(data=doc_ids)


@manager.route('/parse', methods=['POST'])
@manager.route('/parse', methods=['POST'])  # noqa: F821
@login_required
def parse():
    url = request.json.get("url") if request.json else ""
@@ -548,7 +562,7 @@ def parse():
        })
        driver = Chrome(options=options)
        driver.get(url)
        res_headers = [r.response.headers for r in driver.requests]
        res_headers = [r.response.headers for r in driver.requests if r and r.response]
        if len(res_headers) > 1:
            sections = RAGFlowHtmlParser().parser_txt(driver.page_source)
            driver.quit()
@@ -582,3 +596,38 @@ def parse():
    txt = FileService.parse_docs(file_objs, current_user.id)

    return get_json_result(data=txt)


@manager.route('/set_meta', methods=['POST'])  # noqa: F821
@login_required
@validate_request("doc_id", "meta")
def set_meta():
    req = request.json
    if not DocumentService.accessible(req["doc_id"], current_user.id):
        return get_json_result(
            data=False,
            message='No authorization.',
            code=settings.RetCode.AUTHENTICATION_ERROR
        )
    try:
        meta = json.loads(req["meta"])
    except Exception as e:
        return get_json_result(
            data=False, message=f'Json syntax error: {e}', code=settings.RetCode.ARGUMENT_ERROR)
    if not isinstance(meta, dict):
        return get_json_result(
            data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(message="Document not found!")

        if not DocumentService.update_by_id(
                req["doc_id"], {"meta_fields": meta}):
            return get_data_error_result(
                message="Database error (meta updates)!")

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@@ -28,7 +28,7 @@ from api import settings
from api.utils.api_utils import get_json_result


@manager.route('/convert', methods=['POST'])
@manager.route('/convert', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_ids", "kb_ids")
def convert():
@@ -92,7 +92,7 @@ def convert():
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_ids")
def rm():

@@ -34,7 +34,7 @@ from api.utils.file_utils import filename_type
from rag.utils.storage_factory import STORAGE_IMPL


@manager.route('/upload', methods=['POST'])
@manager.route('/upload', methods=['POST'])  # noqa: F821
@login_required
# @validate_request("parent_id")
def upload():
@@ -120,7 +120,7 @@ def upload():
        return server_error_response(e)


@manager.route('/create', methods=['POST'])
@manager.route('/create', methods=['POST'])  # noqa: F821
@login_required
@validate_request("name")
def create():
@@ -160,7 +160,7 @@ def create():
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_files():
    pf_id = request.args.get("parent_id")
@@ -192,7 +192,7 @@ def list_files():
        return server_error_response(e)


@manager.route('/root_folder', methods=['GET'])
@manager.route('/root_folder', methods=['GET'])  # noqa: F821
@login_required
def get_root_folder():
    try:
@@ -202,7 +202,7 @@ def get_root_folder():
        return server_error_response(e)


@manager.route('/parent_folder', methods=['GET'])
@manager.route('/parent_folder', methods=['GET'])  # noqa: F821
@login_required
def get_parent_folder():
    file_id = request.args.get("file_id")
@@ -217,7 +217,7 @@ def get_parent_folder():
        return server_error_response(e)


@manager.route('/all_parent_folder', methods=['GET'])
@manager.route('/all_parent_folder', methods=['GET'])  # noqa: F821
@login_required
def get_all_parent_folders():
    file_id = request.args.get("file_id")
@@ -235,7 +235,7 @@ def get_all_parent_folders():
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@manager.route('/rm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_ids")
def rm():
@@ -284,7 +284,7 @@ def rm():
        return server_error_response(e)


@manager.route('/rename', methods=['POST'])
@manager.route('/rename', methods=['POST'])  # noqa: F821
@login_required
@validate_request("file_id", "name")
def rename():
@@ -322,15 +322,20 @@ def rename():
        return server_error_response(e)


@manager.route('/get/<file_id>', methods=['GET'])
# @login_required
@manager.route('/get/<file_id>', methods=['GET'])  # noqa: F821
@login_required
def get(file_id):
    try:
        e, file = FileService.get_by_id(file_id)
        if not e:
            return get_data_error_result(message="Document not found!")
        b, n = File2DocumentService.get_storage_address(file_id=file_id)
        response = flask.make_response(STORAGE_IMPL.get(b, n))

        blob = STORAGE_IMPL.get(file.parent_id, file.location)
        if not blob:
            b, n = File2DocumentService.get_storage_address(file_id=file_id)
            blob = STORAGE_IMPL.get(b, n)

        response = flask.make_response(blob)
        ext = re.search(r"\.([^.]+)$", file.name)
        if ext:
            if file.type == FileType.VISUAL.value:
@@ -345,7 +350,7 @@ def get(file_id):
        return server_error_response(e)


@manager.route('/mv', methods=['POST'])
@manager.route('/mv', methods=['POST'])  # noqa: F821
@login_required
@validate_request("src_file_ids", "dest_file_id")
def move():

@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os

from flask import request
from flask_login import login_required, current_user

@@ -21,7 +24,7 @@ from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.user_service import TenantService, UserTenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request, not_allowed_parameters
from api.utils import get_uuid
from api.db import StatusEnum, FileSource
from api.db.services.knowledgebase_service import KnowledgebaseService
@@ -30,9 +33,10 @@ from api.utils.api_utils import get_json_result
from api import settings
from rag.nlp import search
from api.constants import DATASET_NAME_LIMIT
from rag.settings import PAGERANK_FLD


@manager.route('/create', methods=['post'])
@manager.route('/create', methods=['post'])  # noqa: F821
@login_required
@validate_request("name")
def create():
@@ -67,9 +71,10 @@ def create():
return server_error_response(e)


@manager.route('/update', methods=['post'])
@manager.route('/update', methods=['post'])  # noqa: F821
@login_required
@validate_request("kb_id", "name", "description", "permission", "parser_id")
@not_allowed_parameters("id", "tenant_id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
def update():
req = request.json
req["name"] = req["name"].strip()
@@ -91,6 +96,13 @@ def update():
return get_data_error_result(
message="Can't find this knowledgebase!")

if req.get("parser_id", "") == "tag" and os.environ.get('DOC_ENGINE', "elasticsearch") == "infinity":
return get_json_result(
data=False,
message='The chunk method Tag has not been supported by Infinity yet.',
code=settings.RetCode.OPERATING_ERROR
)

if req["name"].lower() != kb.name.lower() \
and len(
KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
@@ -101,17 +113,28 @@ def update():
if not KnowledgebaseService.update_by_id(kb.id, req):
return get_data_error_result()

if kb.pagerank != req.get("pagerank", 0):
if req.get("pagerank", 0) > 0:
settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]},
search.index_name(kb.tenant_id), kb.id)
else:
# Elasticsearch requires PAGERANK_FLD be non-zero!
settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD},
search.index_name(kb.tenant_id), kb.id)

e, kb = KnowledgebaseService.get_by_id(kb.id)
if not e:
return get_data_error_result(
message="Database error (Knowledgebase rename)!")
kb = kb.to_dict()
kb.update(req)

return get_json_result(data=kb.to_json())
return get_json_result(data=kb)
except Exception as e:
return server_error_response(e)


@manager.route('/detail', methods=['GET'])
@manager.route('/detail', methods=['GET'])  # noqa: F821
@login_required
def detail():
kb_id = request.args["kb_id"]
@@ -134,24 +157,26 @@ def detail():
return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_kbs():
keywords = request.args.get("keywords", "")
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 150))
parser_id = request.args.get("parser_id")
orderby = request.args.get("orderby", "create_time")
desc = request.args.get("desc", True)
try:
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
kbs, total = KnowledgebaseService.get_by_tenant_ids(
[m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc, keywords)
[m["tenant_id"] for m in tenants], current_user.id, page_number,
items_per_page, orderby, desc, keywords, parser_id)
return get_json_result(data={"kbs": kbs, "total": total})
except Exception as e:
return server_error_response(e)


@manager.route('/rm', methods=['post'])
@manager.route('/rm', methods=['post'])  # noqa: F821
@login_required
@validate_request("kb_id")
def rm():
@@ -175,7 +200,8 @@ def rm():
return get_data_error_result(
message="Database error (Document removal)!")
f2d = File2DocumentService.get_by_document_id(doc.id)
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
if f2d:
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
File2DocumentService.delete_by_document_id(doc.id)
FileService.filter_delete(
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
@@ -188,3 +214,112 @@ def rm():
return get_json_result(data=True)
except Exception as e:
return server_error_response(e)


@manager.route('/<kb_id>/tags', methods=['GET'])  # noqa: F821
@login_required
def list_tags(kb_id):
if not KnowledgebaseService.accessible(kb_id, current_user.id):
return get_json_result(
data=False,
message='No authorization.',
code=settings.RetCode.AUTHENTICATION_ERROR
)

tags = settings.retrievaler.all_tags(current_user.id, [kb_id])
return get_json_result(data=tags)


@manager.route('/tags', methods=['GET'])  # noqa: F821
@login_required
def list_tags_from_kbs():
kb_ids = request.args.get("kb_ids", "").split(",")
for kb_id in kb_ids:
if not KnowledgebaseService.accessible(kb_id, current_user.id):
return get_json_result(
data=False,
message='No authorization.',
code=settings.RetCode.AUTHENTICATION_ERROR
)

tags = settings.retrievaler.all_tags(current_user.id, kb_ids)
return get_json_result(data=tags)


@manager.route('/<kb_id>/rm_tags', methods=['POST'])  # noqa: F821
@login_required
def rm_tags(kb_id):
req = request.json
if not KnowledgebaseService.accessible(kb_id, current_user.id):
return get_json_result(
data=False,
message='No authorization.',
code=settings.RetCode.AUTHENTICATION_ERROR
)
e, kb = KnowledgebaseService.get_by_id(kb_id)

for t in req["tags"]:
settings.docStoreConn.update({"tag_kwd": t, "kb_id": [kb_id]},
{"remove": {"tag_kwd": t}},
search.index_name(kb.tenant_id),
kb_id)
return get_json_result(data=True)


@manager.route('/<kb_id>/rename_tag', methods=['POST'])  # noqa: F821
@login_required
def rename_tags(kb_id):
req = request.json
if not KnowledgebaseService.accessible(kb_id, current_user.id):
return get_json_result(
data=False,
message='No authorization.',
code=settings.RetCode.AUTHENTICATION_ERROR
)
e, kb = KnowledgebaseService.get_by_id(kb_id)

settings.docStoreConn.update({"tag_kwd": req["from_tag"], "kb_id": [kb_id]},
{"remove": {"tag_kwd": req["from_tag"].strip()}, "add": {"tag_kwd": req["to_tag"]}},
search.index_name(kb.tenant_id),
kb_id)
return get_json_result(data=True)


@manager.route('/<kb_id>/knowledge_graph', methods=['GET'])  # noqa: F821
@login_required
def knowledge_graph(kb_id):
if not KnowledgebaseService.accessible(kb_id, current_user.id):
return get_json_result(
data=False,
message='No authorization.',
code=settings.RetCode.AUTHENTICATION_ERROR
)
_, kb = KnowledgebaseService.get_by_id(kb_id)
req = {
"kb_id": [kb_id],
"knowledge_graph_kwd": ["graph"]
}

obj = {"graph": {}, "mind_map": {}}
if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
return get_json_result(data=obj)
sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
if not len(sres.ids):
return get_json_result(data=obj)

for id in sres.ids[:1]:
ty = sres.field[id]["knowledge_graph_kwd"]
try:
content_json = json.loads(sres.field[id]["content_with_weight"])
except Exception:
continue

obj[ty] = content_json

if "nodes" in obj["graph"]:
obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
if "edges" in obj["graph"]:
node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
return get_json_result(data=obj)
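The pagerank branch in /update above is the subtle part of this hunk: a positive value is stamped onto every chunk of the knowledge base, while a zero resets by removing the field entirely, because Elasticsearch rejects a zero PAGERANK_FLD. A minimal illustrative sketch of that behavior, assuming a doc-store object with the update(condition, new_value, index_name, kb_id) signature shown in the diff; names here are hypothetical, not the repo's code:

PAGERANK_FLD = "pagerank_fea"  # assumed field name; the real constant lives in rag.settings

def sync_pagerank(doc_store, kb_id, tenant_index, old_value, new_value):
    if old_value == new_value:
        return  # nothing changed, skip the doc-store round trip
    if new_value > 0:
        # stamp the new rank on every chunk of this knowledge base
        doc_store.update({"kb_id": kb_id}, {PAGERANK_FLD: new_value}, tenant_index, kb_id)
    else:
        # Elasticsearch requires the field to be non-zero, so drop it instead of writing 0
        doc_store.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD}, tenant_index, kb_id)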
@@ -15,7 +15,7 @@
#
import logging
import json

import os
from flask import request
from flask_login import login_required, current_user
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
@@ -24,11 +24,11 @@ from api.utils.api_utils import server_error_response, get_data_error_result, va
from api.db import StatusEnum, LLMType
from api.db.db_models import TenantLLM
from api.utils.api_utils import get_json_result
from api.utils.file_utils import get_project_base_directory
from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
import requests


@manager.route('/factories', methods=['GET'])
@manager.route('/factories', methods=['GET'])  # noqa: F821
@login_required
def factories():
try:
@@ -50,7 +50,7 @@ def factories():
return server_error_response(e)


@manager.route('/set_api_key', methods=['POST'])
@manager.route('/set_api_key', methods=['POST'])  # noqa: F821
@login_required
@validate_request("llm_factory", "api_key")
def set_api_key():
@@ -129,12 +129,14 @@ def set_api_key():
return get_json_result(data=True)


@manager.route('/add_llm', methods=['POST'])
@manager.route('/add_llm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("llm_factory")
def add_llm():
req = request.json
factory = req["llm_factory"]
api_key = req.get("api_key", "")
llm_name = req["llm_name"]

def apikey_json(keys):
nonlocal req
@@ -143,7 +145,6 @@ def add_llm():
if factory == "VolcEngine":
# For VolcEngine, due to its special authentication method
# Assemble ark_api_key endpoint_id into api_key
llm_name = req["llm_name"]
api_key = apikey_json(["ark_api_key", "endpoint_id"])

elif factory == "Tencent Hunyuan":
@@ -152,52 +153,43 @@ def add_llm():

elif factory == "Tencent Cloud":
req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
return set_api_key()

elif factory == "Bedrock":
# For Bedrock, due to its special authentication method
# Assemble bedrock_ak, bedrock_sk, bedrock_region
llm_name = req["llm_name"]
api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])

elif factory == "LocalAI":
llm_name = req["llm_name"] + "___LocalAI"
api_key = "xxxxxxxxxxxxxxx"
llm_name += "___LocalAI"

elif factory == "HuggingFace":
llm_name = req["llm_name"] + "___HuggingFace"
api_key = "xxxxxxxxxxxxxxx"
llm_name += "___HuggingFace"

elif factory == "OpenAI-API-Compatible":
llm_name = req["llm_name"] + "___OpenAI-API"
api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
llm_name += "___OpenAI-API"

elif factory == "VLLM":
llm_name += "___VLLM"

elif factory == "XunFei Spark":
llm_name = req["llm_name"]
if req["model_type"] == "chat":
api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
api_key = req.get("spark_api_password", "")
elif req["model_type"] == "tts":
api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])

elif factory == "BaiduYiyan":
llm_name = req["llm_name"]
api_key = apikey_json(["yiyan_ak", "yiyan_sk"])

elif factory == "Fish Audio":
llm_name = req["llm_name"]
api_key = apikey_json(["fish_audio_ak", "fish_audio_refid"])

elif factory == "Google Cloud":
llm_name = req["llm_name"]
api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])

elif factory == "Azure-OpenAI":
llm_name = req["llm_name"]
api_key = apikey_json(["api_key", "api_version"])

else:
llm_name = req["llm_name"]
api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

llm = {
"tenant_id": current_user.id,
"llm_factory": factory,
@@ -209,74 +201,69 @@ def add_llm():
}

msg = ""
mdl_nm = llm["llm_name"].split("___")[0]
if llm["model_type"] == LLMType.EMBEDDING.value:
mdl = EmbeddingModel[factory](
key=llm['api_key'],
model_name=llm["llm_name"],
model_name=mdl_nm,
base_url=llm["api_base"])
try:
arr, tc = mdl.encode(["Test if the api key is available"])
if len(arr[0]) == 0 or tc == 0:
if len(arr[0]) == 0:
raise Exception("Fail")
except Exception as e:
msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
elif llm["model_type"] == LLMType.CHAT.value:
mdl = ChatModel[factory](
key=llm['api_key'],
model_name=llm["llm_name"],
model_name=mdl_nm,
base_url=llm["api_base"]
)
try:
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
"temperature": 0.9})
if not tc:
if not tc and m.find("**ERROR**:") >= 0:
raise Exception(m)
except Exception as e:
msg += f"\nFail to access model({llm['llm_name']})." + str(
msg += f"\nFail to access model({mdl_nm})." + str(
e)
elif llm["model_type"] == LLMType.RERANK:
mdl = RerankModel[factory](
key=llm["api_key"],
model_name=llm["llm_name"],
base_url=llm["api_base"]
)
try:
mdl = RerankModel[factory](
key=llm["api_key"],
model_name=mdl_nm,
base_url=llm["api_base"]
)
arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
if len(arr) == 0 or tc == 0:
if len(arr) == 0:
raise Exception("Not known.")
except KeyError:
msg += f"{factory} dose not support this model({mdl_nm})"
except Exception as e:
msg += f"\nFail to access model({llm['llm_name']})." + str(
msg += f"\nFail to access model({mdl_nm})." + str(
e)
elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
mdl = CvModel[factory](
key=llm["api_key"],
model_name=llm["llm_name"],
model_name=mdl_nm,
base_url=llm["api_base"]
)
try:
img_url = (
"https://upload.wikimedia.org/wikipedia/comm"
"ons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/256"
"0px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
)
res = requests.get(img_url)
if res.status_code == 200:
m, tc = mdl.describe(res.content)
if not tc:
with open(os.path.join(get_project_base_directory(), "web/src/assets/yay.jpg"), "rb") as f:
m, tc = mdl.describe(f.read())
if not m and not tc:
raise Exception(m)
else:
pass
except Exception as e:
msg += f"\nFail to access model({llm['llm_name']})." + str(e)
msg += f"\nFail to access model({mdl_nm})." + str(e)
elif llm["model_type"] == LLMType.TTS:
mdl = TTSModel[factory](
key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
)
try:
for resp in mdl.tts("Hello~ Ragflower!"):
pass
except RuntimeError as e:
msg += f"\nFail to access model({llm['llm_name']})." + str(e)
msg += f"\nFail to access model({mdl_nm})." + str(e)
else:
# TODO: check other type of models
pass
@@ -292,7 +279,7 @@ def add_llm():
return get_json_result(data=True)


@manager.route('/delete_llm', methods=['POST'])
@manager.route('/delete_llm', methods=['POST'])  # noqa: F821
@login_required
@validate_request("llm_factory", "llm_name")
def delete_llm():
@@ -303,7 +290,7 @@ def delete_llm():
return get_json_result(data=True)


@manager.route('/delete_factory', methods=['POST'])
@manager.route('/delete_factory', methods=['POST'])  # noqa: F821
@login_required
@validate_request("llm_factory")
def delete_factory():
@@ -313,7 +300,7 @@ def delete_factory():
return get_json_result(data=True)


@manager.route('/my_llms', methods=['GET'])
@manager.route('/my_llms', methods=['GET'])  # noqa: F821
@login_required
def my_llms():
try:
@@ -334,10 +321,10 @@ def my_llms():
return server_error_response(e)


@manager.route('/list', methods=['GET'])
@manager.route('/list', methods=['GET'])  # noqa: F821
@login_required
def list_app():
self_deploied = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio"]
self_deployed = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
model_type = request.args.get("model_type")
try:
@@ -347,12 +334,14 @@ def list_app():
llms = [m.to_dict()
for m in llms if m.status == StatusEnum.VALID.value and m.fid not in weighted]
for m in llms:
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deploied
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deployed

llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
for o in objs:
if not o.api_key: continue
if o.llm_name + "@" + o.llm_factory in llm_set: continue
if not o.api_key:
continue
if o.llm_name + "@" + o.llm_factory in llm_set:
continue
llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

res = {}
@@ -365,4 +354,4 @@ def list_app():

return get_json_result(data=res)
except Exception as e:
return server_error_response(e)
return server_error_response(e)

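Two conventions carry this add_llm refactor: composite credentials are packed into a single JSON api_key, and self-hosted factories tag the stored model name with a "___<factory>" suffix that mdl_nm strips back off before the connectivity test. A small runnable sketch of those two conventions (illustrative only, not the repo's code; the request dict and key names are examples from the diff):

import json

def apikey_json(req, keys):
    # e.g. keys = ["ark_api_key", "endpoint_id"] for VolcEngine
    return json.dumps({k: req.get(k, "") for k in keys})

req = {"llm_name": "my-embedder", "ark_api_key": "ak", "endpoint_id": "ep"}
llm_name = req["llm_name"] + "___LocalAI"   # the stored name carries the factory tag
mdl_nm = llm_name.split("___")[0]           # the bare name is what the provider actually sees
print(apikey_json(req, ["ark_api_key", "endpoint_id"]), mdl_nm)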
39 api/apps/sdk/agent.py Normal file
@@ -0,0 +1,39 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from api.db.services.canvas_service import UserCanvasService
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result
from flask import request

@manager.route('/agents', methods=['GET'])  # noqa: F821
@token_required
def list_agents(tenant_id):
id = request.args.get("id")
title = request.args.get("title")
if id or title:
canvas = UserCanvasService.query(id=id, title=title, user_id=tenant_id)
if not canvas:
return get_error_data_result("The agent doesn't exist.")
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 30))
orderby = request.args.get("orderby", "update_time")
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
desc = False
else:
desc = True
canvas = UserCanvasService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,title)
return get_result(data=canvas)
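A hypothetical client call for the new GET /agents endpoint above; the base URL, the /api/v1 prefix, and the token are placeholders (assumptions about how the SDK blueprint is mounted), while the query parameters mirror the ones the handler reads:

import requests

resp = requests.get(
    "http://localhost:9380/api/v1/agents",  # assumed mount point
    headers={"Authorization": "Bearer <YOUR_API_KEY>"},
    params={"page": 1, "page_size": 30, "orderby": "update_time", "desc": "true"},
)
print(resp.json())  # list of agent canvases owned by the token's tenant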
@@ -13,45 +13,46 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging

from flask import request
from api import settings
from api.db import StatusEnum
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import TenantService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result, token_required
from api.utils.api_utils import get_result



@manager.route('/chats', methods=['POST'])
@manager.route('/chats', methods=['POST'])  # noqa: F821
@token_required
def create(tenant_id):
req=request.json
ids= req.get("dataset_ids")
if not ids:
return get_error_data_result(message="`dataset_ids` is required")
req = request.json
ids = [i for i in req.get("dataset_ids", []) if i]
for kb_id in ids:
kbs = KnowledgebaseService.accessible(kb_id=kb_id,user_id=tenant_id)
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
if not kbs:
return get_error_data_result(f"You don't own the dataset {kb_id}")
kbs = KnowledgebaseService.query(id=kb_id)
kb = kbs[0]
if kb.chunk_num == 0:
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
kbs = KnowledgebaseService.get_by_ids(ids)
embd_count = list(set([kb.embd_id for kb in kbs]))
if len(embd_count) != 1:
return get_result(message='Datasets use different embedding models."',code=settings.RetCode.AUTHENTICATION_ERROR)
kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
embd_count = list(set(embd_ids))
if len(embd_count) > 1:
return get_result(message='Datasets use different embedding models."',
code=settings.RetCode.AUTHENTICATION_ERROR)
req["kb_ids"] = ids
# llm
llm = req.get("llm")
if llm:
if "model_name" in llm:
req["llm_id"] = llm.pop("model_name")
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
req["llm_setting"] = req.pop("llm")
e, tenant = TenantService.get_by_id(tenant_id)
@@ -65,7 +66,7 @@ def create(tenant_id):
"system": "prompt",
"rerank_id": "rerank_model",
"vector_similarity_weight": "keywords_similarity_weight"}
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
if prompt:
for new_key, old_key in key_mapping.items():
if old_key in prompt:
@@ -82,7 +83,10 @@ def create(tenant_id):
req["top_k"] = req.get("top_k", 1024)
req["rerank_id"] = req.get("rerank_id", "")
if req.get("rerank_id"):
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_id"),model_type="rerank"):
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
llm_name=req.get("rerank_id"),
model_type="rerank"):
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
if not req.get("llm_id"):
req["llm_id"] = tenant.llm_id
@@ -104,9 +108,12 @@ def create(tenant_id):
"parameters": [
{"key": "knowledge", "optional": False}
],
"empty_response": "Sorry! No relevant content was found in the knowledge base!"
"empty_response": "Sorry! No relevant content was found in the knowledge base!",
"quote": True,
"tts": False,
"refine_multiturn": True
}
key_list_2 = ["system", "prologue", "parameters", "empty_response"]
key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
if "prompt_config" not in req:
req['prompt_config'] = {}
for key in key_list_2:
@@ -134,7 +141,7 @@ def create(tenant_id):
res["prompt"] = renamed_dict
del res["prompt_config"]
new_dict = {"similarity_threshold": res["similarity_threshold"],
"keywords_similarity_weight": res["vector_similarity_weight"],
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
"top_n": res["top_n"],
"rerank_model": res['rerank_id']}
res["prompt"].update(new_dict)
@@ -147,21 +154,22 @@
res["avatar"] = res.pop("icon")
return get_result(data=res)

@manager.route('/chats/<chat_id>', methods=['PUT'])

@manager.route('/chats/<chat_id>', methods=['PUT'])  # noqa: F821
@token_required
def update(tenant_id,chat_id):
def update(tenant_id, chat_id):
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
return get_error_data_result(message='You do not own the chat')
req =request.json
req = request.json
ids = req.get("dataset_ids")
if "show_quotation" in req:
req["do_refer"]=req.pop("show_quotation")
req["do_refer"] = req.pop("show_quotation")
if "dataset_ids" in req:
if not ids:
return get_error_data_result("`datasets` can't be empty")
return get_error_data_result("`dataset_ids` can't be empty")
if ids:
for kb_id in ids:
kbs = KnowledgebaseService.accessible(kb_id=chat_id, user_id=tenant_id)
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
if not kbs:
return get_error_data_result(f"You don't own the dataset {kb_id}")
kbs = KnowledgebaseService.query(id=kb_id)
@@ -169,8 +177,9 @@ def update(tenant_id,chat_id):
if kb.chunk_num == 0:
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
kbs = KnowledgebaseService.get_by_ids(ids)
embd_count=list(set([kb.embd_id for kb in kbs]))
if len(embd_count) != 1 :
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]  # remove vendor suffix for comparison
embd_count = list(set(embd_ids))
if len(embd_count) != 1:
return get_result(
message='Datasets use different embedding models."',
code=settings.RetCode.AUTHENTICATION_ERROR)
@@ -179,15 +188,12 @@ def update(tenant_id,chat_id):
if llm:
if "model_name" in llm:
req["llm_id"] = llm.pop("model_name")
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req["llm_id"],model_type="chat"):
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=req["llm_id"], model_type="chat"):
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
req["llm_setting"] = req.pop("llm")
e, tenant = TenantService.get_by_id(tenant_id)
if not e:
return get_error_data_result(message="Tenant not found!")
if req.get("rerank_model"):
if not TenantLLMService.query(tenant_id=tenant_id,llm_name=req.get("rerank_model"),model_type="rerank"):
return get_error_data_result(f"`rerank_model` {req.get('rerank_model')} doesn't exist")
# prompt
prompt = req.get("prompt")
key_mapping = {"parameters": "variables",
@@ -196,7 +202,7 @@ def update(tenant_id,chat_id):
"system": "prompt",
"rerank_id": "rerank_model",
"vector_similarity_weight": "keywords_similarity_weight"}
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id","top_k"]
if prompt:
for new_key, old_key in key_mapping.items():
if old_key in prompt:
@@ -207,6 +213,12 @@ def update(tenant_id,chat_id):
req["prompt_config"] = req.pop("prompt")
e, res = DialogService.get_by_id(chat_id)
res = res.to_json()
if req.get("rerank_id"):
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id,
llm_name=req.get("rerank_id"),
model_type="rerank"):
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
if "name" in req:
if not req.get("name"):
return get_error_data_result(message="`name` is not empty.")
@@ -235,21 +247,21 @@ def update(tenant_id,chat_id):
return get_result()


@manager.route('/chats', methods=['DELETE'])
@manager.route('/chats', methods=['DELETE'])  # noqa: F821
@token_required
def delete(tenant_id):
req = request.json
if not req:
ids=None
ids = None
else:
ids=req.get("ids")
ids = req.get("ids")
if not ids:
id_list = []
dias=DialogService.query(tenant_id=tenant_id,status=StatusEnum.VALID.value)
dias = DialogService.query(tenant_id=tenant_id, status=StatusEnum.VALID.value)
for dia in dias:
id_list.append(dia.id)
else:
id_list=ids
id_list = ids
for id in id_list:
if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
return get_error_data_result(message=f"You don't own the chat {id}")
@@ -257,14 +269,16 @@ def delete(tenant_id):
DialogService.update_by_id(id, temp_dict)
return get_result()

@manager.route('/chats', methods=['GET'])

@manager.route('/chats', methods=['GET'])  # noqa: F821
@token_required
def list_chat(tenant_id):
id = request.args.get("id")
name = request.args.get("name")
chat = DialogService.query(id=id,name=name,status=StatusEnum.VALID.value,tenant_id=tenant_id)
if not chat:
return get_error_data_result(message="The chat doesn't exist")
if id or name:
chat = DialogService.query(id=id, name=name, status=StatusEnum.VALID.value, tenant_id=tenant_id)
if not chat:
return get_error_data_result(message="The chat doesn't exist")
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 30))
orderby = request.args.get("orderby", "create_time")
@@ -272,27 +286,27 @@ def list_chat(tenant_id):
desc = False
else:
desc = True
chats = DialogService.get_list(tenant_id,page_number,items_per_page,orderby,desc,id,name)
chats = DialogService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, name)
if not chats:
return get_result(data=[])
list_assts = []
renamed_dict = {}
key_mapping = {"parameters": "variables",
"prologue": "opener",
"quote": "show_quote",
"system": "prompt",
"rerank_id": "rerank_model",
"vector_similarity_weight": "keywords_similarity_weight",
"do_refer":"show_quotation"}
"do_refer": "show_quotation"}
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
for res in chats:
renamed_dict = {}
for key, value in res["prompt_config"].items():
new_key = key_mapping.get(key, key)
renamed_dict[new_key] = value
res["prompt"] = renamed_dict
del res["prompt_config"]
new_dict = {"similarity_threshold": res["similarity_threshold"],
"keywords_similarity_weight": res["vector_similarity_weight"],
"keywords_similarity_weight": 1-res["vector_similarity_weight"],
"top_n": res["top_n"],
"rerank_model": res['rerank_id']}
res["prompt"].update(new_dict)
@@ -303,11 +317,12 @@ def list_chat(tenant_id):
kb_list = []
for kb_id in res["kb_ids"]:
kb = KnowledgebaseService.query(id=kb_id)
if not kb :
return get_error_data_result(message=f"Don't exist the kb {kb_id}")
if not kb:
logging.warning(f"The kb {kb_id} does not exist.")
continue
kb_list.append(kb[0].to_json())
del res["kb_ids"]
res["datasets"] = kb_list
res["avatar"] = res.pop("icon")
list_assts.append(res)
return get_result(data=list_assts)
return get_result(data=list_assts)

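The keywords_similarity_weight fix in the two hunks above is worth pinning down: the stored vector_similarity_weight is the embedding-side weight, while the SDK reports the keyword-side weight, so the two must sum to 1 and the response has to invert the stored value. A minimal sketch of that conversion (illustrative helper name, not the repo's code):

def to_keywords_similarity_weight(vector_similarity_weight: float) -> float:
    # the two weights are complements of each other
    return 1 - vector_similarity_weight

assert to_keywords_similarity_weight(0.3) == 0.7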
@@ -30,11 +30,11 @@ from api.utils.api_utils import (
token_required,
get_error_data_result,
valid,
get_parser_config,
get_parser_config, valid_parser_config,
)


@manager.route("/datasets", methods=["POST"])
@manager.route("/datasets", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id):
"""
@@ -66,14 +66,11 @@ def create(tenant_id):
type: string
enum: ['me', 'team']
description: Dataset permission.
language:
type: string
enum: ['Chinese', 'English']
description: Language of the dataset.
chunk_method:
type: string
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
"presentation", "picture", "one", "knowledge_graph", "email"]
"presentation", "picture", "one", "knowledge_graph", "email", "tag"
]
description: Chunking method.
parser_config:
type: object
@@ -90,11 +87,10 @@ def create(tenant_id):
req = request.json
e, t = TenantService.get_by_id(tenant_id)
permission = req.get("permission")
language = req.get("language")
chunk_method = req.get("chunk_method")
parser_config = req.get("parser_config")
valid_parser_config(parser_config)
valid_permission = ["me", "team"]
valid_language = ["Chinese", "English"]
valid_chunk_method = [
"naive",
"manual",
@@ -108,12 +104,11 @@ def create(tenant_id):
"one",
"knowledge_graph",
"email",
"tag"
]
check_validation = valid(
permission,
valid_permission,
language,
valid_language,
chunk_method,
valid_chunk_method,
)
@@ -132,13 +127,18 @@ def create(tenant_id):
req["name"] = req["name"].strip()
if req["name"] == "":
return get_error_data_result(message="`name` is not empty string!")
if len(req["name"]) >= 128:
return get_error_data_result(
message="Dataset name should not be longer than 128 characters."
)
if KnowledgebaseService.query(
name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
):
return get_error_data_result(
message="Duplicated dataset name in creating dataset."
)
req["tenant_id"] = req["created_by"] = tenant_id
req["tenant_id"] = tenant_id
req["created_by"] = tenant_id
if not req.get("embedding_model"):
req["embedding_model"] = t.embd_id
else:
@@ -180,6 +180,10 @@ def create(tenant_id):
if old_key in req
}
req.update(mapped_keys)
flds = list(req.keys())
for f in flds:
if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
del req[f]
if not KnowledgebaseService.save(**req):
return get_error_data_result(message="Create dataset error.(Database error)")
renamed_data = {}
@@ -190,7 +194,7 @@ def create(tenant_id):
return get_result(data=renamed_data)


@manager.route("/datasets", methods=["DELETE"])
@manager.route("/datasets", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
"""
@@ -224,6 +228,8 @@ def delete(tenant_id):
schema:
type: object
"""
errors = []
success_count = 0
req = request.json
if not req:
ids = None
@@ -239,12 +245,12 @@ def delete(tenant_id):
for id in id_list:
kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
if not kbs:
return get_error_data_result(message=f"You don't own the dataset {id}")
errors.append(f"You don't own the dataset {id}")
continue
for doc in DocumentService.query(kb_id=id):
if not DocumentService.remove_document(doc, tenant_id):
return get_error_data_result(
message="Remove document error.(Database error)"
)
errors.append(f"Remove document error for dataset {id}")
continue
f2d = File2DocumentService.get_by_document_id(doc.id)
FileService.filter_delete(
[
@@ -256,11 +262,21 @@ def delete(tenant_id):
FileService.filter_delete(
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
if not KnowledgebaseService.delete_by_id(id):
return get_error_data_result(message="Delete dataset error.(Database error)")
errors.append(f"Delete dataset error for {id}")
continue
success_count += 1
if errors:
if success_count > 0:
return get_result(
data={"success_count": success_count, "errors": errors},
message=f"Partially deleted {success_count} datasets with {len(errors)} errors"
)
else:
return get_error_data_result(message="; ".join(errors))
return get_result(code=settings.RetCode.SUCCESS)


@manager.route("/datasets/<dataset_id>", methods=["PUT"])
@manager.route("/datasets/<dataset_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, dataset_id):
"""
@@ -295,14 +311,11 @@ def update(tenant_id, dataset_id):
type: string
enum: ['me', 'team']
description: Updated permission.
language:
type: string
enum: ['Chinese', 'English']
description: Updated language.
chunk_method:
type: string
enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
"presentation", "picture", "one", "knowledge_graph", "email"]
"presentation", "picture", "one", "knowledge_graph", "email", "tag"
]
description: Updated chunking method.
parser_config:
type: object
@@ -321,11 +334,10 @@ def update(tenant_id, dataset_id):
if any(key in req for key in invalid_keys):
return get_error_data_result(message="The input parameters are invalid.")
permission = req.get("permission")
language = req.get("language")
chunk_method = req.get("chunk_method")
parser_config = req.get("parser_config")
valid_parser_config(parser_config)
valid_permission = ["me", "team"]
valid_language = ["Chinese", "English"]
valid_chunk_method = [
"naive",
"manual",
@@ -339,12 +351,11 @@ def update(tenant_id, dataset_id):
"one",
"knowledge_graph",
"email",
"tag"
]
check_validation = valid(
permission,
valid_permission,
language,
valid_language,
chunk_method,
valid_chunk_method,
)
@@ -412,6 +423,10 @@ def update(tenant_id, dataset_id):
req["embd_id"] = req.pop("embedding_model")
if "name" in req:
req["name"] = req["name"].strip()
if len(req["name"]) >= 128:
return get_error_data_result(
message="Dataset name should not be longer than 128 characters."
)
if (
req["name"].lower() != kb.name.lower()
and len(
@@ -429,9 +444,9 @@ def update(tenant_id, dataset_id):
return get_result(code=settings.RetCode.SUCCESS)


@manager.route("/datasets", methods=["GET"])
@manager.route("/datasets", methods=["GET"])  # noqa: F821
@token_required
def list(tenant_id):
def list_datasets(tenant_id):
"""
List datasets.
---
@@ -500,7 +515,9 @@ def list(tenant_id):
page_number = int(request.args.get("page", 1))
items_per_page = int(request.args.get("page_size", 30))
orderby = request.args.get("orderby", "create_time")
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
if request.args.get("desc", "false").lower() not in ["true", "false"]:
return get_error_data_result("desc should be true or false")
if request.args.get("desc", "true").lower() == "false":
desc = False
else:
desc = True

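The reworked DELETE /datasets above no longer aborts on the first failure: it accumulates per-dataset errors and reports a partial-success payload when at least one deletion went through. A hypothetical client call showing the new contract; URL, token, and ids are placeholders:

import requests

resp = requests.delete(
    "http://localhost:9380/api/v1/datasets",  # assumed mount point
    headers={"Authorization": "Bearer <YOUR_API_KEY>"},
    json={"ids": ["dataset_id_1", "dataset_id_2"]},
)
# On a mixed batch, expect something like:
# {"data": {"success_count": 1, "errors": ["You don't own the dataset dataset_id_2"]}, ...}
print(resp.json())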
@@ -15,20 +15,22 @@
#
from flask import request, jsonify

from api.db import LLMType, ParserType
from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api import settings
from api.utils.api_utils import validate_request, build_error_result, apikey_required
from rag.app.tag import label_question


@manager.route('/dify/retrieval', methods=['POST'])
@manager.route('/dify/retrieval', methods=['POST'])  # noqa: F821
@apikey_required
@validate_request("knowledge_id", "query")
def retrieval(tenant_id):
req = request.json
question = req["query"]
kb_id = req["knowledge_id"]
use_kg = req.get("use_kg", False)
retrieval_setting = req.get("retrieval_setting", {})
similarity_threshold = float(retrieval_setting.get("score_threshold", 0.0))
top = int(retrieval_setting.get("top_k", 1024))
@@ -44,8 +46,7 @@ def retrieval(tenant_id):

embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
ranks = retr.retrieval(
ranks = settings.retrievaler.retrieval(
question,
embd_mdl,
kb.tenant_id,
@@ -54,13 +55,24 @@ def retrieval(tenant_id):
page_size=top,
similarity_threshold=similarity_threshold,
vector_similarity_weight=0.3,
top=top
top=top,
rank_feature=label_question(question, [kb])
)

if use_kg:
ck = settings.kg_retrievaler.retrieval(question,
[tenant_id],
[kb_id],
embd_mdl,
LLMBundle(kb.tenant_id, LLMType.CHAT))
if ck["content_with_weight"]:
ranks["chunks"].insert(0, ck)

records = []
for c in ranks["chunks"]:
c.pop("vector", None)
records.append({
"content": c["content_ltks"],
"content": c["content_with_weight"],
"score": c["similarity"],
"title": c["docnm_kwd"],
"metadata": {}

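A hypothetical request against the Dify-compatible endpoint above, exercising the new use_kg flag (which prepends the knowledge-graph chunk when it has content) and the retrieval_setting block the handler reads; the URL path prefix and API key are placeholders, not confirmed by the diff:

import requests

resp = requests.post(
    "http://localhost:9380/api/v1/dify/retrieval",  # assumed mount point
    headers={"Authorization": "Bearer <YOUR_API_KEY>"},
    json={
        "knowledge_id": "<KB_ID>",
        "query": "What is RAGFlow?",
        "use_kg": True,
        "retrieval_setting": {"score_threshold": 0.2, "top_k": 8},
    },
)
for record in resp.json().get("records", []):
    # records now carry the readable content_with_weight text instead of tokenized content_ltks
    print(record["score"], record["title"])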
@ -16,13 +16,12 @@
|
||||
import pathlib
|
||||
import datetime
|
||||
|
||||
from api.db.services.dialog_service import keyword_extraction
|
||||
from rag.app.qa import rmPrefix, beAdoc
|
||||
from rag.nlp import rag_tokenizer
|
||||
from api.db import LLMType, ParserType
|
||||
from api.db.services.llm_service import TenantLLMService
|
||||
from api.db.services.llm_service import TenantLLMService, LLMBundle
|
||||
from api import settings
|
||||
import hashlib
|
||||
import xxhash
|
||||
import re
|
||||
from api.utils.api_utils import token_required
|
||||
from api.db.db_models import Task
|
||||
@ -39,14 +38,36 @@ from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import construct_json_result, get_parser_config
|
||||
from rag.nlp import search
|
||||
from rag.prompts import keyword_extraction
|
||||
from rag.app.tag import label_question
|
||||
from rag.utils import rmSpace
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
import os
|
||||
|
||||
from pydantic import BaseModel, Field, validator
|
||||
|
||||
MAXIMUM_OF_UPLOADING_FILES = 256
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"])
|
||||
class Chunk(BaseModel):
|
||||
id: str = ""
|
||||
content: str = ""
|
||||
document_id: str = ""
|
||||
docnm_kwd: str = ""
|
||||
important_keywords: list = Field(default_factory=list)
|
||||
questions: list = Field(default_factory=list)
|
||||
question_tks: str = ""
|
||||
image_id: str = ""
|
||||
available: bool = True
|
||||
positions: list[list[int]] = Field(default_factory=list)
|
||||
|
||||
@validator('positions')
|
||||
def validate_positions(cls, value):
|
||||
for sublist in value:
|
||||
if len(sublist) != 5:
|
||||
raise ValueError("Each sublist in positions must have a length of 5")
|
||||
return value
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def upload(dataset_id, tenant_id):
|
||||
"""
|
||||
@ -154,7 +175,7 @@ def upload(dataset_id, tenant_id):
|
||||
return get_result(data=renamed_doc_list)
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["PUT"])
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update_doc(tenant_id, dataset_id, document_id):
|
||||
"""
|
||||
@ -219,6 +240,11 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if req["progress"] != doc.progress:
|
||||
return get_error_data_result(message="Can't change `progress`.")
|
||||
|
||||
if "meta_fields" in req:
|
||||
if not isinstance(req["meta_fields"], dict):
|
||||
return get_error_data_result(message="meta_fields must be a dictionary")
|
||||
DocumentService.update_meta_fields(document_id, req["meta_fields"])
|
||||
|
||||
if "name" in req and req["name"] != doc.name:
|
||||
if (
|
||||
pathlib.Path(req["name"].lower()).suffix
|
||||
@ -240,6 +266,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
if informs:
|
||||
e, file = FileService.get_by_id(informs[0].file_id)
|
||||
FileService.update_by_id(file.id, {"name": req["name"]})
|
||||
|
||||
if "parser_config" in req:
|
||||
DocumentService.update_parser_config(doc.id, req["parser_config"])
|
||||
if "chunk_method" in req:
|
||||
@ -256,6 +283,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
"one",
|
||||
"knowledge_graph",
|
||||
"email",
|
||||
"tag"
|
||||
}
|
||||
if req.get("chunk_method") not in valid_chunk_method:
|
||||
return get_error_data_result(
|
||||
@ -297,7 +325,7 @@ def update_doc(tenant_id, dataset_id, document_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"])
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def download(tenant_id, dataset_id, document_id):
|
||||
"""
|
||||
@ -361,7 +389,7 @@ def download(tenant_id, dataset_id, document_id):
|
||||
)
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["GET"])
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_docs(dataset_id, tenant_id):
|
||||
"""
|
||||
@ -451,10 +479,12 @@ def list_docs(dataset_id, tenant_id):
|
||||
return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
if not DocumentService.query(id=id, kb_id=dataset_id):
|
||||
|
||||
if id and not DocumentService.query(id=id, kb_id=dataset_id):
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if not DocumentService.query(name=name, kb_id=dataset_id):
|
||||
if name and not DocumentService.query(name=name, kb_id=dataset_id):
|
||||
return get_error_data_result(message=f"You don't own the document {name}.")
|
||||
|
||||
page = int(request.args.get("page", 1))
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_size = int(request.args.get("page_size", 30))
|
||||
@ -495,7 +525,7 @@ def list_docs(dataset_id, tenant_id):
|
||||
return get_result(data={"total": tol, "docs": renamed_doc_list})
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["DELETE"])
|
||||
@manager.route("/datasets/<dataset_id>/documents", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -587,7 +617,7 @@ def delete(tenant_id, dataset_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["POST"])
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def parse(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -654,7 +684,7 @@ def parse(tenant_id, dataset_id):
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["DELETE"])
|
||||
@manager.route("/datasets/<dataset_id>/chunks", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def stop_parsing(tenant_id, dataset_id):
|
||||
"""
|
||||
@ -702,17 +732,17 @@ def stop_parsing(tenant_id, dataset_id):
|
||||
doc = DocumentService.query(id=id, kb_id=dataset_id)
|
||||
if not doc:
|
||||
return get_error_data_result(message=f"You don't own the document {id}.")
|
||||
if int(doc[0].progress) == 1 or int(doc[0].progress) == 0:
|
||||
if int(doc[0].progress) == 1 or doc[0].progress == 0:
|
||||
return get_error_data_result(
|
||||
"Can't stop parsing document with progress at 0 or 1"
|
||||
)
|
||||
info = {"run": "2", "progress": 0, "chunk_num": 0}
|
||||
DocumentService.update_by_id(id, info)
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
|
||||
settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["GET"])
|
||||
@manager.route("/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_chunks(tenant_id, dataset_id, document_id):
|
||||
"""
|
||||
@ -827,74 +857,59 @@ def list_chunks(tenant_id, dataset_id, document_id):
|
||||
renamed_doc["run"] = run_mapping.get(str(value))
|
||||
|
||||
res = {"total": 0, "chunks": [], "doc": renamed_doc}
|
||||
origin_chunks = []
|
||||
if settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
|
||||
if req.get("id"):
|
||||
chunk = settings.docStoreConn.get(req.get("id"), search.index_name(tenant_id), [dataset_id])
|
||||
k = []
|
||||
for n in chunk.keys():
|
||||
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
||||
k.append(n)
|
||||
for n in k:
|
||||
del chunk[n]
|
||||
if not chunk:
|
||||
return get_error_data_result(f"Chunk `{req.get('id')}` not found.")
|
||||
res['total'] = 1
|
||||
final_chunk = {
|
||||
"id":chunk.get("id",chunk.get("chunk_id")),
|
||||
"content":chunk["content_with_weight"],
|
||||
"document_id":chunk.get("doc_id",chunk.get("document_id")),
|
||||
"docnm_kwd":chunk["docnm_kwd"],
|
||||
"important_keywords":chunk.get("important_kwd",[]),
|
||||
"questions":chunk.get("question_kwd",[]),
|
||||
"dataset_id":chunk.get("kb_id",chunk.get("dataset_id")),
|
||||
"image_id":chunk["img_id"],
|
||||
"available":bool(chunk.get("available_int",1)),
|
||||
"positions":chunk.get("position_int",[]),
|
||||
}
|
||||
res["chunks"].append(final_chunk)
|
||||
_ = Chunk(**final_chunk)
|
||||
|
||||
elif settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
|
||||
sres = settings.retrievaler.search(query, search.index_name(tenant_id), [dataset_id], emb_mdl=None,
|
||||
highlight=True)
|
||||
res["total"] = sres.total
|
||||
sign = 0
|
||||
for id in sres.ids:
|
||||
d = {
|
||||
"id": id,
|
||||
"content_with_weight": (
|
||||
"content": (
|
||||
rmSpace(sres.highlight[id])
|
||||
if question and id in sres.highlight
|
||||
else sres.field[id].get("content_with_weight", "")
|
||||
),
|
||||
"doc_id": sres.field[id]["doc_id"],
|
||||
"document_id": sres.field[id]["doc_id"],
|
||||
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
||||
"important_kwd": sres.field[id].get("important_kwd", []),
|
||||
"img_id": sres.field[id].get("img_id", ""),
|
||||
"available_int": sres.field[id].get("available_int", 1),
|
||||
"positions": sres.field[id].get("position_int", "").split("\t"),
|
||||
"important_keywords": sres.field[id].get("important_kwd", []),
|
||||
"questions": sres.field[id].get("question_kwd", []),
|
||||
"dataset_id": sres.field[id].get("kb_id", sres.field[id].get("dataset_id")),
|
||||
"image_id": sres.field[id].get("img_id", ""),
|
||||
"available": bool(sres.field[id].get("available_int", 1)),
|
||||
"positions": sres.field[id].get("position_int",[]),
|
||||
}
|
||||
if len(d["positions"]) % 5 == 0:
|
||||
poss = []
|
||||
for i in range(0, len(d["positions"]), 5):
|
||||
poss.append(
|
||||
[
|
||||
float(d["positions"][i]),
|
||||
float(d["positions"][i + 1]),
|
||||
float(d["positions"][i + 2]),
|
||||
float(d["positions"][i + 3]),
|
||||
float(d["positions"][i + 4]),
|
||||
]
|
||||
)
|
||||
d["positions"] = poss
|
||||
|
||||
origin_chunks.append(d)
|
||||
if req.get("id"):
|
||||
if req.get("id") == id:
|
||||
origin_chunks.clear()
|
||||
origin_chunks.append(d)
|
||||
sign = 1
|
||||
break
|
||||
if req.get("id"):
|
||||
if sign == 0:
|
||||
        return get_error_data_result(f"Can't find this chunk {req.get('id')}")
    for chunk in origin_chunks:
        key_mapping = {
            "id": "id",
            "content_with_weight": "content",
            "doc_id": "document_id",
            "important_kwd": "important_keywords",
            "img_id": "image_id",
            "available_int": "available",
        }
        renamed_chunk = {}
        for key, value in chunk.items():
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
        if renamed_chunk["available"] == 0:
            renamed_chunk["available"] = False
        if renamed_chunk["available"] == 1:
            renamed_chunk["available"] = True
        res["chunks"].append(renamed_chunk)
        res["chunks"].append(d)
        _ = Chunk(**d) # validate the chunk
    return get_result(data=res)


@manager.route(
@manager.route( # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["POST"]
)
@token_required
@@ -974,14 +989,16 @@ def add_chunk(tenant_id, dataset_id, document_id):
    if not req.get("content"):
        return get_error_data_result(message="`content` is required")
    if "important_keywords" in req:
        if type(req["important_keywords"]) != list:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result(
                "`important_keywords` is required to be a list"
            )
    md5 = hashlib.md5()
    md5.update((req["content"] + document_id).encode("utf-8"))

    chunk_id = md5.hexdigest()
    if "questions" in req:
        if not isinstance(req["questions"], list):
            return get_error_data_result(
                "`questions` is required to be a list"
            )
    chunk_id = xxhash.xxh64((req["content"] + document_id).encode("utf-8")).hexdigest()
    d = {
        "id": chunk_id,
        "content_ltks": rag_tokenizer.tokenize(req["content"]),
@@ -992,6 +1009,10 @@ def add_chunk(tenant_id, dataset_id, document_id):
    d["important_tks"] = rag_tokenizer.tokenize(
        " ".join(req.get("important_keywords", []))
    )
    d["question_kwd"] = req.get("questions", [])
    d["question_tks"] = rag_tokenizer.tokenize(
        "\n".join(req.get("questions", []))
    )
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
    d["kb_id"] = dataset_id
@@ -1001,7 +1022,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id
    )
    v, c = embd_mdl.encode([doc.name, req["content"]])
    v, c = embd_mdl.encode([doc.name, req["content"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.insert([d], search.index_name(tenant_id), dataset_id)
@@ -1013,6 +1034,7 @@ def add_chunk(tenant_id, dataset_id, document_id):
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "question_kwd": "questions",
        "kb_id": "dataset_id",
        "create_timestamp_flt": "create_timestamp",
        "create_time": "create_time",
@@ -1023,11 +1045,12 @@ def add_chunk(tenant_id, dataset_id, document_id):
        if key in key_mapping:
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
    _ = Chunk(**renamed_chunk) # validate the chunk
    return get_result(data={"chunk": renamed_chunk})
    # return get_result(data={"chunk_id": chunk_id})
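# --- A minimal sketch (illustration only, assuming the xxhash package is
# installed): both the old MD5 ID and the new xxh64 ID above are derived
# deterministically from content + document ID, so re-adding identical content
# yields the same chunk ID; xxh64 is simply a faster non-cryptographic hash.
import hashlib
import xxhash

content, document_id = "some chunk text", "doc_42"  # hypothetical values
old_id = hashlib.md5((content + document_id).encode("utf-8")).hexdigest()   # 32 hex chars
new_id = xxhash.xxh64((content + document_id).encode("utf-8")).hexdigest() # 16 hex chars
assert len(old_id) == 32 and len(new_id) == 16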


@manager.route(
@manager.route( # noqa: F821
    "datasets/<dataset_id>/documents/<document_id>/chunks", methods=["DELETE"]
)
@token_required
@@ -1087,7 +1110,7 @@ def rm_chunk(tenant_id, dataset_id, document_id):
    return get_result(message=f"deleted {chunk_number} chunks")


@manager.route(
@manager.route( # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks/<chunk_id>", methods=["PUT"]
)
@token_required
@@ -1166,8 +1189,13 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` should be a list")
        d["important_kwd"] = req.get("important_keywords")
        d["important_kwd"] = req.get("important_keywords", [])
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
    if "questions" in req:
        if not isinstance(req["questions"], list):
            return get_error_data_result("`questions` should be a list")
        d["question_kwd"] = req.get("questions")
        d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["questions"]))
    if "available" in req:
        d["available_int"] = int(req["available"])
    embd_id = DocumentService.get_embd_id(document_id)
@@ -1185,14 +1213,14 @@ def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
            d, arr[0], arr[1], not any([rag_tokenizer.is_chinese(t) for t in q + a])
        )

    v, c = embd_mdl.encode([doc.name, d["content_with_weight"]])
    v, c = embd_mdl.encode([doc.name, d["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.update({"id": chunk_id}, d, search.index_name(tenant_id), dataset_id)
    return get_result()
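# --- A minimal sketch (illustration only) of the two encoding rules above:
# when a chunk carries questions, the joined questions are embedded instead of
# the raw content, and the stored vector mixes 10% title with 90% body, except
# for Q&A-parsed documents, which keep the body vector alone.
import numpy as np

title_vec = np.array([0.1, 0.2, 0.3, 0.4])    # hypothetical embedding of doc.name
content_vec = np.array([0.5, 0.6, 0.7, 0.8])  # hypothetical embedding of the chunk
mixed = 0.1 * title_vec + 0.9 * content_vec
assert np.allclose(mixed, [0.46, 0.56, 0.66, 0.76])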


@manager.route("/retrieval", methods=["POST"])
@manager.route("/retrieval", methods=["POST"]) # noqa: F821
@token_required
def retrieval_test(tenant_id):
    """
@@ -1278,15 +1306,15 @@ def retrieval_test(tenant_id):
    kb_ids = req["dataset_ids"]
    if not isinstance(kb_ids, list):
        return get_error_data_result("`dataset_ids` should be a list")
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    for id in kb_ids:
        if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
            return get_error_data_result(f"You don't own the dataset {id}.")
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs])) # remove vendor suffix for comparison
    if len(embd_nms) != 1:
        return get_result(
            message='Datasets use different embedding models."',
            code=settings.RetCode.AUTHENTICATION_ERROR,
            code=settings.RetCode.DATA_ERROR,
        )
    if "question" not in req:
        return get_error_data_result("`question` is required.")
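# --- A minimal sketch (illustration only) of why the comparison above strips
# the factory suffix first: the same embedding model served through two
# different vendors should still count as one model. The "@factory" split rule
# below is an assumption for illustration, not the library's exact parsing code.
def split_model_name_and_factory(model_id: str):
    name, _, factory = model_id.rpartition("@")
    return (name, factory) if name else (model_id, None)

ids = ["bge-m3@Ollama", "bge-m3@Xinference"]  # hypothetical dataset embd_ids
assert len({split_model_name_and_factory(i)[0] for i in ids}) == 1  # one model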
@@ -1294,6 +1322,7 @@ def retrieval_test(tenant_id):
    size = int(req.get("page_size", 30))
    question = req["question"]
    doc_ids = req.get("document_ids", [])
    use_kg = req.get("use_kg", False)
    if not isinstance(doc_ids, list):
        return get_error_data_result("`documents` should be a list")
    doc_ids_list = KnowledgebaseService.list_documents_by_ids(kb_ids)
@@ -1313,22 +1342,17 @@ def retrieval_test(tenant_id):
    e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
    if not e:
        return get_error_data_result(message="Dataset not found!")
    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id
    )
    embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)

    rerank_mdl = None
    if req.get("rerank_id"):
        rerank_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"]
        )
        rerank_mdl = LLMBundle(kb.tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])

    if req.get("keyword", False):
        chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
        chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
        question += keyword_extraction(chat_mdl, question)

    retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
    ranks = retr.retrieval(
    ranks = settings.retrievaler.retrieval(
        question,
        embd_mdl,
        kb.tenant_id,
@@ -1341,7 +1365,17 @@ def retrieval_test(tenant_id):
        doc_ids,
        rerank_mdl=rerank_mdl,
        highlight=highlight,
        rank_feature=label_question(question, kbs)
    )
    if use_kg:
        ck = settings.kg_retrievaler.retrieval(question,
                                               [k.tenant_id for k in kbs],
                                               kb_ids,
                                               embd_mdl,
                                               LLMBundle(kb.tenant_id, LLMType.CHAT))
        if ck["content_with_weight"]:
            ranks["chunks"].insert(0, ck)

    for c in ranks["chunks"]:
        c.pop("vector", None)

@@ -1353,7 +1387,9 @@ def retrieval_test(tenant_id):
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "question_kwd": "questions",
        "docnm_kwd": "document_keyword",
        "kb_id":"dataset_id"
    }
    rename_chunk = {}
    for key, value in chunk.items():

@@ -15,27 +15,31 @@
#
import re
import json
from copy import deepcopy
from uuid import uuid4
import time

from api.db import LLMType
from flask import request, Response
from api.db.services.dialog_service import ask
from api.db.services.conversation_service import ConversationService, iframe_completion
from api.db.services.conversation_service import completion as rag_completion
from api.db.services.canvas_service import completion as agent_completion
from api.db.services.dialog_service import ask, chat
from agent.canvas import Canvas
from api.db import StatusEnum
from api.db.db_models import API4Conversation
from api.db.db_models import APIToken
from api.db.services.api_service import API4ConversationService
from api.db.services.canvas_service import UserCanvasService
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils import get_uuid
from api.utils.api_utils import get_error_data_result
from api.utils.api_utils import get_error_data_result, validate_request
from api.utils.api_utils import get_result, token_required
from api.db.services.llm_service import LLMBundle
from api.db.services.file_service import FileService

from flask import jsonify, request, Response


@manager.route('/chats/<chat_id>/sessions', methods=['POST'])
@manager.route('/chats/<chat_id>/sessions', methods=['POST']) # noqa: F821
@token_required
def create(tenant_id,chat_id):
def create(tenant_id, chat_id):
    req = request.json
    req["dialog_id"] = chat_id
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
@@ -45,7 +49,8 @@ def create(tenant_id,chat_id):
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
        "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}],
        "user_id": req.get("user_id", "")
    }
    if not conv.get("name"):
        return get_error_data_result(message="`name` can not be empty.")
@@ -60,39 +65,82 @@ def create(tenant_id,chat_id):
    return get_result(data=conv)


@manager.route('/agents/<agent_id>/sessions', methods=['POST'])
@manager.route('/agents/<agent_id>/sessions', methods=['POST']) # noqa: F821
@token_required
def create_agent_session(tenant_id, agent_id):
    req = request.json
    if not request.is_json:
        req = request.form
    files = request.files
    user_id = request.args.get('user_id', '')

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if cvs.user_id != tenant_id:
        return get_error_data_result(message="You do not own the agent.")

    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result("You cannot access the agent.")

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    canvas = Canvas(cvs.dsl, tenant_id)
    canvas.reset()
    query = canvas.get_preset_param()
    if query:
        for ele in query:
            if not ele["optional"]:
                if ele["type"] == "file":
                    if files is None or not files.get(ele["key"]):
                        return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
                    upload_file = files.get(ele["key"])
                    file_content = FileService.parse_docs([upload_file], user_id)
                    file_name = upload_file.filename
                    ele["value"] = file_name + "\n" + file_content
                else:
                    if req is None or not req.get(ele["key"]):
                        return get_error_data_result(f"`{ele['key']}` with type `{ele['type']}` is required")
                    ele["value"] = req[ele["key"]]
            else:
                if ele["type"] == "file":
                    if files is not None and files.get(ele["key"]):
                        upload_file = files.get(ele["key"])
                        file_content = FileService.parse_docs([upload_file], user_id)
                        file_name = upload_file.filename
                        ele["value"] = file_name + "\n" + file_content
                    else:
                        if "value" in ele:
                            ele.pop("value")
                else:
                    if req is not None and req.get(ele["key"]):
                        ele["value"] = req[ele['key']]
                    else:
                        if "value" in ele:
                            ele.pop("value")
    else:
        for ans in canvas.run(stream=False):
            pass
    cvs.dsl = json.loads(str(canvas))
    conv = {
        "id": get_uuid(),
        "dialog_id": cvs.id,
        "user_id": req.get("usr_id","") if isinstance(req, dict) else "",
        "user_id": user_id,
        "message": [{"role": "assistant", "content": canvas.get_prologue()}],
        "source": "agent"
        "source": "agent",
        "dsl": cvs.dsl
    }
    API4ConversationService.save(**conv)
    conv["agent_id"] = conv.pop("dialog_id")
    return get_result(data=conv)
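# --- A minimal sketch (illustration only; URL, keys and file are hypothetical)
# of calling the endpoint above now that it accepts multipart form data:
# file-typed preset parameters travel as uploads, the rest as form fields.
import requests

resp = requests.post(
    "http://ragflow_address/api/v1/agents/AGENT_ID/sessions?user_id=u1",
    headers={"Authorization": "Bearer RAGFLOW_API_KEY"},
    data={"city": "Berlin"},                     # a non-file preset parameter
    files={"resume": open("resume.pdf", "rb")},  # a file-typed preset parameter
)
print(resp.json())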


@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT'])
@manager.route('/chats/<chat_id>/sessions/<session_id>', methods=['PUT']) # noqa: F821
@token_required
def update(tenant_id,chat_id,session_id):
def update(tenant_id, chat_id, session_id):
    req = request.json
    req["dialog_id"] = chat_id
    conv_id = session_id
    conv = ConversationService.query(id=conv_id,dialog_id=chat_id)
    conv = ConversationService.query(id=conv_id, dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
@@ -108,276 +156,246 @@ def update(tenant_id,chat_id,session_id):
    return get_result()


@manager.route('/chats/<chat_id>/completions', methods=['POST'])
@manager.route('/chats/<chat_id>/completions', methods=['POST']) # noqa: F821
@token_required
def completion(tenant_id, chat_id):
def chat_completion(tenant_id, chat_id):
    req = request.json
    if not req:
        req = {"question": ""}
    if not req.get("session_id"):
        conv = {
            "id": get_uuid(),
            "dialog_id": chat_id,
            "name": req.get("name", "New session"),
            "message": [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
        }
        if not conv.get("name"):
            return get_error_data_result(message="`name` can not be empty.")
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        session_id=conv.id
    else:
        session_id = req.get("session_id")
    if not req.get("question"):
        return get_error_data_result(message="Please input your question.")
    conv = ConversationService.query(id=session_id,dialog_id=chat_id)
    if not conv:
        return get_error_data_result(message="Session does not exist")
    conv = conv[0]
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You do not own the chat")
    msg = []
    question = {
        "content": req.get("question"),
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    for m in conv.message:
        if m["role"] == "system": continue
        if m["role"] == "assistant" and not msg: continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    def fillin_conv(ans):
        reference = ans["reference"]
        temp_reference = deepcopy(ans["reference"])
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(temp_reference)
        else:
            conv.reference[-1] = temp_reference
        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                            "id": message_id, "prompt": ans.get("prompt", "")}
        if "chunks" in reference:
            chunks = reference.get("chunks")
            chunk_list = []
            for chunk in chunks:
                new_chunk = {
                    "id": chunk["chunk_id"],
                    "content": chunk["content_with_weight"],
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk.get("image_id", ""),
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
                    "positions": chunk.get("positions", []),
                }
                chunk_list.append(new_chunk)
            reference["chunks"] = chunk_list
        ans["id"] = message_id
        ans["session_id"]=session_id

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, **req):
                fillin_conv(ans)
                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e),"reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"

    req["question"]=""
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(f"You don't own the chat {chat_id}")
    if req.get("session_id"):
        if not ConversationService.query(id=req["session_id"], dialog_id=chat_id):
            return get_error_data_result(f"You don't own the session {req['session_id']}")
    if req.get("stream", True):
        resp = Response(stream(), mimetype="text/event-stream")
        resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")

        return resp

    else:
        answer = None
        for ans in chat(dia, msg, **req):
        for ans in rag_completion(tenant_id, chat_id, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_result(data=answer)


@manager.route('/agents/<agent_id>/completions', methods=['POST'])
@manager.route('chats_openai/<chat_id>/chat/completions', methods=['POST']) # noqa: F821
@validate_request("model", "messages") # noqa: F821
@token_required
def agent_completion(tenant_id, agent_id):
def chat_completion_openai_like(tenant_id, chat_id):
    """
    OpenAI-like chat completion API that simulates the behavior of OpenAI's completions endpoint.

    This function allows users to interact with a model and receive responses based on a series of historical messages.
    If `stream` is set to True (by default), the response will be streamed in chunks, mimicking the OpenAI-style API.
    Set `stream` to False explicitly, the response will be returned in a single complete answer.
    Example usage:

    curl -X POST https://ragflow_address.com/api/v1/chats_openai/<chat_id>/chat/completions \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $RAGFLOW_API_KEY" \
        -d '{
            "model": "model",
            "messages": [{"role": "user", "content": "Say this is a test!"}],
            "stream": true
        }'

    Alternatively, you can use Python's `OpenAI` client:

    from openai import OpenAI

    model = "model"
    client = OpenAI(api_key="ragflow-api-key", base_url=f"http://ragflow_address/api/v1/chats_openai/<chat_id>")

    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who are you?"},
            {"role": "assistant", "content": "I am an AI assistant named..."},
            {"role": "user", "content": "Can you tell me how to install neovim"},
        ],
        stream=True
    )

    stream = True
    if stream:
        for chunk in completion:
            print(chunk)
    else:
        print(completion.choices[0].message.content)
    """
    req = request.json

    e, cvs = UserCanvasService.get_by_id(agent_id)
    if not e:
        return get_error_data_result("Agent not found.")
    if cvs.user_id != tenant_id:
        return get_error_data_result(message="You do not own the agent.")
    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
    canvas = Canvas(cvs.dsl, tenant_id)
    messages = req.get("messages", [])
    # To prevent empty [] input
    if len(messages) < 1:
        return get_error_data_result("You have to provide messages.")
    if messages[-1]["role"] != "user":
        return get_error_data_result("The last content of this conversation is not from user.")

    if not req.get("session_id"):
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": cvs.id,
            "user_id": req.get("user_id",""),
            "message": [{"role": "assistant", "content": canvas.get_prologue()}],
            "source": "agent"
        }
        API4ConversationService.save(**conv)
        conv = API4Conversation(**conv)
    else:
        session_id = req.get("session_id")
        e, conv = API4ConversationService.get_by_id(req["session_id"])
        if not e:
            return get_error_data_result(message="Session not found!")
    prompt = messages[-1]["content"]
    # Treat context tokens as reasoning tokens
    context_token_used = sum(len(message["content"]) for message in messages)

    messages = conv.message
    question = req.get("question")
    if not question:
        return get_error_data_result("`question` is required.")
    question={
        "role":"user",
        "content":question,
        "id": str(uuid4())
    }
    messages.append(question)
    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    if not msg[-1].get("id"): msg[-1]["id"] = get_uuid()
    message_id = msg[-1]["id"]
    dia = DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value)
    if not dia:
        return get_error_data_result(f"You don't own the chat {chat_id}")
    dia = dia[0]

    if "quote" not in req: req["quote"] = False
    stream = req.get("stream", True)
    # Filter system and non-sense assistant messages
    msg = None
    msg = [m for m in messages if m["role"] != "system" and (m["role"] != "assistant" or msg)]

    def fillin_conv(ans):
        reference = ans["reference"]
        temp_reference = deepcopy(ans["reference"])
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(temp_reference)
        else:
            conv.reference[-1] = temp_reference
        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
        if "chunks" in reference:
            chunks = reference.get("chunks")
            chunk_list = []
            for chunk in chunks:
                new_chunk = {
                    "id": chunk["chunk_id"],
                    "content": chunk["content_with_weight"],
                    "document_id": chunk["doc_id"],
                    "document_name": chunk["docnm_kwd"],
                    "dataset_id": chunk["kb_id"],
                    "image_id": chunk["image_id"],
                    "similarity": chunk["similarity"],
                    "vector_similarity": chunk["vector_similarity"],
                    "term_similarity": chunk["term_similarity"],
                    "positions": chunk["positions"],
                }
                chunk_list.append(new_chunk)
            reference["chunks"] = chunk_list
        ans["id"] = message_id
        ans["session_id"] = session_id
    if req.get("stream", True):
        # The value for the usage field on all chunks except for the last one will be null.
        # The usage field on the last chunk contains token usage statistics for the entire request.
        # The choices field on the last chunk will always be an empty array [].
        def streamed_response_generator(chat_id, dia, msg):
            token_used = 0
            response = {
                "id": f"chatcmpl-{chat_id}",
                "choices": [
                    {
                        "delta": {
                            "content": "",
                            "role": "assistant",
                            "function_call": None,
                            "tool_calls": None
                        },
                        "finish_reason": None,
                        "index": 0,
                        "logprobs": None
                    }
                ],
                "created": int(time.time()),
                "model": "model",
                "object": "chat.completion.chunk",
                "system_fingerprint": "",
                "usage": None
            }

            def rename_field(ans):
                reference = ans['reference']
                if not isinstance(reference, dict):
                    return
                for chunk_i in reference.get('chunks', []):
                    if 'docnm_kwd' in chunk_i:
                        chunk_i['doc_name'] = chunk_i['docnm_kwd']
                        chunk_i.pop('docnm_kwd')
            conv.message.append(msg[-1])

            if not conv.reference:
                conv.reference = []
            conv.message.append({"role": "assistant", "content": "", "id": message_id})
            conv.reference.append({"chunks": [], "doc_aggs": []})

            final_ans = {"reference": [], "content": ""}

            canvas.messages.append(msg[-1])
            canvas.add_user_input(msg[-1]["content"])

    if stream:
        def sse():
            nonlocal answer, cvs
            try:
                for ans in canvas.run(stream=True):
                    if ans.get("running_status"):
                        yield "data:" + json.dumps({"code": 0, "message": "",
                                                    "data": {"answer": ans["content"],
                                                             "running_status": True}},
                                                   ensure_ascii=False) + "\n\n"
                        continue
                    for k in ans.keys():
                        final_ans[k] = ans[k]
                    ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                    fillin_conv(ans)
                    rename_field(ans)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                               ensure_ascii=False) + "\n\n"

                canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                canvas.history.append(("assistant", final_ans["content"]))
                if final_ans.get("reference"):
                    canvas.reference.append(final_ans["reference"])
                cvs.dsl = json.loads(str(canvas))
                API4ConversationService.append_message(conv.id, conv.to_dict())
                for ans in chat(dia, msg, True):
                    answer = ans["answer"]
                    incremental = answer[token_used:]
                    token_used += len(incremental)
                    response["choices"][0]["delta"]["content"] = incremental
                    yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            except Exception as e:
                cvs.dsl = json.loads(str(canvas))
                API4ConversationService.append_message(conv.id, conv.to_dict())
                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
                response["choices"][0]["delta"]["content"] = "**ERROR**: " + str(e)
                yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"

        resp = Response(sse(), mimetype="text/event-stream")
            # The last chunk
            response["choices"][0]["delta"]["content"] = None
            response["choices"][0]["finish_reason"] = "stop"
            response["usage"] = {
                "prompt_tokens": len(prompt),
                "completion_tokens": token_used,
                "total_tokens": len(prompt) + token_used
            }
            yield f"data:{json.dumps(response, ensure_ascii=False)}\n\n"
            yield "data:[DONE]\n\n"


        resp = Response(streamed_response_generator(chat_id, dia, msg), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
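# --- A minimal sketch (illustration only) of consuming the stream produced
# above: every chunk but the last carries a text delta and a null usage field;
# the final chunk flips that, and the stream ends with "data:[DONE]".
import json

def read_sse(lines):
    for line in lines:
        if not line.startswith("data:"):
            continue
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        if chunk["usage"] is not None:  # final chunk: token statistics only
            print("\ntotal tokens:", chunk["usage"]["total_tokens"])
        else:                           # intermediate chunk: incremental text
            print(chunk["choices"][0]["delta"]["content"], end="")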
    else:
        answer = None
        for ans in chat(dia, msg, False):
            # focus answer content only
            answer = ans
            break
        content = answer["answer"]

        for answer in canvas.run(stream=False):
            if answer.get("running_status"): continue
            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
        if final_ans.get("reference"):
            canvas.reference.append(final_ans["reference"])
        cvs.dsl = json.loads(str(canvas))

        result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
        fillin_conv(result)
        API4ConversationService.append_message(conv.id, conv.to_dict())
        rename_field(result)
        return get_result(data=result)
        response = {
            "id": f"chatcmpl-{chat_id}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": req.get("model", ""),
            "usage": {
                "prompt_tokens": len(prompt),
                "completion_tokens": len(content),
                "total_tokens": len(prompt) + len(content),
                "completion_tokens_details": {
                    "reasoning_tokens": context_token_used,
                    "accepted_prediction_tokens": len(content),
                    "rejected_prediction_tokens": 0 # 0 for simplicity
                }
            },
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": content
                    },
                    "logprobs": None,
                    "finish_reason": "stop",
                    "index": 0
                }
            ]
        }
        return jsonify(response)


@manager.route('/chats/<chat_id>/sessions', methods=['GET'])
@manager.route('/agents/<agent_id>/completions', methods=['POST']) # noqa: F821
@token_required
def list_session(chat_id,tenant_id):
def agent_completions(tenant_id, agent_id):
    req = request.json
    cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
    if not cvs:
        return get_error_data_result(f"You don't own the agent {agent_id}")
    if req.get("session_id"):
        dsl = cvs[0].dsl
        if not isinstance(dsl, str):
            dsl = json.dumps(dsl)
        #canvas = Canvas(dsl, tenant_id)
        #if canvas.get_preset_param():
        # req["question"] = ""
        conv = API4ConversationService.query(id=req["session_id"], dialog_id=agent_id)
        if not conv:
            return get_error_data_result(f"You don't own the session {req['session_id']}")
        # If an update to UserCanvas is detected, update the API4Conversation.dsl
        sync_dsl = req.get("sync_dsl", False)
        if sync_dsl is True and cvs[0].update_time > conv[0].update_time:
            current_dsl = conv[0].dsl
            new_dsl = json.loads(dsl)
            state_fields = ["history", "messages", "path", "reference"]
            states = {field: current_dsl.get(field, []) for field in state_fields}
            current_dsl.update(new_dsl)
            current_dsl.update(states)
            API4ConversationService.update_by_id(req["session_id"], {
                "dsl": current_dsl
            })
    else:
        req["question"] = ""
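# --- A minimal sketch (illustration only, with hypothetical DSL contents) of
# the sync_dsl merge above: the newly published canvas definition wins, while
# the session's runtime state fields survive the update.
new_dsl = {"components": ["v2"], "history": []}
current_dsl = {"components": ["v1"], "history": [("user", "hi")],
               "messages": [], "path": [], "reference": []}
state_fields = ["history", "messages", "path", "reference"]
states = {field: current_dsl.get(field, []) for field in state_fields}
current_dsl.update(new_dsl)   # take the new definition...
current_dsl.update(states)    # ...then restore the conversation state
assert current_dsl["components"] == ["v2"] and current_dsl["history"]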
    if req.get("stream", True):
        resp = Response(agent_completion(tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp
    try:
        for answer in agent_completion(tenant_id, agent_id, **req):
            return get_result(data=answer)
    except Exception as e:
        return get_error_data_result(str(e))


@manager.route('/chats/<chat_id>/sessions', methods=['GET']) # noqa: F821
@token_required
def list_session(tenant_id, chat_id):
    if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message=f"You don't own the assistant {chat_id}.")
    id = request.args.get("id")
@@ -385,11 +403,12 @@ def list_session(chat_id,tenant_id):
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    user_id = request.args.get("user_id")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    convs = ConversationService.get_list(chat_id,page_number,items_per_page,orderby,desc,id,name)
    convs = ConversationService.get_list(chat_id, page_number, items_per_page, orderby, desc, id, name, user_id)
    if not convs:
        return get_result(data=[])
    for conv in convs:
@@ -410,16 +429,68 @@ def list_session(chat_id,tenant_id):
                chunks = conv["reference"][chunk_num]["chunks"]
                for chunk in chunks:
                    new_chunk = {
                        "id": chunk["chunk_id"],
                        "content": chunk["content_with_weight"],
                        "document_id": chunk["doc_id"],
                        "document_name": chunk["docnm_kwd"],
                        "dataset_id": chunk["kb_id"],
                        "image_id": chunk["image_id"],
                        "similarity": chunk["similarity"],
                        "vector_similarity": chunk["vector_similarity"],
                        "term_similarity": chunk["term_similarity"],
                        "positions": chunk["positions"],
                        "id": chunk.get("chunk_id", chunk.get("id")),
                        "content": chunk.get("content_with_weight", chunk.get("content")),
                        "document_id": chunk.get("doc_id", chunk.get("document_id")),
                        "document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
                        "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
                        "image_id": chunk.get("image_id", chunk.get("img_id")),
                        "positions": chunk.get("positions", chunk.get("position_int")),
                    }

                    chunk_list.append(new_chunk)
            chunk_num += 1
            messages[message_num]["reference"] = chunk_list
        message_num += 1
    del conv["reference"]
    return get_result(data=convs)
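# --- A minimal sketch (illustration only) of the fallback lookups above:
# references written by older releases use internal keys (chunk_id, doc_id,
# kb_id, ...), newer ones already use the public names, so every field reads
# one spelling and falls back to the other.
chunk = {"id": "c1", "content": "text", "document_id": "d1"}  # new-style record
new_chunk = {
    "id": chunk.get("chunk_id", chunk.get("id")),
    "content": chunk.get("content_with_weight", chunk.get("content")),
    "document_id": chunk.get("doc_id", chunk.get("document_id")),
}
assert new_chunk == {"id": "c1", "content": "text", "document_id": "d1"}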


@manager.route('/agents/<agent_id>/sessions', methods=['GET']) # noqa: F821
@token_required
def list_agent_session(tenant_id, agent_id):
    if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
        return get_error_data_result(message=f"You don't own the agent {agent_id}.")
    id = request.args.get("id")
    user_id = request.args.get("user_id")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "update_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    # dsl defaults to True in all cases except for False and false
    include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
    convs = API4ConversationService.get_list(agent_id, tenant_id, page_number, items_per_page, orderby, desc, id,
                                             user_id, include_dsl)
    if not convs:
        return get_result(data=[])
    for conv in convs:
        conv['messages'] = conv.pop("message")
        infos = conv["messages"]
        for info in infos:
            if "prompt" in info:
                info.pop("prompt")
        conv["agent_id"] = conv.pop("dialog_id")
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in conv["reference"][chunk_num]:
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk.get("chunk_id", chunk.get("id")),
                                "content": chunk.get("content_with_weight", chunk.get("content")),
                                "document_id": chunk.get("doc_id", chunk.get("document_id")),
                                "document_name": chunk.get("docnm_kwd", chunk.get("document_name")),
                                "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
                                "image_id": chunk.get("image_id", chunk.get("img_id")),
                                "positions": chunk.get("positions", chunk.get("position_int")),
                            }
                            chunk_list.append(new_chunk)
                    chunk_num += 1
@@ -429,9 +500,9 @@ def list_session(chat_id,tenant_id):
    return get_result(data=convs)


@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"])
@manager.route('/chats/<chat_id>/sessions', methods=["DELETE"]) # noqa: F821
@token_required
def delete(tenant_id,chat_id):
def delete(tenant_id, chat_id):
    if not DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_error_data_result(message="You don't own the chat")
    req = request.json
@@ -439,22 +510,55 @@ def delete(tenant_id,chat_id):
    if not req:
        ids = None
    else:
        ids=req.get("ids")
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list=ids
        conv_list = ids
    for id in conv_list:
        conv = ConversationService.query(id=id,dialog_id=chat_id)
        conv = ConversationService.query(id=id, dialog_id=chat_id)
        if not conv:
            return get_error_data_result(message="The chat doesn't own the session")
        ConversationService.delete_by_id(id)
    return get_result()

@manager.route('/sessions/ask', methods=['POST'])

@manager.route('/agents/<agent_id>/sessions', methods=["DELETE"]) # noqa: F821
@token_required
def delete_agent_session(tenant_id, agent_id):
    req = request.json
    cvs = UserCanvasService.query(user_id=tenant_id, id=agent_id)
    if not cvs:
        return get_error_data_result(f"You don't own the agent {agent_id}")

    convs = API4ConversationService.query(dialog_id=agent_id)
    if not convs:
        return get_error_data_result(f"Agent {agent_id} has no sessions")

    if not req:
        ids = None
    else:
        ids = req.get("ids")

    if not ids:
        conv_list = []
        for conv in convs:
            conv_list.append(conv.id)
    else:
        conv_list = ids

    for session_id in conv_list:
        conv = API4ConversationService.query(id=session_id, dialog_id=agent_id)
        if not conv:
            return get_error_data_result(f"The agent doesn't own the session ${session_id}")
        API4ConversationService.delete_by_id(session_id)
    return get_result()


@manager.route('/sessions/ask', methods=['POST']) # noqa: F821
@token_required
def ask_about(tenant_id):
    req = request.json
@@ -462,17 +566,18 @@ def ask_about(tenant_id):
        return get_error_data_result("`question` is required.")
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    if not isinstance(req.get("dataset_ids"),list):
    if not isinstance(req.get("dataset_ids"), list):
        return get_error_data_result("`dataset_ids` should be a list.")
    req["kb_ids"]=req.pop("dataset_ids")
    req["kb_ids"] = req.pop("dataset_ids")
    for kb_id in req["kb_ids"]:
        if not KnowledgebaseService.accessible(kb_id,tenant_id):
        if not KnowledgebaseService.accessible(kb_id, tenant_id):
            return get_error_data_result(f"You don't own the dataset {kb_id}.")
        kbs = KnowledgebaseService.query(id=kb_id)
        kb = kbs[0]
        if kb.chunk_num == 0:
            return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
    uid = tenant_id

    def stream():
        nonlocal req, uid
        try:
@@ -492,7 +597,7 @@ def ask_about(tenant_id):
    return resp


@manager.route('/sessions/related_questions', methods=['POST'])
@manager.route('/sessions/related_questions', methods=['POST']) # noqa: F821
@token_required
def related_questions(tenant_id):
    req = request.json
@@ -529,3 +634,57 @@ Keywords: {question}
Related search terms:
    """}], {"temperature": 0.9})
    return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])


@manager.route('/chatbots/<dialog_id>/completions', methods=['POST']) # noqa: F821
def chatbot_completions(dialog_id):
    req = request.json

    token = request.headers.get('Authorization').split()
    if len(token) != 2:
        return get_error_data_result(message='Authorization is not valid!"')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message='Authentication error: API key is invalid!"')

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(iframe_completion(dialog_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in iframe_completion(dialog_id, **req):
        return get_result(data=answer)
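# --- A minimal sketch (illustration only; URL and token are hypothetical) of
# calling the route above: it has no @token_required, so authentication is the
# `beta` token checked against APIToken records via the Authorization header.
import requests

resp = requests.post(
    "http://ragflow_address/api/v1/chatbots/DIALOG_ID/completions",
    headers={"Authorization": "Bearer BETA_TOKEN"},
    json={"question": "What is RAGFlow?", "stream": False},
)
print(resp.json())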


@manager.route('/agentbots/<agent_id>/completions', methods=['POST']) # noqa: F821
def agent_bot_completions(agent_id):
    req = request.json

    token = request.headers.get('Authorization').split()
    if len(token) != 2:
        return get_error_data_result(message='Authorization is not valid!"')
    token = token[1]
    objs = APIToken.query(beta=token)
    if not objs:
        return get_error_data_result(message='Authentication error: API key is invalid!"')

    if "quote" not in req:
        req["quote"] = False

    if req.get("stream", True):
        resp = Response(agent_completion(objs[0].tenant_id, agent_id, **req), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
        return get_result(data=answer)

@@ -38,7 +38,7 @@ from timeit import default_timer as timer
from rag.utils.redis_conn import REDIS_CONN


@manager.route("/version", methods=["GET"])
@manager.route("/version", methods=["GET"]) # noqa: F821
@login_required
def version():
    """
@@ -61,7 +61,7 @@ def version():
    return get_json_result(data=get_ragflow_version())


@manager.route("/status", methods=["GET"])
@manager.route("/status", methods=["GET"]) # noqa: F821
@login_required
def status():
    """
@@ -170,7 +170,7 @@ def status():
    return get_json_result(data=res)


@manager.route("/new_token", methods=["POST"])
@manager.route("/new_token", methods=["POST"]) # noqa: F821
@login_required
def new_token():
    """
@@ -201,10 +201,11 @@ def new_token():
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        tenant_id = tenants[0].tenant_id
        tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
        obj = {
            "tenant_id": tenant_id,
            "token": generate_confirmation_token(tenant_id),
            "beta": generate_confirmation_token(generate_confirmation_token(tenant_id)).replace("ragflow-", "")[:32],
            "create_time": current_timestamp(),
            "create_date": datetime_format(datetime.now()),
            "update_time": None,
@@ -219,7 +220,7 @@ def new_token():
        return server_error_response(e)
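# --- A minimal sketch (illustration only; the stand-in helper below is an
# assumption, not the real implementation) of the two token shapes created
# above: the API token keeps its "ragflow-" prefix, while the beta token is a
# second-round token with the prefix stripped and truncated to 32 characters.
import hashlib

def generate_confirmation_token(seed: str) -> str:  # hypothetical stand-in
    return "ragflow-" + hashlib.sha256(seed.encode()).hexdigest()

tenant_id = "t1"
token = generate_confirmation_token(tenant_id)
beta = generate_confirmation_token(generate_confirmation_token(tenant_id)).replace("ragflow-", "")[:32]
assert token.startswith("ragflow-") and len(beta) == 32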


@manager.route("/token_list", methods=["GET"])
@manager.route("/token_list", methods=["GET"]) # noqa: F821
@login_required
def token_list():
    """
@@ -255,13 +256,19 @@ def token_list():
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        objs = APITokenService.query(tenant_id=tenants[0].tenant_id)
        return get_json_result(data=[o.to_dict() for o in objs])
        tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
        objs = APITokenService.query(tenant_id=tenant_id)
        objs = [o.to_dict() for o in objs]
        for o in objs:
            if not o["beta"]:
                o["beta"] = generate_confirmation_token(generate_confirmation_token(tenants[0].tenant_id)).replace("ragflow-", "")[:32]
                APITokenService.filter_update([APIToken.tenant_id == tenant_id, APIToken.token == o["token"]], o)
        return get_json_result(data=objs)
    except Exception as e:
        return server_error_response(e)


@manager.route("/token/<token>", methods=["DELETE"])
@manager.route("/token/<token>", methods=["DELETE"]) # noqa: F821
@login_required
def rm(token):
    """

@@ -26,7 +26,7 @@ from api.utils import get_uuid, delta_seconds
from api.utils.api_utils import get_json_result, validate_request, server_error_response, get_data_error_result


@manager.route("/<tenant_id>/user/list", methods=["GET"])
@manager.route("/<tenant_id>/user/list", methods=["GET"]) # noqa: F821
@login_required
def user_list(tenant_id):
    if current_user.id != tenant_id:
@@ -44,7 +44,7 @@ def user_list(tenant_id):
        return server_error_response(e)


@manager.route('/<tenant_id>/user', methods=['POST'])
@manager.route('/<tenant_id>/user', methods=['POST']) # noqa: F821
@login_required
@validate_request("email")
def create(tenant_id):
@@ -55,32 +55,36 @@ def create(tenant_id):
                               code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json
    usrs = UserService.query(email=req["email"])
    if not usrs:
    invite_user_email = req["email"]
    invite_users = UserService.query(email=invite_user_email)
    if not invite_users:
        return get_data_error_result(message="User not found.")

    user_id = usrs[0].id
    user_tenants = UserTenantService.query(user_id=user_id, tenant_id=tenant_id)
    user_id_to_invite = invite_users[0].id
    user_tenants = UserTenantService.query(user_id=user_id_to_invite, tenant_id=tenant_id)
    if user_tenants:
        if user_tenants[0].status == UserTenantRole.NORMAL.value:
            return get_data_error_result(message="This user is in the team already.")
        return get_data_error_result(message="Invitation notification is sent.")
        user_tenant_role = user_tenants[0].role
        if user_tenant_role == UserTenantRole.NORMAL:
            return get_data_error_result(message=f"{invite_user_email} is already in the team.")
        if user_tenant_role == UserTenantRole.OWNER:
            return get_data_error_result(message=f"{invite_user_email} is the owner of the team.")
        return get_data_error_result(message=f"{invite_user_email} is in the team, but the role: {user_tenant_role} is invalid.")

    UserTenantService.save(
        id=get_uuid(),
        user_id=user_id,
        user_id=user_id_to_invite,
        tenant_id=tenant_id,
        invited_by=current_user.id,
        role=UserTenantRole.INVITE,
        status=StatusEnum.VALID.value)

    usr = usrs[0].to_dict()
    usr = invite_users[0].to_dict()
    usr = {k: v for k, v in usr.items() if k in ["id", "avatar", "email", "nickname"]}

    return get_json_result(data=usr)


@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE'])
@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE']) # noqa: F821
@login_required
def rm(tenant_id, user_id):
    if current_user.id != tenant_id and current_user.id != user_id:
@@ -96,7 +100,7 @@ def rm(tenant_id, user_id):
        return server_error_response(e)


@manager.route("/list", methods=["GET"])
@manager.route("/list", methods=["GET"]) # noqa: F821
@login_required
def tenant_list():
    try:
@@ -108,7 +112,7 @@ def tenant_list():
        return server_error_response(e)


@manager.route("/agree/<tenant_id>", methods=["PUT"])
@manager.route("/agree/<tenant_id>", methods=["PUT"]) # noqa: F821
@login_required
def agree(tenant_id):
    try:

@@ -44,7 +44,7 @@ from api.db.services.file_service import FileService
from api.utils.api_utils import get_json_result, construct_response


@manager.route("/login", methods=["POST", "GET"])
@manager.route("/login", methods=["POST", "GET"]) # noqa: F821
def login():
    """
    User login endpoint.
@@ -115,7 +115,7 @@ def login():
    )


@manager.route("/github_callback", methods=["GET"])
@manager.route("/github_callback", methods=["GET"]) # noqa: F821
def github_callback():
    """
    GitHub OAuth callback endpoint.
@@ -200,7 +200,7 @@ def github_callback():
    return redirect("/?auth=%s" % user.get_id())


@manager.route("/feishu_callback", methods=["GET"])
@manager.route("/feishu_callback", methods=["GET"]) # noqa: F821
def feishu_callback():
    """
    Feishu OAuth callback endpoint.
@@ -330,12 +330,12 @@ def user_info_from_github(access_token):
        headers=headers,
    ).json()
    user_info["email"] = next(
        (email for email in email_info if email["primary"] == True), None
        (email for email in email_info if email["primary"]), None
    )["email"]
    return user_info


@manager.route("/logout", methods=["GET"])
@manager.route("/logout", methods=["GET"]) # noqa: F821
@login_required
def log_out():
    """
@@ -357,7 +357,7 @@ def log_out():
    return get_json_result(data=True)


@manager.route("/setting", methods=["POST"])
@manager.route("/setting", methods=["POST"]) # noqa: F821
@login_required
def setting_user():
    """
@@ -429,7 +429,7 @@ def setting_user():
    )


@manager.route("/info", methods=["GET"])
@manager.route("/info", methods=["GET"]) # noqa: F821
@login_required
def user_profile():
    """
@@ -531,7 +531,7 @@ def user_register(user_id, user):
    return UserService.query(email=user["email"])


@manager.route("/register", methods=["POST"])
@manager.route("/register", methods=["POST"]) # noqa: F821
@validate_request("nickname", "email", "password")
def user_add():
    """
@@ -566,7 +566,7 @@ def user_add():
    email_address = req["email"]

    # Validate the email address
    if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,5}$", email_address):
    if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", email_address):
        return get_json_result(
            data=False,
            message=f"Invalid email address: {email_address}!",
@@ -617,7 +617,7 @@ def user_add():
    )


@manager.route("/tenant_info", methods=["GET"])
@manager.route("/tenant_info", methods=["GET"]) # noqa: F821
@login_required
def tenant_info():
    """
@@ -655,7 +655,7 @@ def tenant_info():
    return server_error_response(e)


@manager.route("/set_tenant_info", methods=["POST"])
@manager.route("/set_tenant_info", methods=["POST"]) # noqa: F821
@login_required
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
def set_tenant_info():

@@ -89,6 +89,7 @@ class ParserType(StrEnum):
    AUDIO = "audio"
    EMAIL = "email"
    KG = "knowledge_graph"
    TAG = "tag"


class FileSource(StrEnum):

@@ -31,7 +31,6 @@ from peewee import (
)
from playhouse.pool import PooledMySQLDatabase, PooledPostgresqlDatabase

from api.db import SerializedType, ParserType
from api import settings
from api import utils
@@ -130,7 +129,7 @@ def is_continuous_field(cls: typing.Type) -> bool:
    for p in cls.__bases__:
        if p in CONTINUOUS_FIELD_TYPE:
            return True
        elif p != Field and p != object:
        elif p is not Field and p is not object:
            if is_continuous_field(p):
                return True
    else:
@@ -703,6 +702,7 @@ class Knowledgebase(DataBaseModel):
        default=ParserType.NAIVE.value,
        index=True)
    parser_config = JSONField(null=False, default={"pages": [[1, 1000000]]})
    pagerank = IntegerField(default=0, index=False)
    status = CharField(
        max_length=1,
        null=True,
@@ -760,6 +760,7 @@ class Document(DataBaseModel):
        default="")
    process_begin_at = DateTimeField(null=True, index=True)
    process_duation = FloatField(default=0)
    meta_fields = JSONField(null=True, default={})

    run = CharField(
        max_length=1,
@@ -842,8 +843,8 @@ class Task(DataBaseModel):
    id = CharField(max_length=32, primary_key=True)
    doc_id = CharField(max_length=32, null=False, index=True)
    from_page = IntegerField(default=0)

    to_page = IntegerField(default=100000000)
    task_type = CharField(max_length=32, null=False, default="")

    begin_at = DateTimeField(null=True, index=True)
    process_duation = FloatField(default=0)
@@ -854,6 +855,8 @@ class Task(DataBaseModel):
        help_text="process message",
        default="")
    retry_count = IntegerField(default=0)
    digest = TextField(null=True, help_text="task digest", default="")
    chunk_ids = LongTextField(null=True, help_text="chunk ids", default="")


class Dialog(DataBaseModel):
@@ -923,6 +926,7 @@ class Conversation(DataBaseModel):
    name = CharField(max_length=255, null=True, help_text="converastion name", index=True)
    message = JSONField(null=True)
    reference = JSONField(null=True, default=[])
    user_id = CharField(max_length=255, null=True, help_text="user_id", index=True)

    class Meta:
        db_table = "conversation"
@@ -931,8 +935,9 @@ class Conversation(DataBaseModel):
class APIToken(DataBaseModel):
    tenant_id = CharField(max_length=32, null=False, index=True)
    token = CharField(max_length=255, null=False, index=True)
    dialog_id = CharField(max_length=32, null=False, index=True)
    dialog_id = CharField(max_length=32, null=True, index=True)
    source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)
    beta = CharField(max_length=255, null=True, index=True)

    class Meta:
        db_table = "api_token"
@@ -947,7 +952,7 @@ class API4Conversation(DataBaseModel):
    reference = JSONField(null=True, default=[])
    tokens = IntegerField(default=0)
    source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)

    dsl = JSONField(null=True, default={})
    duration = FloatField(default=0, index=True)
    round = IntegerField(default=0, index=True)
    thumb_up = IntegerField(default=0, index=True)
@@ -1046,11 +1051,6 @@ def migrate_db():
        )
    except Exception:
        pass
    try:
        DB.execute_sql('ALTER TABLE llm DROP PRIMARY KEY;')
        DB.execute_sql('ALTER TABLE llm ADD PRIMARY KEY (llm_name,fid);')
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column('task', 'retry_count', IntegerField(default=0))
@@ -1066,7 +1066,59 @@ def migrate_db():
        pass
    try:
        migrate(
            migrator.add_column("tenant_llm","max_tokens",IntegerField(default=8192,index=True))
            migrator.add_column("tenant_llm", "max_tokens", IntegerField(default=8192, index=True))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("api_4_conversation", "dsl", JSONField(null=True, default={}))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("knowledgebase", "pagerank", IntegerField(default=0, index=False))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("api_token", "beta", CharField(max_length=255, null=True, index=True))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("task", "digest", TextField(null=True, help_text="task digest", default=""))
        )
    except Exception:
        pass

    try:
        migrate(
            migrator.add_column("task", "chunk_ids", LongTextField(null=True, help_text="chunk ids", default=""))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("conversation", "user_id",
                                CharField(max_length=255, null=True, help_text="user_id", index=True))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("document", "meta_fields",
                                JSONField(null=True, default={}))
        )
    except Exception:
        pass
    try:
        migrate(
            migrator.add_column("task", "task_type",
                                CharField(max_length=32, null=False, default=""))
        )
    except Exception:
        pass
|
||||
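Every hunk in migrate_db() above follows the same defensive idiom: attempt a schema change and swallow the failure if the column already exists, so the migration can be re-run on every startup. A minimal, hypothetical sketch of that idiom outside RAGFlow, assuming peewee's playhouse.migrate API (the helper name safe_add_column is illustrative, not part of the repository):

    from peewee import SqliteDatabase, IntegerField, Model, CharField
    from playhouse.migrate import SqliteMigrator, migrate

    db = SqliteDatabase(":memory:")

    class Task(Model):
        name = CharField()

        class Meta:
            database = db

    db.create_tables([Task])
    migrator = SqliteMigrator(db)

    def safe_add_column(table, column, field):
        # Swallow the failure when the column already exists, so the
        # migration can run on every startup -- same idiom as migrate_db().
        try:
            migrate(migrator.add_column(table, column, field))
        except Exception:
            pass

    safe_add_column("task", "retry_count", IntegerField(default=0))
    safe_add_column("task", "retry_count", IntegerField(default=0))  # second run: silent no-op

The trade-off of this pattern is that it also hides genuine migration failures; it buys idempotence at the cost of diagnostics.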
@@ -133,7 +133,7 @@ def init_llm_factory():
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "cohere"], {"llm_factory": "Cohere"})
    TenantService.filter_update([1 == 1], {
        "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
        "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"})
    ## Insert the two OpenAI embedding models for the current OpenAI users.
    # print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])

@@ -153,24 +153,17 @@ def init_llm_factory():
            break
    for kb_id in KnowledgebaseService.get_all_ids():
        KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
    """
    drop table llm;
    drop table llm_factories;
    update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph';
    alter table knowledgebase modify avatar longtext;
    alter table user modify avatar longtext;
    alter table dialog modify icon longtext;
    """


def add_graph_templates():
    dir = os.path.join(get_project_base_directory(), "agent", "templates")
    for fnm in os.listdir(dir):
        try:
            cnvs = json.load(open(os.path.join(dir, fnm), "r"))
            cnvs = json.load(open(os.path.join(dir, fnm), "r", encoding="utf-8"))
            try:
                CanvasTemplateService.save(**cnvs)
            except:
            except Exception:
                CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
        except Exception:
            logging.exception("Add graph templates error: ")
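The template loader above uses a save-then-update fallback as a poor man's upsert, and the hunk also pins the file encoding to UTF-8. A self-contained sketch of the same idiom against an in-memory store (the store dict stands in for CanvasTemplateService; names are illustrative):

    import json
    import os

    def load_templates(template_dir, store):
        for fnm in os.listdir(template_dir):
            # Explicit encoding matters: templates contain non-ASCII prompts,
            # and the platform default codec may not be UTF-8 (e.g. on Windows).
            with open(os.path.join(template_dir, fnm), "r", encoding="utf-8") as f:
                cnvs = json.load(f)
            if cnvs["id"] in store:      # already imported: update in place
                store[cnvs["id"]].update(cnvs)
            else:                        # first import: insert
                store[cnvs["id"]] = cnvs

    store = {}
    # load_templates("agent/templates", store)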
@@ -15,13 +15,14 @@
#
import pathlib
import re
from .user_service import UserService
from .user_service import UserService as UserService


def duplicate_name(query_func, **kwargs):
    fnm = kwargs["name"]
    objs = query_func(**kwargs)
    if not objs: return fnm
    if not objs:
        return fnm
    ext = pathlib.Path(fnm).suffix  # .jpg
    nm = re.sub(r"%s$" % ext, "", fnm)
    r = re.search(r"\(([0-9]+)\)$", nm)

@@ -31,8 +32,8 @@ def duplicate_name(query_func, **kwargs):
        nm = re.sub(r"\([0-9]+\)$", "", nm)
    c += 1
    nm = f"{nm}({c})"
    if ext: nm += f"{ext}"
    if ext:
        nm += f"{ext}"

    kwargs["name"] = nm
    return duplicate_name(query_func, **kwargs)
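duplicate_name probes names of the form name(1).ext, name(2).ext, ... until the query reports no clash. A runnable sketch of the same recursion against an in-memory set, so the behaviour is easy to verify (the query layer here is a plain lambda, not the repository's service classes):

    import pathlib
    import re

    def duplicate_name(query_func, **kwargs):
        # Same algorithm as the hunk above: bump "(n)" before the extension
        # until query_func reports that the candidate name is free.
        fnm = kwargs["name"]
        if not query_func(**kwargs):
            return fnm
        ext = pathlib.Path(fnm).suffix
        nm = re.sub(r"%s$" % ext, "", fnm)
        r = re.search(r"\(([0-9]+)\)$", nm)
        c = 0
        if r:
            c = int(r.group(1))
            nm = re.sub(r"\([0-9]+\)$", "", nm)
        c += 1
        nm = f"{nm}({c})"
        if ext:
            nm += f"{ext}"
        kwargs["name"] = nm
        return duplicate_name(query_func, **kwargs)

    taken = {"report.pdf", "report(1).pdf"}
    print(duplicate_name(lambda name: name in taken, name="report.pdf"))  # report(2).pdf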
@@ -39,6 +39,28 @@ class APITokenService(CommonService):
class API4ConversationService(CommonService):
    model = API4Conversation

    @classmethod
    @DB.connection_context()
    def get_list(cls, dialog_id, tenant_id,
                 page_number, items_per_page,
                 orderby, desc, id, user_id=None, include_dsl=True):
        if include_dsl:
            sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
        else:
            fields = [field for field in cls.model._meta.fields.values() if field.name != 'dsl']
            sessions = cls.model.select(*fields).where(cls.model.dialog_id == dialog_id)
        if id:
            sessions = sessions.where(cls.model.id == id)
        if user_id:
            sessions = sessions.where(cls.model.user_id == user_id)
        if desc:
            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
        else:
            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
        sessions = sessions.paginate(page_number, items_per_page)

        return list(sessions.dicts())

    @classmethod
    @DB.connection_context()
    def append_message(cls, id, conversation):

@@ -48,7 +70,8 @@ class API4ConversationService(CommonService):
    @classmethod
    @DB.connection_context()
    def stats(cls, tenant_id, from_date, to_date, source=None):
        if len(to_date) == 10: to_date += " 23:59:59"
        if len(to_date) == 10:
            to_date += " 23:59:59"
        return cls.model.select(
            cls.model.create_date.truncate("day").alias("dt"),
            peewee.fn.COUNT(
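The include_dsl switch in get_list above drops the heavy dsl JSON column from the SQL projection instead of fetching and discarding it. A runnable sketch of that column-exclusion trick in plain peewee (the model and data are made up):

    from peewee import Model, CharField, TextField, SqliteDatabase

    db = SqliteDatabase(":memory:")

    class Session(Model):
        name = CharField()
        dsl = TextField(default="")  # potentially very large payload

        class Meta:
            database = db

    db.create_tables([Session])
    Session.create(name="s1", dsl="x" * 10_000)

    # Select every field except `dsl`, as in get_list(include_dsl=False).
    fields = [f for f in Session._meta.fields.values() if f.name != "dsl"]
    rows = list(Session.select(*fields).dicts())
    print(rows)  # [{'id': 1, 'name': 's1'}] -- no dsl in the projection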
@@ -13,10 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import peewee
from api.db.db_models import DB, API4Conversation, APIToken, Dialog, CanvasTemplate, UserCanvas
import json
import time
import traceback
from uuid import uuid4
from agent.canvas import Canvas
from api.db.db_models import DB, CanvasTemplate, UserCanvas, API4Conversation
from api.db.services.api_service import API4ConversationService
from api.db.services.common_service import CommonService
from api.db.services.conversation_service import structure_answer
from api.utils import get_uuid


class CanvasTemplateService(CommonService):

@@ -25,3 +31,137 @@ class CanvasTemplateService(CommonService):

class UserCanvasService(CommonService):
    model = UserCanvas

    @classmethod
    @DB.connection_context()
    def get_list(cls, tenant_id,
                 page_number, items_per_page, orderby, desc, id, title):
        agents = cls.model.select()
        if id:
            agents = agents.where(cls.model.id == id)
        if title:
            agents = agents.where(cls.model.title == title)
        agents = agents.where(cls.model.user_id == tenant_id)
        if desc:
            agents = agents.order_by(cls.model.getter_by(orderby).desc())
        else:
            agents = agents.order_by(cls.model.getter_by(orderby).asc())

        agents = agents.paginate(page_number, items_per_page)

        return list(agents.dicts())


def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
    e, cvs = UserCanvasService.get_by_id(agent_id)
    assert e, "Agent not found."
    assert cvs.user_id == tenant_id, "You do not own the agent."
    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
    canvas = Canvas(cvs.dsl, tenant_id)
    canvas.reset()
    message_id = str(uuid4())
    if not session_id:
        query = canvas.get_preset_param()
        if query:
            for ele in query:
                if not ele["optional"]:
                    if not kwargs.get(ele["key"]):
                        assert False, f"`{ele['key']}` is required"
                    ele["value"] = kwargs[ele["key"]]
                if ele["optional"]:
                    if kwargs.get(ele["key"]):
                        ele["value"] = kwargs[ele['key']]
                    else:
                        if "value" in ele:
                            ele.pop("value")
        cvs.dsl = json.loads(str(canvas))
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": cvs.id,
            "user_id": kwargs.get("user_id", "") if isinstance(kwargs, dict) else "",
            "message": [{"role": "assistant", "content": canvas.get_prologue(), "created_at": time.time()}],
            "source": "agent",
            "dsl": cvs.dsl
        }
        API4ConversationService.save(**conv)
        if query:
            yield "data:" + json.dumps({"code": 0,
                                        "message": "",
                                        "data": {
                                            "session_id": session_id,
                                            "answer": canvas.get_prologue(),
                                            "reference": [],
                                            "param": canvas.get_preset_param()
                                        }
                                        },
                                       ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
            return
        else:
            conv = API4Conversation(**conv)
    else:
        e, conv = API4ConversationService.get_by_id(session_id)
        assert e, "Session not found!"
        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
    canvas.messages.append({"role": "user", "content": question, "id": message_id})
    canvas.add_user_input(question)
    if not conv.message:
        conv.message = []
    conv.message.append({
        "role": "user",
        "content": question,
        "id": message_id
    })
    if not conv.reference:
        conv.reference = []
    conv.reference.append({"chunks": [], "doc_aggs": []})

    final_ans = {"reference": [], "content": ""}
    if stream:
        try:
            for ans in canvas.run(stream=stream):
                if ans.get("running_status"):
                    yield "data:" + json.dumps({"code": 0, "message": "",
                                                "data": {"answer": ans["content"],
                                                         "running_status": True}},
                                               ensure_ascii=False) + "\n\n"
                    continue
                for k in ans.keys():
                    final_ans[k] = ans[k]
                ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                ans = structure_answer(conv, ans, message_id, session_id)
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                           ensure_ascii=False) + "\n\n"

            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "created_at": time.time(), "id": message_id})
            canvas.history.append(("assistant", final_ans["content"]))
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.append_message(conv.id, conv.to_dict())
        except Exception as e:
            traceback.print_exc()
            conv.dsl = json.loads(str(canvas))
            API4ConversationService.append_message(conv.id, conv.to_dict())
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    else:
        for answer in canvas.run(stream=False):
            if answer.get("running_status"):
                continue
            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            conv.dsl = json.loads(str(canvas))

            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
            result = structure_answer(conv, result, message_id, session_id)
            API4ConversationService.append_message(conv.id, conv.to_dict())
            yield result
            break

@@ -115,7 +115,7 @@ class CommonService:
        try:
            obj = cls.model.query(id=pid)[0]
            return True, obj
        except Exception as e:
        except Exception:
            return False, None

    @classmethod
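Both completion generators frame their output as server-sent events: each chunk is "data:" plus a JSON object plus a blank line, and a final {"data": true} sentinel closes the stream. A hedged sketch of a client loop that consumes such a stream (the endpoint URL and route are illustrative, not taken from the repository; assumes the third-party requests package):

    import json
    import requests  # pip install requests

    # Hypothetical endpoint; the real route lives in RAGFlow's HTTP API layer.
    resp = requests.post("http://localhost:9380/api/v1/agents/xxx/completions",
                         json={"question": "hi", "stream": True}, stream=True)
    for raw in resp.iter_lines():
        if not raw or not raw.startswith(b"data:"):
            continue  # skip blank separators between events
        event = json.loads(raw[len(b"data:"):])
        if event["data"] is True:    # end-of-stream sentinel emitted above
            break
        print(event["data"]["answer"])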
api/db/services/conversation_service.py  (new file, 224 lines)
@@ -0,0 +1,224 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from uuid import uuid4
from api.db import StatusEnum
from api.db.db_models import Conversation, DB
from api.db.services.api_service import API4ConversationService
from api.db.services.common_service import CommonService
from api.db.services.dialog_service import DialogService, chat
from api.utils import get_uuid
import json

from rag.prompts import chunks_format


class ConversationService(CommonService):
    model = Conversation

    @classmethod
    @DB.connection_context()
    def get_list(cls, dialog_id, page_number, items_per_page, orderby, desc, id, name, user_id=None):
        sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
        if id:
            sessions = sessions.where(cls.model.id == id)
        if name:
            sessions = sessions.where(cls.model.name == name)
        if user_id:
            sessions = sessions.where(cls.model.user_id == user_id)
        if desc:
            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
        else:
            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())

        sessions = sessions.paginate(page_number, items_per_page)

        return list(sessions.dicts())


def structure_answer(conv, ans, message_id, session_id):
    reference = ans["reference"]
    if not isinstance(reference, dict):
        reference = {}
        ans["reference"] = {}

    chunk_list = chunks_format(reference)

    reference["chunks"] = chunk_list
    ans["id"] = message_id
    ans["session_id"] = session_id

    if not conv:
        return ans

    if not conv.message:
        conv.message = []
    if not conv.message or conv.message[-1].get("role", "") != "assistant":
        conv.message.append({"role": "assistant", "content": ans["answer"], "created_at": time.time(), "id": message_id})
    else:
        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "created_at": time.time(), "id": message_id}
    if conv.reference:
        conv.reference[-1] = reference
    return ans


def completion(tenant_id, chat_id, question, name="New session", session_id=None, stream=True, **kwargs):
    assert name, "`name` can not be empty."
    dia = DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
    assert dia, "You do not own the chat."

    if not session_id:
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": chat_id,
            "name": name,
            "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue"), "created_at": time.time()}],
            "user_id": kwargs.get("user_id", "")
        }
        ConversationService.save(**conv)
        yield "data:" + json.dumps({"code": 0, "message": "",
                                    "data": {
                                        "answer": conv["message"][0]["content"],
                                        "reference": {},
                                        "audio_binary": None,
                                        "id": None,
                                        "session_id": session_id
                                    }},
                                   ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
        return

    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
    if not conv:
        raise LookupError("Session does not exist")

    conv = conv[0]
    msg = []
    question = {
        "content": question,
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    for m in conv.message:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    if stream:
        try:
            for ans in chat(dia, msg, True, **kwargs):
                ans = structure_answer(conv, ans, message_id, session_id)
                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"

    else:
        answer = None
        for ans in chat(dia, msg, False, **kwargs):
            answer = structure_answer(conv, ans, message_id, session_id)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        yield answer


def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwargs):
    e, dia = DialogService.get_by_id(dialog_id)
    assert e, "Dialog not found"
    if not session_id:
        session_id = get_uuid()
        conv = {
            "id": session_id,
            "dialog_id": dialog_id,
            "user_id": kwargs.get("user_id", ""),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"], "created_at": time.time()}]
        }
        API4ConversationService.save(**conv)
        yield "data:" + json.dumps({"code": 0, "message": "",
                                    "data": {
                                        "answer": conv["message"][0]["content"],
                                        "reference": {},
                                        "audio_binary": None,
                                        "id": None,
                                        "session_id": session_id
                                    }},
                                   ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
        return
    else:
        session_id = session_id
    e, conv = API4ConversationService.get_by_id(session_id)
    assert e, "Session not found!"

    if not conv.message:
        conv.message = []
    messages = conv.message
    question = {
        "role": "user",
        "content": question,
        "id": str(uuid4())
    }
    messages.append(question)

    msg = []
    for m in messages:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    if not msg[-1].get("id"):
        msg[-1]["id"] = get_uuid()
    message_id = msg[-1]["id"]

    if not conv.reference:
        conv.reference = []
    conv.reference.append({"chunks": [], "doc_aggs": []})

    if stream:
        try:
            for ans in chat(dia, msg, True, **kwargs):
                ans = structure_answer(conv, ans, message_id, session_id)
                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                           ensure_ascii=False) + "\n\n"
            API4ConversationService.append_message(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"code": 500, "message": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

    else:
        answer = None
        for ans in chat(dia, msg, False, **kwargs):
            answer = structure_answer(conv, ans, message_id, session_id)
            API4ConversationService.append_message(conv.id, conv.to_dict())
            break
        yield answer
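structure_answer keeps exactly one trailing assistant message per turn: it appends on the first streamed chunk and overwrites on every later one, so cumulative deltas never pile up in the history. A stripped-down sketch of that invariant on plain dicts, with no database involved:

    import time

    def upsert_assistant(message, answer, message_id):
        # Mirror of the append-or-replace logic in structure_answer above:
        # the last history entry is always the *current* assistant answer.
        entry = {"role": "assistant", "content": answer,
                 "created_at": time.time(), "id": message_id}
        if not message or message[-1].get("role") != "assistant":
            message.append(entry)
        else:
            message[-1] = entry

    history = [{"role": "user", "content": "hi", "id": "m1"}]
    for delta in ("He", "Hello", "Hello!"):
        upsert_assistant(history, delta, "m2")
    print(len(history), history[-1]["content"])  # 2 Hello!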
@@ -15,23 +15,24 @@
#
import logging
import binascii
import os
import json
import time
from functools import partial
import re
from copy import deepcopy
from timeit import default_timer as timer
import datetime
from datetime import timedelta
from api.db import LLMType, ParserType,StatusEnum
from api.db.db_models import Dialog, Conversation,DB
from agentic_reasoning import DeepResearcher
from api.db import LLMType, ParserType, StatusEnum
from api.db.db_models import Dialog, DB
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.db.services.llm_service import TenantLLMService, LLMBundle
from api import settings
from rag.app.resume import forbidden_select_fields4resume
from rag.app.tag import label_question
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder
from api.utils.file_utils import get_project_base_directory
from rag.prompts import kb_prompt, message_fit_in, llm_id2llm_type, keyword_extraction, full_question, chunks_format
from rag.utils import rmSpace, num_tokens_from_string
from rag.utils.tavily_conn import Tavily


class DialogService(CommonService):

@@ -40,14 +41,14 @@ class DialogService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_list(cls, tenant_id,
                 page_number, items_per_page, orderby, desc, id , name):
                 page_number, items_per_page, orderby, desc, id, name):
        chats = cls.model.select()
        if id:
            chats = chats.where(cls.model.id == id)
        if name:
            chats = chats.where(cls.model.name == name)
        chats = chats.where(
            (cls.model.tenant_id == tenant_id)
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if desc:

@@ -60,118 +61,84 @@ class DialogService(CommonService):
        return list(chats.dicts())
class ConversationService(CommonService):
    model = Conversation
def chat_solo(dialog, messages, stream=True):
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    @classmethod
    @DB.connection_context()
    def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
        sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
        if id:
            sessions = sessions.where(cls.model.id == id)
        if name:
            sessions = sessions.where(cls.model.name == name)
        if desc:
            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
        else:
            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())

        sessions = sessions.paginate(page_number, items_per_page)

        return list(sessions.dicts())


def message_fit_in(msg, max_length=4000):
    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append(
                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    if len(msg) > 1:
        msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0]["content"])
    l = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + l) > 0.8:
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[:max_length - l])
        msg[0]["content"] = m
        return max_length, msg

    m = msg_[1]["content"]
    m = encoder.decode(encoder.encode(m)[:max_length - l])
    msg[1]["content"] = m
    return max_length, msg


def llm_id2llm_type(llm_id):
    llm_id = llm_id.split("@")[0]
    fnm = os.path.join(get_project_base_directory(), "conf")
    llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
    for llm_factory in llm_factories["factory_llm_infos"]:
        for llm in llm_factory["llm"]:
            if llm_id == llm["llm_name"]:
                return llm["model_type"].strip(",")[-1]
    prompt_config = dialog.prompt_config
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
           for m in messages if m["role"] != "system"]
    if stream:
        last_ans = ""
        for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
            answer = ans
            delta_ans = ans[len(last_ans):]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
    else:
        answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
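The removed message_fit_in (now imported from rag.prompts) keeps the system messages plus the latest user turn under a token budget, then truncates whichever side dominates. A simplified sketch of that budgeting idea, using plain len() as a stand-in token counter where the real code uses a tiktoken-style encoder:

    def fit_messages(msg, max_length=4000, count=len):
        # Keep system messages plus the final user turn, like message_fit_in.
        if sum(count(m["content"]) for m in msg) < max_length:
            return msg
        kept = [m for m in msg[:-1] if m["role"] == "system"]
        if len(msg) > 1:
            kept.append(msg[-1])
        # Still over budget: truncate whichever side holds most of the weight.
        sys_len = count(kept[0]["content"])
        usr_len = count(kept[-1]["content"])
        if sys_len / (sys_len + usr_len) > 0.8:
            kept[0]["content"] = kept[0]["content"][:max_length - usr_len]
        else:
            kept[-1]["content"] = kept[-1]["content"][:max_length - sys_len]
        return kept

    msgs = [{"role": "system", "content": "x" * 5000},
            {"role": "user", "content": "hello"}]
    print(sum(len(m["content"]) for m in fit_messages(msgs)))  # ~4000, within budget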
def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    st = timer()
    tmp = dialog.llm_id.split("@")
    fid = None
    llm_id = tmp[0]
    if len(tmp)>1: fid = tmp[1]
    if not dialog.kb_ids:
        for ans in chat_solo(dialog, messages, stream):
            yield ans
        return

    llm = LLMService.query(llm_name=llm_id) if not fid else LLMService.query(llm_name=llm_id, fid=fid)
    if not llm:
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id) if not fid else \
            TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=llm_id, llm_factory=fid)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
    max_tokens = 8192
    chat_start_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        max_tokens = llm[0].max_tokens
        llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    max_tokens = llm_model_config.get("max_tokens", 8192)

    check_llm_ts = timer()

    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
    embedding_list = list(set([kb.embd_id for kb in kbs]))
    if len(embedding_list) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = settings.retrievaler if not is_kg else settings.kg_retrievaler
    embedding_model_name = embedding_list[0]

    retriever = settings.retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]
        for m in messages[:-1]:
            if "doc_ids" in m:
                attachments.extend(m["doc_ids"])

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    create_retriever_ts = timer()

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_model_name)
    if not embd_mdl:
        raise LookupError("Embedding model(%s) not found" % embd_nms[0])
        raise LookupError("Embedding model(%s) not found" % embedding_model_name)

    bind_embedding_ts = timer()

    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    bind_llm_ts = timer()

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
@@ -198,39 +165,73 @@ def chat(dialog, messages, stream=True, **kwargs):
        questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
    else:
        questions = questions[-1:]
    refineQ_tm = timer()
    keyword_tm = timer()

    refine_question_ts = timer()

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    for _ in range(len(questions) // 2):
        questions.append(questions[-1])
    bind_reranker_ts = timer()
    generate_keyword_ts = bind_reranker_ts
    thought = ""
    kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}

    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
        knowledges = []
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
            keyword_tm = timer()
            generate_keyword_ts = timer()

        tenant_ids = list(set([kb.tenant_id for kb in kbs]))
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
                                 dialog.similarity_threshold,
                                 dialog.vector_similarity_weight,
                                 doc_ids=attachments,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
        knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

        knowledges = []
        if prompt_config.get("reasoning", False):
            reasoner = DeepResearcher(chat_mdl,
                                      prompt_config,
                                      partial(retriever.retrieval, embd_mdl=embd_mdl, tenant_ids=tenant_ids, kb_ids=dialog.kb_ids, page=1, page_size=dialog.top_n, similarity_threshold=0.2, vector_similarity_weight=0.3))

            for think in reasoner.thinking(kbinfos, " ".join(questions)):
                if isinstance(think, str):
                    thought = think
                    knowledges = [t for t in think.split("\n") if t]
                elif stream:
                    yield think
        else:
            kbinfos = retriever.retrieval(" ".join(questions), embd_mdl, tenant_ids, dialog.kb_ids, 1, dialog.top_n,
                                          dialog.similarity_threshold,
                                          dialog.vector_similarity_weight,
                                          doc_ids=attachments,
                                          top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl,
                                          rank_feature=label_question(" ".join(questions), kbs)
                                          )
            if prompt_config.get("tavily_api_key"):
                tav = Tavily(prompt_config["tavily_api_key"])
                tav_res = tav.retrieve_chunks(" ".join(questions))
                kbinfos["chunks"].extend(tav_res["chunks"])
                kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
            if prompt_config.get("use_kg"):
                ck = settings.kg_retrievaler.retrieval(" ".join(questions),
                                                       tenant_ids,
                                                       dialog.kb_ids,
                                                       embd_mdl,
                                                       LLMBundle(dialog.tenant_id, LLMType.CHAT))
                if ck["content_with_weight"]:
                    kbinfos["chunks"].insert(0, ck)

            knowledges = kb_prompt(kbinfos, max_tokens)

    logging.debug(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
    retrieval_tm = timer()

    retrieval_ts = timer()
    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions), "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n\n------\n\n".join(knowledges)
    kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
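The else-branch above layers three sources into one kbinfos dict: vector retrieval, optional Tavily web results appended at the back, and an optional knowledge-graph chunk pushed to the front. A minimal sketch of that merge order on plain dicts, with the retriever calls stubbed out:

    def merge_sources(kb_hits, web_hits=None, kg_chunk=None):
        # Mirrors the merge in chat(): web chunks are appended, while the
        # knowledge-graph summary (if any) goes first, so it is the most
        # likely to survive later token-budget truncation in kb_prompt.
        kbinfos = {"total": len(kb_hits), "chunks": list(kb_hits), "doc_aggs": []}
        if web_hits:
            kbinfos["chunks"].extend(web_hits)
        if kg_chunk and kg_chunk.get("content_with_weight"):
            kbinfos["chunks"].insert(0, kg_chunk)
        return kbinfos

    merged = merge_sources(
        [{"content_with_weight": "vector hit"}],
        web_hits=[{"content_with_weight": "tavily hit"}],
        kg_chunk={"content_with_weight": "graph summary"})
    print([c["content_with_weight"] for c in merged["chunks"]])
    # ['graph summary', 'vector hit', 'tavily hit']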
@@ -239,7 +240,6 @@ def chat(dialog, messages, stream=True, **kwargs):
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]
    prompt += "\n\n### Query:\n%s" % " ".join(questions)

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(

@@ -247,21 +247,28 @@ def chat(dialog, messages, stream=True, **kwargs):
            max_tokens - used_token_count)

    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_tm
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions

        refs = []
        ans = answer.split("</think>")
        think = ""
        if len(ans) == 2:
            think = ans[0] + "</think>"
            answer = ans[1]
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retr.insert_citations(answer,
                                                [ck["content_ltks"]
                                                 for ck in kbinfos["chunks"]],
                                                [ck["vector"]
                                                 for ck in kbinfos["chunks"]],
                                                embd_mdl,
                                                tkweight=1 - dialog.vector_similarity_weight,
                                                vtweight=dialog.vector_similarity_weight)
            answer, idx = retriever.insert_citations(answer,
                                                     [ck["content_ltks"]
                                                      for ck in kbinfos["chunks"]],
                                                     [ck["vector"]
                                                      for ck in kbinfos["chunks"]],
                                                     embd_mdl,
                                                     tkweight=1 - dialog.vector_similarity_weight,
                                                     vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs: recall_docs = kbinfos["doc_aggs"]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

        refs = deepcopy(kbinfos)

@@ -270,46 +277,59 @@ def chat(dialog, messages, stream=True, **kwargs):
            del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        done_tm = timer()
        prompt += "\n\n### Elapsed\n - Refine Question: %.1f ms\n - Keywords: %.1f ms\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
            (refineQ_tm - st) * 1000, (keyword_tm - refineQ_tm) * 1000, (retrieval_tm - keyword_tm) * 1000,
            (done_tm - retrieval_tm) * 1000)
        return {"answer": answer, "reference": refs, "prompt": prompt}
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        finish_chat_ts = timer()

        total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
        check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
        create_retriever_time_cost = (create_retriever_ts - check_llm_ts) * 1000
        bind_embedding_time_cost = (bind_embedding_ts - create_retriever_ts) * 1000
        bind_llm_time_cost = (bind_llm_ts - bind_embedding_ts) * 1000
        refine_question_time_cost = (refine_question_ts - bind_llm_ts) * 1000
        bind_reranker_time_cost = (bind_reranker_ts - refine_question_ts) * 1000
        generate_keyword_time_cost = (generate_keyword_ts - bind_reranker_ts) * 1000
        retrieval_time_cost = (retrieval_ts - generate_keyword_ts) * 1000
        generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000

        prompt += "\n\n### Query:\n%s" % " ".join(questions)
        prompt = f"{prompt}\n\n - Total: {total_time_cost:.1f}ms\n - Check LLM: {check_llm_time_cost:.1f}ms\n - Create retriever: {create_retriever_time_cost:.1f}ms\n - Bind embedding: {bind_embedding_time_cost:.1f}ms\n - Bind LLM: {bind_llm_time_cost:.1f}ms\n - Tune question: {refine_question_time_cost:.1f}ms\n - Bind reranker: {bind_reranker_time_cost:.1f}ms\n - Generate keyword: {generate_keyword_time_cost:.1f}ms\n - Retrieval: {retrieval_time_cost:.1f}ms\n - Generate answer: {generate_result_time_cost:.1f}ms"
        return {"answer": think+answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            if thought:
                ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
            answer = ans
            delta_ans = ans[len(last_ans):]
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
            yield {"answer": thought+answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
            yield {"answer": thought+answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
        yield decorate_answer(thought+answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        logging.debug("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        user_content = msg[-1].get("content", "[content not available]")
        logging.debug("User: {}|Assistant: {}".format(user_content, answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res
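The streaming loop above throttles SSE traffic: cumulative answers whose new tail is shorter than 16 tokens are buffered until more text arrives, and a final flush emits whatever remains. A self-contained sketch of that throttle, counting words instead of tokens:

    def throttle(chunks, min_tokens=16, count=lambda s: len(s.split())):
        # `chunks` yields cumulative answers, as chat_streamly does above.
        last = ""
        answer = ""
        for ans in chunks:
            answer = ans
            delta = ans[len(last):]
            if count(delta) < min_tokens:
                continue            # delta too small: keep buffering
            last = answer
            yield delta
        delta = answer[len(last):]
        if delta:                   # final flush of the buffered tail
            yield delta

    stream = ["one two", "one two three four five", "one two three four five six"]
    print(list(throttle(stream, min_tokens=3)))
    # ['one two three four five', ' six']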
def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    sys_prompt = "你是一个DBA。你需要这对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
    user_promt = """
表名:{};
数据库表字段说明如下:
    sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
    user_prompt = """
Table name: {};
Table of database fields are as follows:
{}

问题如下:
Question are as follows:
{}
请写出SQL, 且只要SQL,不要有其他说明及文字。
Please write the SQL, only SQL, without any other explanations or text.
""".format(
        index_name(tenant_id),
        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),

@@ -318,10 +338,11 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_promt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
            "temperature": 0.06})
        logging.debug(f"{question} ==> {user_promt} get SQL: {sql}")
        sql = re.sub(r"<think>.*</think>", "", sql, flags=re.DOTALL)
        logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)

@@ -349,21 +370,23 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        user_promt = """
表名:{};
数据库表字段说明如下:
        user_prompt = """
Table name: {};
Table of database fields are as follows:
{}

Question are as follows:
{}
Please write the SQL, only SQL, without any other explanations or text.


The SQL error you provided last time is as follows:
{}

问题如下:
Error issued by database as follows:
{}

你上一次给出的错误SQL如下:
{}

后台报错如下:
{}

请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
Please correct the error and write SQL again, only SQL, without any other explanations or text.
""".format(
            index_name(tenant_id),
            "\n".join([f"{k}: {v}" for k, v in field_map.items()]),

@@ -378,21 +401,21 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):

    docid_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "doc_id"])
    docnm_idx = set([ii for ii, c in enumerate(
    doc_name_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "docnm_kwd"])
    clmn_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]
    column_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]

    # compose markdown table
    clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                        tbl["columns"][i]["name"])) for i in
                            clmn_idx]) + ("|Source|" if docid_idx and docid_idx else "|")
    # compose Markdown table
    columns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                          tbl["columns"][i]["name"])) for i in
                              column_idx]) + ("|Source|" if docid_idx and docid_idx else "|")

    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
    line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + \
        ("|------|" if docid_idx and docid_idx else "")

    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
    if quota:

@@ -401,191 +424,33 @@ def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
    if not docid_idx or not doc_name_idx:
        logging.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "answer": "\n".join([columns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []},
            "prompt": sys_prompt
        }

    docid_idx = list(docid_idx)[0]
    docnm_idx = list(docnm_idx)[0]
    doc_name_idx = list(doc_name_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
            doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
        "answer": "\n".join([columns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
                                   doc_aggs.items()]},
        "prompt": sys_prompt
    }
|
||||
if llm_id2llm_type(llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
||||
prompt = """
|
||||
You are a grader assessing relevance of a retrieved document to a user question.
|
||||
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
|
||||
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
|
||||
Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
|
||||
No other words needed except 'yes' or 'no'.
|
||||
"""
|
||||
if not contents:return False
|
||||
contents = "Documents: \n" + " - ".join(contents)
|
||||
contents = f"Question: {question}\n" + contents
|
||||
if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
|
||||
contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
|
||||
if ans.lower().find("yes") >= 0: return True
|
||||
return False
|
||||
|
||||
|
||||
def rewrite(tenant_id, llm_id, question):
|
||||
if llm_id2llm_type(llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
||||
prompt = """
|
||||
You are an expert at query expansion to generate a paraphrasing of a question.
|
||||
I can't retrieval relevant information from the knowledge base by using user's question directly.
|
||||
You need to expand or paraphrase user's question by multiple ways such as using synonyms words/phrase,
|
||||
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
|
||||
changing the way of expression, translating the original question into another language (English/Chinese), etc.
|
||||
And return 5 versions of question and one is from translation.
|
||||
Just list the question. No other words are needed.
|
||||
"""
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
|
||||
return ans
|
||||
|
||||
|
||||
def keyword_extraction(chat_mdl, content, topn=3):
|
||||
prompt = f"""
|
||||
Role: You're a text analyzer.
|
||||
Task: extract the most important keywords/phrases of a given piece of text content.
|
||||
Requirements:
|
||||
- Summarize the text content, and give top {topn} important keywords/phrases.
|
||||
- The keywords MUST be in language of the given piece of text content.
|
||||
- The keywords are delimited by ENGLISH COMMA.
|
||||
- Keywords ONLY in output.
|
||||
|
||||
### Text Content
|
||||
{content}
|
||||
|
||||
"""
|
||||
msg = [
|
||||
{"role": "system", "content": prompt},
|
||||
{"role": "user", "content": "Output: "}
|
||||
]
|
||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
|
||||
if isinstance(kwd, tuple): kwd = kwd[0]
|
||||
if kwd.find("**ERROR**") >=0: return ""
|
||||
return kwd
|
||||
|
||||
|
||||
def question_proposal(chat_mdl, content, topn=3):
|
||||
prompt = f"""
|
||||
Role: You're a text analyzer.
|
||||
Task: propose {topn} questions about a given piece of text content.
|
||||
Requirements:
|
||||
- Understand and summarize the text content, and propose top {topn} important questions.
|
||||
- The questions SHOULD NOT have overlapping meanings.
|
||||
- The questions SHOULD cover the main content of the text as much as possible.
|
||||
- The questions MUST be in language of the given piece of text content.
|
||||
- One question per line.
|
||||
- Question ONLY in output.
|
||||
|
||||
### Text Content
|
||||
{content}
|
||||
|
||||
"""
|
||||
msg = [
|
||||
{"role": "system", "content": prompt},
|
||||
{"role": "user", "content": "Output: "}
|
||||
]
|
||||
_, msg = message_fit_in(msg, chat_mdl.max_length)
|
||||
kwd = chat_mdl.chat(prompt, msg[1:], {"temperature": 0.2})
|
||||
if isinstance(kwd, tuple): kwd = kwd[0]
|
||||
if kwd.find("**ERROR**") >= 0: return ""
|
||||
return kwd
|
||||
|
||||
|
||||
def full_question(tenant_id, llm_id, messages):
|
||||
if llm_id2llm_type(llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
|
||||
conv = []
|
||||
for m in messages:
|
||||
if m["role"] not in ["user", "assistant"]: continue
|
||||
conv.append("{}: {}".format(m["role"].upper(), m["content"]))
|
||||
conv = "\n".join(conv)
|
||||
today = datetime.date.today().isoformat()
|
||||
yesterday = (datetime.date.today() - timedelta(days=1)).isoformat()
|
||||
tomorrow = (datetime.date.today() + timedelta(days=1)).isoformat()
|
||||
prompt = f"""
|
||||
Role: A helpful assistant
|
||||
|
||||
Task and steps:
|
||||
1. Generate a full user question that would follow the conversation.
|
||||
2. If the user's question involves relative date, you need to convert it into absolute date based on the current date, which is {today}. For example: 'yesterday' would be converted to {yesterday}.
|
||||
|
||||
Requirements & Restrictions:
|
||||
- Text generated MUST be in the same language of the original user's question.
|
||||
- If the user's latest question is completely, don't do anything, just return the original question.
|
||||
- DON'T generate anything except a refined question.
|
||||
|
||||
######################
|
||||
-Examples-
|
||||
######################
|
||||
|
||||
# Example 1
|
||||
## Conversation
|
||||
USER: What is the name of Donald Trump's father?
|
||||
ASSISTANT: Fred Trump.
|
||||
USER: And his mother?
|
||||
###############
|
||||
Output: What's the name of Donald Trump's mother?
|
||||
|
||||
------------
|
||||
# Example 2
|
||||
## Conversation
|
||||
USER: What is the name of Donald Trump's father?
|
||||
ASSISTANT: Fred Trump.
|
||||
USER: And his mother?
|
||||
ASSISTANT: Mary Trump.
|
||||
User: What's her full name?
|
||||
###############
|
||||
Output: What's the full name of Donald Trump's mother Mary Trump?
|
||||
|
||||
------------
|
||||
# Example 3
|
||||
## Conversation
|
||||
USER: What's the weather today in London?
|
||||
ASSISTANT: Cloudy.
|
||||
USER: What's about tomorrow in Rochester?
|
||||
###############
|
||||
Output: What's the weather in Rochester on {tomorrow}?
|
||||
######################
|
||||
|
||||
# Real Data
|
||||
## Conversation
|
||||
{conv}
|
||||
###############
|
||||
"""
|
||||
ans = chat_mdl.chat(prompt, [{"role": "user", "content": "Output: "}], {"temperature": 0.2})
|
||||
return ans if ans.find("**ERROR**") < 0 else messages[-1]["content"]
|
||||
|
||||
|
||||
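relevant() above reduces an LLM call to a binary yes/no gate over retrieved documents. A sketch of the same pattern with a stubbed chat function, so nothing here depends on a real model (the prompt is abbreviated and the stub is hypothetical):

    def relevant(chat, question, contents):
        # Binary relevance gate, as in the removed helper above:
        # anything other than an explicit "yes" counts as irrelevant.
        if not contents:
            return False
        prompt = "Answer only 'yes' or 'no': is the document relevant to the question?"
        payload = f"Question: {question}\nDocuments:\n - " + "\n - ".join(contents)
        ans = chat(prompt, payload)
        return "yes" in ans.lower()

    fake_chat = lambda prompt, payload: "Yes."   # stand-in for chat_mdl.chat
    print(relevant(fake_chat, "What is RAG?", ["Retrieval-augmented generation ..."]))  # True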
def tts(tts_mdl, text):
    if not tts_mdl or not text: return
    if not tts_mdl or not text:
        return
    bin = b""
    for chunk in tts_mdl.tts(text):
        bin += chunk

@@ -594,26 +459,20 @@ def tts(tts_mdl, text):

def ask(question, kb_ids, tenant_id):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    tenant_ids = [kb.tenant_id for kb in kbs]
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    embedding_list = list(set([kb.embd_id for kb in kbs]))

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = settings.retrievaler if not is_kg else settings.kg_retrievaler
    is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
    retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length

    kbinfos = retr.retrieval(question, embd_mdl, tenant_ids, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    used_token_count = 0
    for i, c in enumerate(knowledges):
        used_token_count += num_tokens_from_string(c)
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            break

    tenant_ids = list(set([kb.tenant_id for kb in kbs]))
    kbinfos = retriever.retrieval(question, embd_mdl, tenant_ids, kb_ids,
                                  1, 12, 0.1, 0.3, aggs=False,
                                  rank_feature=label_question(question, kbs)
                                  )
    knowledges = kb_prompt(kbinfos, max_tokens)
    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from knowledge bases and answer user's question.

@@ -623,29 +482,30 @@ def ask(question, kb_ids, tenant_id):
  - Answer with markdown format text.
  - Answer in language of user's question.
  - DO NOT make things up, especially for numbers.

### Information from knowledge bases
%s

The above is information from knowledge bases.

"""%"\n".join(knowledges)

""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retr.insert_citations(answer,
                                            [ck["content_ltks"]
                                             for ck in kbinfos["chunks"]],
                                            [ck["vector"]
                                             for ck in kbinfos["chunks"]],
                                            embd_mdl,
                                            tkweight=0.7,
                                            vtweight=0.3)
        answer, idx = retriever.insert_citations(answer,
                                                 [ck["content_ltks"]
                                                  for ck in kbinfos["chunks"]],
                                                 [ck["vector"]
                                                  for ck in kbinfos["chunks"]],
                                                 embd_mdl,
                                                 tkweight=0.7,
                                                 vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs: recall_docs = kbinfos["doc_aggs"]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:

@@ -654,6 +514,7 @@ def ask(question, kb_ids, tenant_id):

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        refs["chunks"] = chunks_format(refs)
        return {"answer": answer, "reference": refs}

    answer = ""

@@ -661,4 +522,3 @@ def ask(question, kb_ids, tenant_id):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)
@@ -14,7 +14,7 @@
# limitations under the License.
#
import logging
import hashlib
import xxhash
import json
import random
import re
@@ -22,13 +22,13 @@ from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from datetime import datetime
from io import BytesIO
import trio

from peewee import fn

from api.db.db_utils import bulk_insert_into_db
from api import settings
from api.utils import current_timestamp, get_format_time, get_uuid
from graphrag.mind_map_extractor import MindMapExtractor
from rag.settings import SVR_QUEUE_NAME
from rag.utils.storage_factory import STORAGE_IMPL
from rag.nlp import search, rag_tokenizer
@@ -66,8 +66,8 @@ class DocumentService(CommonService):
        else:
            docs = docs.order_by(cls.model.getter_by(orderby).asc())

        docs = docs.paginate(page_number, items_per_page)
        count = docs.count()
        docs = docs.paginate(page_number, items_per_page)
        return list(docs.dicts()), count

    @classmethod
@@ -96,20 +96,28 @@ class DocumentService(CommonService):
    def insert(cls, doc):
        if not cls.save(**doc):
            raise RuntimeError("Database error (Document)!")
        e, doc = cls.get_by_id(doc["id"])
        if not e:
            raise RuntimeError("Database error (Document retrieval)!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        e, kb = KnowledgebaseService.get_by_id(doc["kb_id"])
        if not KnowledgebaseService.update_by_id(
                kb.id, {"doc_num": kb.doc_num + 1}):
            raise RuntimeError("Database error (Knowledgebase)!")
        return doc
        return Document(**doc)

    @classmethod
    @DB.connection_context()
    def remove_document(cls, doc, tenant_id):
        settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
        cls.clear_chunk_num(doc.id)
        try:
            settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "source_id": doc.id},
                                         {"remove": {"source_id": doc.id}},
                                         search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
                                         {"removed_kwd": "Y"},
                                         search.index_name(tenant_id), doc.kb_id)
            settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "community_report"], "must_not": {"exists": "source_id"}},
                                         search.index_name(tenant_id), doc.kb_id)
        except Exception:
            pass
        return cls.delete_by_id(doc.id)
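The new removal path does three knowledge-graph bookkeeping steps before deleting the document row: detach the document from shared graph objects, flag the graph itself as stale, then garbage-collect objects that no longer have any source document. A toy sketch of the same ordering over an in-memory store (purely illustrative; the real code talks to `settings.docStoreConn`):

```python
# Toy in-memory version of the cleanup order used above; dicts stand in
# for documents stored in the doc store, not RAGFlow's real API.
def remove_doc_from_graph(store: list[dict], doc_id: str):
    for obj in store:
        if doc_id in obj.get("source_id", []):
            obj["source_id"].remove(doc_id)      # 1) detach the document
        if obj.get("knowledge_graph_kwd") == "graph":
            obj["removed_kwd"] = "Y"             # 2) mark the graph stale
    # 3) drop graph objects whose source list became empty
    store[:] = [o for o in store
                if o.get("source_id") or o.get("knowledge_graph_kwd") is None]
```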

    @classmethod
@@ -145,7 +153,7 @@ class DocumentService(CommonService):
    @DB.connection_context()
    def get_unfinished_docs(cls):
        fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg,
                  cls.model.run]
                  cls.model.run, cls.model.parser_id]
        docs = cls.model.select(*fields) \
            .where(
                cls.model.status == StatusEnum.VALID.value,
@@ -282,6 +290,31 @@ class DocumentService(CommonService):
            return
        return docs[0]["embd_id"]

    @classmethod
    @DB.connection_context()
    def get_chunking_config(cls, doc_id):
        configs = (
            cls.model.select(
                cls.model.id,
                cls.model.kb_id,
                cls.model.parser_id,
                cls.model.parser_config,
                Knowledgebase.language,
                Knowledgebase.embd_id,
                Tenant.id.alias("tenant_id"),
                Tenant.img2txt_id,
                Tenant.asr_id,
                Tenant.llm_id,
            )
            .join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
            .where(cls.model.id == doc_id)
        )
        configs = configs.dicts()
        if not configs:
            return None
        return configs[0]

    @classmethod
    @DB.connection_context()
    def get_doc_id_by_doc_name(cls, doc_name):
@@ -319,6 +352,8 @@ class DocumentService(CommonService):
                old[k] = v

        dfs_update(d.parser_config, config)
        if not config.get("raptor") and d.parser_config.get("raptor"):
            del d.parser_config["raptor"]
        cls.update_by_id(id, {"parser_config": d.parser_config})

    @classmethod
@@ -337,6 +372,10 @@ class DocumentService(CommonService):
            "progress_msg": "Task is queued...",
            "process_begin_at": get_format_time()
        })
    @classmethod
    @DB.connection_context()
    def update_meta_fields(cls, doc_id, meta_fields):
        return cls.update_by_id(doc_id, {"meta_fields": meta_fields})

    @classmethod
    @DB.connection_context()
@@ -351,30 +390,36 @@ class DocumentService(CommonService):
            prg = 0
            finished = True
            bad = 0
            has_raptor = False
            has_graphrag = False
            e, doc = DocumentService.get_by_id(d["id"])
            status = doc.run  # TaskStatus.RUNNING.value
            for t in tsks:
                if 0 <= t.progress < 1:
                    finished = False
                prg += t.progress if t.progress >= 0 else 0
                if t.progress_msg not in msg:
                    msg.append(t.progress_msg)
                if t.progress == -1:
                    bad += 1
                prg += t.progress if t.progress >= 0 else 0
                msg.append(t.progress_msg)
                if t.task_type == "raptor":
                    has_raptor = True
                elif t.task_type == "graphrag":
                    has_graphrag = True
            prg /= len(tsks)
            if finished and bad:
                prg = -1
                status = TaskStatus.FAIL.value
            elif finished:
                if d["parser_config"].get("raptor", {}).get("use_raptor") and d["progress_msg"].lower().find(
                        " raptor") < 0:
                    queue_raptor_tasks(d)
                if d["parser_config"].get("raptor", {}).get("use_raptor") and not has_raptor:
                    queue_raptor_o_graphrag_tasks(d, "raptor")
                    prg = 0.98 * len(tsks) / (len(tsks) + 1)
                elif d["parser_config"].get("graphrag", {}).get("use_graphrag") and not has_graphrag:
                    queue_raptor_o_graphrag_tasks(d, "graphrag")
                    prg = 0.98 * len(tsks) / (len(tsks) + 1)
                    msg.append("------ RAPTOR -------")
                else:
                    status = TaskStatus.DONE.value

            msg = "\n".join(msg)
            msg = "\n".join(sorted(msg))
            info = {
                "process_duation": datetime.timestamp(
                    datetime.now()) -
@@ -406,30 +451,40 @@ class DocumentService(CommonService):
    return False


def queue_raptor_tasks(doc):
def queue_raptor_o_graphrag_tasks(doc, ty):
    chunking_config = DocumentService.get_chunking_config(doc["id"])
    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):
        hasher.update(str(chunking_config[field]).encode("utf-8"))

    def new_task():
        nonlocal doc
        return {
            "id": get_uuid(),
            "doc_id": doc["id"],
            "from_page": 0,
            "to_page": -1,
            "progress_msg": "Start to do RAPTOR (Recursive Abstractive Processing for Tree-Organized Retrieval)."
            "from_page": 100000000,
            "to_page": 100000000,
            "task_type": ty,
            "progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty
        }

    task = new_task()
    for field in ["doc_id", "from_page", "to_page"]:
        hasher.update(str(task.get(field, "")).encode("utf-8"))
    hasher.update(ty.encode("utf-8"))
    task["digest"] = hasher.hexdigest()
    bulk_insert_into_db(Task, [task], True)
    task["type"] = "raptor"
    assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task), "Can't access Redis. Please check the Redis' status."
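`queue_raptor_o_graphrag_tasks` fingerprints the chunking configuration plus the task's identifying fields with xxhash, so an identical later run produces an identical digest and can be matched against earlier work. A minimal standalone sketch of that digest scheme (same idea, simplified inputs):

```python
# Minimal sketch of the task-digest idea used above: hash the chunking
# config and the task's identifying fields in a stable (sorted) order.
# Requires the `xxhash` package (pip install xxhash).
import xxhash

def task_digest(chunking_config: dict, task: dict, task_type: str = "") -> str:
    hasher = xxhash.xxh64()
    for field in sorted(chunking_config.keys()):       # stable field order
        hasher.update(str(chunking_config[field]).encode("utf-8"))
    for field in ["doc_id", "from_page", "to_page"]:   # task identity
        hasher.update(str(task.get(field, "")).encode("utf-8"))
    if task_type:
        hasher.update(task_type.encode("utf-8"))
    return hasher.hexdigest()

# Identical config + identical page window -> identical digest, which is
# what makes chunk reuse across re-parses safe.
cfg = {"parser_id": "naive", "kb_id": "kb1"}
assert task_digest(cfg, {"doc_id": "d1", "from_page": 0, "to_page": 12}) == \
       task_digest(cfg, {"doc_id": "d1", "from_page": 0, "to_page": 12})
```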
def doc_upload_and_parse(conversation_id, file_objs, user_id):
    from rag.app import presentation, picture, naive, audio, email
    from api.db.services.dialog_service import ConversationService, DialogService
    from api.db.services.dialog_service import DialogService
    from api.db.services.file_service import FileService
    from api.db.services.llm_service import LLMBundle
    from api.db.services.user_service import TenantService
    from api.db.services.api_service import API4ConversationService
    from api.db.services.conversation_service import ConversationService

    e, conv = ConversationService.get_by_id(conversation_id)
    if not e:
@@ -437,6 +492,9 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
    assert e, "Conversation not found!"

    e, dia = DialogService.get_by_id(conv.dialog_id)
    if not dia.kb_ids:
        raise LookupError("No knowledge base associated with this conversation. "
                          "Please add a knowledge base before uploading documents")
    kb_id = dia.kb_ids[0]
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
@@ -456,7 +514,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
        ParserType.AUDIO.value: audio,
        ParserType.EMAIL.value: email
    }
    parser_config = {"chunk_token_num": 4096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
    parser_config = {"chunk_token_num": 4096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
    exe = ThreadPoolExecutor(max_workers=12)
    threads = []
    doc_nm = {}
@@ -482,10 +540,7 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
        for ck in th.result():
            d = deepcopy(doc)
            d.update(ck)
            md5 = hashlib.md5()
            md5.update((ck["content_with_weight"] +
                        str(d["doc_id"])).encode("utf-8"))
            d["id"] = md5.hexdigest()
            d["id"] = xxhash.xxh64((ck["content_with_weight"] + str(d["doc_id"])).encode("utf-8")).hexdigest()
            d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
            d["create_timestamp_flt"] = datetime.now().timestamp()
            if not d.get("image"):
@@ -528,18 +583,20 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
        cks = [c for c in docs if c["doc_id"] == doc_id]

        if parser_ids[doc_id] != ParserType.PICTURE.value:
            from graphrag.general.mind_map_extractor import MindMapExtractor
            mindmap = MindMapExtractor(llm_bdl)
            try:
                mind_map = json.dumps(mindmap([c["content_with_weight"] for c in docs if c["doc_id"] == doc_id]).output,
                                      ensure_ascii=False, indent=2)
                if len(mind_map) < 32: raise Exception("Few content: " + mind_map)
                mind_map = trio.run(mindmap, [c["content_with_weight"] for c in docs if c["doc_id"] == doc_id])
                mind_map = json.dumps(mind_map.output, ensure_ascii=False, indent=2)
                if len(mind_map) < 32:
                    raise Exception("Few content: " + mind_map)
                cks.append({
                    "id": get_uuid(),
                    "doc_id": doc_id,
                    "kb_id": [kb.id],
                    "docnm_kwd": doc_nm[doc_id],
                    "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", doc_nm[doc_id])),
                    "content_ltks": "",
                    "content_ltks": rag_tokenizer.tokenize("summary summarize 总结 概况 file 文件 概括"),
                    "content_with_weight": mind_map,
                    "knowledge_graph_kwd": "mind_map"
                })
@@ -561,4 +618,4 @@ def doc_upload_and_parse(conversation_id, file_objs, user_id):
        DocumentService.increment_chunk_num(
            doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0)

    return [d["id"] for d, _ in files]
    return [d["id"] for d, _ in files]
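Chunk IDs above are derived from the chunk text plus the owning `doc_id`, so re-ingesting identical content yields the same ID (idempotent upserts) while the `doc_id` salt keeps identical text in different documents distinct; the commit swaps MD5 for the faster, non-cryptographic xxhash64. A hedged sketch of the scheme:

```python
# Sketch of content-addressed chunk IDs as used above; xxhash64 is a fast
# non-cryptographic hash, fine for dedup keys (not for security).
import xxhash

def chunk_id(content: str, doc_id: str) -> str:
    return xxhash.xxh64((content + str(doc_id)).encode("utf-8")).hexdigest()

# Same text + same document -> same id (safe to re-ingest);
# same text in another document -> different id.
assert chunk_id("hello", "docA") == chunk_id("hello", "docA")
assert chunk_id("hello", "docA") != chunk_id("hello", "docB")
```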
@@ -20,7 +20,7 @@ from api.db.db_models import DB
from api.db.db_models import File, File2Document
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.utils import current_timestamp, datetime_format, get_uuid
from api.utils import current_timestamp, datetime_format


class File2DocumentService(CommonService):
@@ -43,10 +43,7 @@ class File2DocumentService(CommonService):
    def insert(cls, obj):
        if not cls.save(**obj):
            raise RuntimeError("Database error (File)!")
        e, obj = cls.get_by_id(obj["id"])
        if not e:
            raise RuntimeError("Database error (File retrieval)!")
        return obj
        return File2Document(**obj)

    @classmethod
    @DB.connection_context()
@@ -63,9 +60,8 @@ class File2DocumentService(CommonService):
    def update_by_file_id(cls, file_id, obj):
        obj["update_time"] = current_timestamp()
        obj["update_date"] = datetime_format(datetime.now())
        num = cls.model.update(obj).where(cls.model.id == file_id).execute()
        e, obj = cls.get_by_id(cls.model.id)
        return obj
        cls.model.update(obj).where(cls.model.id == file_id).execute()
        return File2Document(**obj)

    @classmethod
    @DB.connection_context()
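Several insert/update helpers in this change stop re-reading the row they just wrote and instead return a model built from the in-memory dict, saving one SELECT per call (it also removes the old `update_by_file_id` lookup that queried by `cls.model.id` rather than `file_id`). A hedged peewee-style sketch of the pattern, with a generic model class standing in for the services above:

```python
# Hedged sketch of "write, then return the in-memory model" with peewee;
# `model_cls` stands in for any peewee Model subclass.
def insert_no_refetch(model_cls, data: dict):
    model_cls.insert(**data).execute()   # one round trip: the INSERT itself
    return model_cls(**data)             # no follow-up SELECT; the fields
                                         # come from the dict we already hold
```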
@@ -85,7 +85,8 @@ class FileService(CommonService):
            .join(Document, on=(File2Document.document_id == Document.id))
            .join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
            .where(cls.model.id == file_id))
        if not kbs: return []
        if not kbs:
            return []
        kbs_info_list = []
        for kb in list(kbs.dicts()):
            kbs_info_list.append({"kb_id": kb['id'], "kb_name": kb['name']})
@@ -250,10 +251,7 @@ class FileService(CommonService):
    def insert(cls, file):
        if not cls.save(**file):
            raise RuntimeError("Database error (File)!")
        e, file = cls.get_by_id(file["id"])
        if not e:
            raise RuntimeError("Database error (File retrieval)!")
        return file
        return File(**file)

    @classmethod
    @DB.connection_context()
@@ -304,7 +302,8 @@ class FileService(CommonService):
    @classmethod
    @DB.connection_context()
    def add_file_from_kb(cls, doc, kb_folder_id, tenant_id):
        for _ in File2DocumentService.get_by_document_id(doc["id"]): return
        for _ in File2DocumentService.get_by_document_id(doc["id"]):
            return
        file = {
            "id": get_uuid(),
            "parent_id": kb_folder_id,
@@ -343,6 +342,8 @@ class FileService(CommonService):
        MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
        if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
            raise RuntimeError("Exceed the maximum file number of a free user!")
        if len(file.filename) >= 128:
            raise RuntimeError("Exceed the maximum length of file name!")

        filename = duplicate_name(
            DocumentService.query,
@@ -400,7 +401,7 @@ class FileService(CommonService):
        ParserType.AUDIO.value: audio,
        ParserType.EMAIL.value: email
    }
    parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": False}
    parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
    exe = ThreadPoolExecutor(max_workers=12)
    threads = []
    for file in file_objs:
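Note that `layout_recognize` changes from a boolean to a named engine string here and in task_service.py below ("Plain Text" instead of `False`, "DeepDOC" instead of `True`), leaving room for more than two layout options. Code reading configs written by older versions would need to tolerate both shapes; a hedged sketch of such a normalizer (the helper itself is an assumption, only the two string values come from this diff):

```python
# Hedged sketch: normalize legacy boolean layout_recognize values to the
# string form introduced by this change ("DeepDOC" / "Plain Text").
def layout_engine(parser_config: dict) -> str:
    v = parser_config.get("layout_recognize", "DeepDOC")
    if isinstance(v, bool):                 # legacy configs stored a bool
        return "DeepDOC" if v else "Plain Text"
    return v                                # new configs store the engine name

assert layout_engine({"layout_recognize": False}) == "Plain Text"
assert layout_engine({}) == "DeepDOC"
```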
@@ -35,7 +35,10 @@ class KnowledgebaseService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
                          page_number, items_per_page, orderby, desc, keywords):
                          page_number, items_per_page,
                          orderby, desc, keywords,
                          parser_id=None
                          ):
        fields = [
            cls.model.id,
            cls.model.avatar,
@@ -67,6 +70,8 @@ class KnowledgebaseService(CommonService):
                cls.model.tenant_id == user_id))
            & (cls.model.status == StatusEnum.VALID.value)
        )
        if parser_id:
            kbs = kbs.where(cls.model.parser_id == parser_id)
        if desc:
            kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
        else:
@@ -104,7 +109,8 @@ class KnowledgebaseService(CommonService):
            cls.model.token_num,
            cls.model.chunk_num,
            cls.model.parser_id,
            cls.model.parser_config]
            cls.model.parser_config,
            cls.model.pagerank]
        kbs = cls.model.select(*fields).join(Tenant, on=(
            (Tenant.id == cls.model.tenant_id) & (Tenant.status == StatusEnum.VALID.value))).where(
            (cls.model.id == kb_id),
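The new optional `parser_id` filter is appended only when provided, which is the usual way to build optional filters in peewee: each `.where()` call ANDs another predicate onto the select. A self-contained sketch:

```python
# Self-contained peewee sketch of the optional-filter pattern used above.
from peewee import SqliteDatabase, Model, CharField

db = SqliteDatabase(":memory:")

class Kb(Model):
    name = CharField()
    parser_id = CharField()
    class Meta:
        database = db

db.create_tables([Kb])
Kb.create(name="a", parser_id="naive")
Kb.create(name="b", parser_id="paper")

def list_kbs(parser_id=None):
    q = Kb.select()
    if parser_id:                       # chain the filter only when asked
        q = q.where(Kb.parser_id == parser_id)
    return sorted(k.name for k in q)

assert list_kbs() == ["a", "b"]
assert list_kbs("paper") == ["b"]
```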
@@ -13,8 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os

from api.db.services.user_service import TenantService
from api.utils.file_utils import get_project_base_directory
from rag.llm import EmbeddingModel, CvModel, ChatModel, RerankModel, Seq2txtModel, TTSModel
from api.db import LLMType
from api.db.db_models import DB
@@ -36,11 +40,11 @@ class TenantLLMService(CommonService):
    @classmethod
    @DB.connection_context()
    def get_api_key(cls, tenant_id, model_name):
        arr = model_name.split("@")
        if len(arr) < 2:
            objs = cls.query(tenant_id=tenant_id, llm_name=model_name)
        mdlnm, fid = TenantLLMService.split_model_name_and_factory(model_name)
        if not fid:
            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm)
        else:
            objs = cls.query(tenant_id=tenant_id, llm_name=arr[0], llm_factory=arr[1])
            objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)
        if not objs:
            return
        return objs[0]
@@ -61,10 +65,28 @@ class TenantLLMService(CommonService):

        return list(objs)

    @staticmethod
    def split_model_name_and_factory(model_name):
        arr = model_name.split("@")
        if len(arr) < 2:
            return model_name, None
        if len(arr) > 2:
            return "@".join(arr[0:-1]), arr[-1]

        # model name must be xxx@yyy
        try:
            model_factories = json.load(open(os.path.join(get_project_base_directory(), "conf/llm_factories.json"), "r"))["factory_llm_infos"]
            model_providers = set([f["name"] for f in model_factories])
            if arr[-1] not in model_providers:
                return model_name, None
            return arr[0], arr[-1]
        except Exception as e:
            logging.exception(f"TenantLLMService.split_model_name_and_factory got exception: {e}")
            return model_name, None

    @classmethod
    @DB.connection_context()
    def model_instance(cls, tenant_id, llm_type,
                       llm_name=None, lang="Chinese"):
    def get_model_config(cls, tenant_id, llm_type, llm_name=None):
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")
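`split_model_name_and_factory` splits a `model@factory` identifier at the last `@`, so model names that themselves contain `@` still parse; with exactly one `@`, the suffix only counts as a factory if it matches a known provider. A trimmed-down sketch of the disambiguation, with the provider list hard-coded instead of read from conf/llm_factories.json:

```python
# Trimmed-down sketch of the "model@factory" parsing above; the real code
# loads the provider list from conf/llm_factories.json.
KNOWN_PROVIDERS = {"OpenAI", "Tongyi-Qianwen", "BAAI"}  # illustrative subset

def split_model_name_and_factory(model_name: str):
    arr = model_name.split("@")
    if len(arr) < 2:
        return model_name, None                 # no factory suffix at all
    if len(arr) > 2:
        return "@".join(arr[:-1]), arr[-1]      # split at the last "@"
    # exactly one "@": suffix counts as a factory only if it is a provider
    if arr[-1] not in KNOWN_PROVIDERS:
        return model_name, None
    return arr[0], arr[-1]

assert split_model_name_and_factory("gpt-4o@OpenAI") == ("gpt-4o", "OpenAI")
assert split_model_name_and_factory("user@host-model") == ("user@host-model", None)
```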
@@ -85,24 +107,29 @@ class TenantLLMService(CommonService):
            assert False, "LLM type error"

        model_config = cls.get_api_key(tenant_id, mdlnm)
        tmp = mdlnm.split("@")
        fid = None if len(tmp) < 2 else tmp[1]
        mdlnm = tmp[0]
        if model_config: model_config = model_config.to_dict()
        mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
        if model_config:
            model_config = model_config.to_dict()
        if not model_config:
            if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
                llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
                if llm and llm[0].fid in ["Youdao", "FastEmbed", "BAAI"]:
                    model_config = {"llm_factory": llm[0].fid, "api_key":"", "llm_name": mdlnm, "api_base": ""}
                    model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": mdlnm, "api_base": ""}
            if not model_config:
                if mdlnm == "flag-embedding":
                    model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "",
                                    "llm_name": llm_name, "api_base": ""}
                                    "llm_name": llm_name, "api_base": ""}
                else:
                    if not mdlnm:
                        raise LookupError(f"Type of {llm_type} model is not set.")
                    raise LookupError("Model({}) not authorized".format(mdlnm))
        return model_config

    @classmethod
    @DB.connection_context()
    def model_instance(cls, tenant_id, llm_type,
                       llm_name=None, lang="Chinese"):
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        if llm_type == LLMType.EMBEDDING.value:
            if model_config["llm_factory"] not in EmbeddingModel:
                return
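`model_instance` then turns the resolved config into a client by looking the factory name up in a registry keyed by provider (`EmbeddingModel`, `ChatModel`, and friends are such mappings). A generic sketch of factory-registry dispatch under that assumption, with stand-in classes rather than RAGFlow's real model wrappers:

```python
# Generic sketch of the registry dispatch used by model_instance: a dict
# maps a factory name to a client class; unknown factories yield None.
class OpenAIEmbed:
    def __init__(self, api_key, model_name, base_url=""):
        self.model_name = model_name

class BAAIEmbed:
    def __init__(self, api_key, model_name, base_url=""):
        self.model_name = model_name

EMBEDDING_REGISTRY = {"OpenAI": OpenAIEmbed, "BAAI": BAAIEmbed}

def embedding_instance(model_config: dict):
    factory = model_config["llm_factory"]
    if factory not in EMBEDDING_REGISTRY:
        return None                           # mirrors the early `return`
    cls = EMBEDDING_REGISTRY[factory]
    return cls(model_config["api_key"], model_config["llm_name"],
               base_url=model_config.get("api_base", ""))

mdl = embedding_instance({"llm_factory": "BAAI", "api_key": "", "llm_name": "bge-m3"})
assert isinstance(mdl, BAAIEmbed)
```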
@@ -151,33 +178,39 @@ class TenantLLMService(CommonService):
    def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
        e, tenant = TenantService.get_by_id(tenant_id)
        if not e:
            raise LookupError("Tenant not found")
            logging.error(f"Tenant not found: {tenant_id}")
            return 0

        if llm_type == LLMType.EMBEDDING.value:
            mdlnm = tenant.embd_id
        elif llm_type == LLMType.SPEECH2TEXT.value:
            mdlnm = tenant.asr_id
        elif llm_type == LLMType.IMAGE2TEXT.value:
            mdlnm = tenant.img2txt_id
        elif llm_type == LLMType.CHAT.value:
            mdlnm = tenant.llm_id if not llm_name else llm_name
        elif llm_type == LLMType.RERANK:
            mdlnm = tenant.rerank_id if not llm_name else llm_name
        elif llm_type == LLMType.TTS:
            mdlnm = tenant.tts_id if not llm_name else llm_name
        else:
            assert False, "LLM type error"
        llm_map = {
            LLMType.EMBEDDING.value: tenant.embd_id,
            LLMType.SPEECH2TEXT.value: tenant.asr_id,
            LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
            LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
            LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
            LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name
        }

        llm_name = mdlnm.split("@")[0] if "@" in mdlnm else mdlnm
        mdlnm = llm_map.get(llm_type)
        if mdlnm is None:
            logging.error(f"LLM type error: {llm_type}")
            return 0

        llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)

        num = 0
        try:
            for u in cls.query(tenant_id=tenant_id, llm_name=llm_name):
                num += cls.model.update(used_tokens=u.used_tokens + used_tokens)\
                    .where(cls.model.tenant_id == tenant_id, cls.model.llm_name == llm_name)\
                    .execute()
        except Exception as e:
            pass
            num = cls.model.update(
                used_tokens=cls.model.used_tokens + used_tokens
            ).where(
                cls.model.tenant_id == tenant_id,
                cls.model.llm_name == llm_name,
                cls.model.llm_factory == llm_factory if llm_factory else True
            ).execute()
        except Exception:
            logging.exception(
                "TenantLLMService.increase_usage got exception,Failed to update used_tokens for tenant_id=%s, llm_name=%s",
                tenant_id, llm_name)
            return 0

        return num

    @classmethod
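The rewritten `increase_usage` replaces a read-modify-write loop with a single relative UPDATE (`used_tokens = used_tokens + n`), pushing the arithmetic into SQL so concurrent workers cannot lose increments. A minimal peewee demonstration of that idiom:

```python
# Minimal peewee demo of the atomic, relative UPDATE used above:
# the addition happens inside the database, not in Python.
from peewee import SqliteDatabase, Model, CharField, IntegerField

db = SqliteDatabase(":memory:")

class Usage(Model):
    llm_name = CharField()
    used_tokens = IntegerField(default=0)
    class Meta:
        database = db

db.create_tables([Usage])
Usage.create(llm_name="gpt-4o", used_tokens=10)

# UPDATE usage SET used_tokens = used_tokens + 5 WHERE llm_name = 'gpt-4o'
Usage.update(used_tokens=Usage.used_tokens + 5) \
     .where(Usage.llm_name == "gpt-4o").execute()

assert Usage.get(Usage.llm_name == "gpt-4o").used_tokens == 15
```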
@@ -191,7 +224,7 @@ class TenantLLMService(CommonService):
        return list(objs)


class LLMBundle(object):
class LLMBundle:
    def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese"):
        self.tenant_id = tenant_id
        self.llm_type = llm_type
@@ -200,18 +233,16 @@ class LLMBundle(object):
            tenant_id, llm_type, llm_name, lang=lang)
        assert self.mdl, "Can't find model for {}/{}/{}".format(
            tenant_id, llm_type, llm_name)
        self.max_length = 8192
        for lm in LLMService.query(llm_name=llm_name):
            self.max_length = lm.max_tokens
            break

    def encode(self, texts: list, batch_size=32):
        emd, used_tokens = self.mdl.encode(texts, batch_size)
        model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
        self.max_length = model_config.get("max_tokens", 8192)

    def encode(self, texts: list):
        embeddings, used_tokens = self.mdl.encode(texts)
        if not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens):
            logging.error(
                "LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
        return emd, used_tokens
        return embeddings, used_tokens

    def encode_queries(self, query: str):
        emd, used_tokens = self.mdl.encode_queries(query)
@@ -247,20 +278,21 @@ class LLMBundle(object):

    def tts(self, text):
        for chunk in self.mdl.tts(text):
            if isinstance(chunk,int):
            if isinstance(chunk, int):
                if not TenantLLMService.increase_usage(
                        self.tenant_id, self.llm_type, chunk, self.llm_name):
                    logging.error(
                        "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                    self.tenant_id, self.llm_type, chunk, self.llm_name):
                logging.error(
                    "LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
                return
            yield chunk
            yield chunk

    def chat(self, system, history, gen_conf):
        txt, used_tokens = self.mdl.chat(system, history, gen_conf)
        if isinstance(txt, int) and not TenantLLMService.increase_usage(
                self.tenant_id, self.llm_type, used_tokens, self.llm_name):
            logging.error(
                "LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
                "LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name,
                                                                                                           used_tokens))
        return txt

    def chat_streamly(self, system, history, gen_conf):
@@ -269,6 +301,7 @@ class LLMBundle(object):
            if not TenantLLMService.increase_usage(
                    self.tenant_id, self.llm_type, txt, self.llm_name):
                logging.error(
                    "LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
                    "LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name,
                                                                                                                    txt))
            return
        yield txt
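Every `LLMBundle` method follows the same contract: call the underlying model, report the returned token count through `increase_usage`, log (but never raise) when accounting fails, and pass the result through. A compact sketch of that wrap-and-meter pattern with dummy stand-ins:

```python
# Compact sketch of the wrap-and-meter contract LLMBundle methods follow;
# DummyModel and record_usage stand in for the real model wrapper and
# TenantLLMService.increase_usage.
import logging

def record_usage(tenant_id: str, used_tokens: int) -> int:
    return 1  # pretend one usage row was updated

class DummyModel:
    def chat(self, system, history, gen_conf):
        return "hi there", 2  # (text, used_tokens)

class MeteredBundle:
    def __init__(self, tenant_id, mdl):
        self.tenant_id, self.mdl = tenant_id, mdl

    def chat(self, system, history, gen_conf):
        txt, used_tokens = self.mdl.chat(system, history, gen_conf)
        # Accounting failures are logged, never raised: a billing hiccup
        # should not break the user's answer.
        if not record_usage(self.tenant_id, used_tokens):
            logging.error("can't update token usage for %s", self.tenant_id)
        return txt

assert MeteredBundle("t1", DummyModel()).chat("", [], {}) == "hi there"
```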
@@ -15,6 +15,8 @@
#
import os
import random
import xxhash
from datetime import datetime

from api.db.db_utils import bulk_insert_into_db
from deepdoc.parser import PdfParser
@@ -29,6 +31,18 @@ from deepdoc.parser.excel_parser import RAGFlowExcelParser
from rag.settings import SVR_QUEUE_NAME
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.redis_conn import REDIS_CONN
from api import settings
from rag.nlp import search


def trim_header_by_lines(text: str, max_length) -> str:
    len_text = len(text)
    if len_text <= max_length:
        return text
    for i in range(len_text):
        if text[i] == '\n' and len_text - i <= max_length:
            return text[i + 1:]
    return text


class TaskService(CommonService):
@@ -53,102 +67,154 @@ class TaskService(CommonService):
            Knowledgebase.tenant_id,
            Knowledgebase.language,
            Knowledgebase.embd_id,
            Knowledgebase.pagerank,
            Knowledgebase.parser_config.alias("kb_parser_config"),
            Tenant.img2txt_id,
            Tenant.asr_id,
            Tenant.llm_id,
            cls.model.update_time]
        docs = cls.model.select(*fields) \
            .join(Document, on=(cls.model.doc_id == Document.id)) \
            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id)) \
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
            .where(cls.model.id == task_id)
            cls.model.update_time,
        ]
        docs = (
            cls.model.select(*fields)
            .join(Document, on=(cls.model.doc_id == Document.id))
            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id))
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
            .where(cls.model.id == task_id)
        )
        docs = list(docs.dicts())
        if not docs: return None
        if not docs:
            return None

        msg = "\nTask has been received."
        prog = random.random() / 10.
        msg = f"\n{datetime.now().strftime('%H:%M:%S')} Task has been received."
        prog = random.random() / 10.0
        if docs[0]["retry_count"] >= 3:
            msg = "\nERROR: Task is abandoned after 3 times attempts."
            prog = -1

        cls.model.update(progress_msg=cls.model.progress_msg + msg,
                         progress=prog,
                         retry_count=docs[0]["retry_count"]+1
                         ).where(
            cls.model.id == docs[0]["id"]).execute()
        cls.model.update(
            progress_msg=cls.model.progress_msg + msg,
            progress=prog,
            retry_count=docs[0]["retry_count"] + 1,
        ).where(cls.model.id == docs[0]["id"]).execute()

        if docs[0]["retry_count"] >= 3: return None
        if docs[0]["retry_count"] >= 3:
            return None

        return docs[0]

    @classmethod
    @DB.connection_context()
    def get_tasks(cls, doc_id: str):
        fields = [
            cls.model.id,
            cls.model.from_page,
            cls.model.progress,
            cls.model.digest,
            cls.model.chunk_ids,
        ]
        tasks = (
            cls.model.select(*fields).order_by(cls.model.from_page.asc(), cls.model.create_time.desc())
            .where(cls.model.doc_id == doc_id)
        )
        tasks = list(tasks.dicts())
        if not tasks:
            return None
        return tasks

    @classmethod
    @DB.connection_context()
    def update_chunk_ids(cls, id: str, chunk_ids: str):
        cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()

    @classmethod
    @DB.connection_context()
    def get_ongoing_doc_name(cls):
        with DB.lock("get_task", -1):
            docs = cls.model.select(*[Document.id, Document.kb_id, Document.location, File.parent_id]) \
                .join(Document, on=(cls.model.doc_id == Document.id)) \
                .join(File2Document, on=(File2Document.document_id == Document.id), join_type=JOIN.LEFT_OUTER) \
                .join(File, on=(File2Document.file_id == File.id), join_type=JOIN.LEFT_OUTER) \
                .where(
            docs = (
                cls.model.select(
                    *[Document.id, Document.kb_id, Document.location, File.parent_id]
                )
                .join(Document, on=(cls.model.doc_id == Document.id))
                .join(
                    File2Document,
                    on=(File2Document.document_id == Document.id),
                    join_type=JOIN.LEFT_OUTER,
                )
                .join(
                    File,
                    on=(File2Document.file_id == File.id),
                    join_type=JOIN.LEFT_OUTER,
                )
                .where(
                    Document.status == StatusEnum.VALID.value,
                    Document.run == TaskStatus.RUNNING.value,
                    ~(Document.type == FileType.VIRTUAL.value),
                    cls.model.progress < 1,
                    cls.model.create_time >= current_timestamp() - 1000 * 600
                    cls.model.create_time >= current_timestamp() - 1000 * 600,
                )
            )
            docs = list(docs.dicts())
            if not docs: return []
            if not docs:
                return []

            return list(set([(d["parent_id"] if d["parent_id"] else d["kb_id"], d["location"]) for d in docs]))
            return list(
                set(
                    [
                        (
                            d["parent_id"] if d["parent_id"] else d["kb_id"],
                            d["location"],
                        )
                        for d in docs
                    ]
                )
            )

    @classmethod
    @DB.connection_context()
    def do_cancel(cls, id):
        try:
            task = cls.model.get_by_id(id)
            _, doc = DocumentService.get_by_id(task.doc_id)
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception:
            pass
        return False
        task = cls.model.get_by_id(id)
        _, doc = DocumentService.get_by_id(task.doc_id)
        return doc.run == TaskStatus.CANCEL.value or doc.progress < 0

    @classmethod
    @DB.connection_context()
    def update_progress(cls, id, info):
        if os.environ.get("MACOS"):
            if info["progress_msg"]:
                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
                    cls.model.id == id).execute()
                task = cls.model.get_by_id(id)
                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
            if "progress" in info:
                cls.model.update(progress=info["progress"]).where(
                    cls.model.id == id).execute()
                    cls.model.id == id
                ).execute()
            return

        with DB.lock("update_progress", -1):
            if info["progress_msg"]:
                cls.model.update(progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]).where(
                    cls.model.id == id).execute()
                task = cls.model.get_by_id(id)
                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
            if "progress" in info:
                cls.model.update(progress=info["progress"]).where(
                    cls.model.id == id).execute()
                    cls.model.id == id
                ).execute()


def queue_tasks(doc: dict, bucket: str, name: str):
    def new_task():
        return {
            "id": get_uuid(),
            "doc_id": doc["id"]
        }
    tsks = []
        return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}

    parse_task_array = []

    if doc["type"] == FileType.PDF.value:
        file_bin = STORAGE_IMPL.get(bucket, name)
        do_layout = doc["parser_config"].get("layout_recognize", True)
        do_layout = doc["parser_config"].get("layout_recognize", "DeepDOC")
        pages = PdfParser.total_page_number(doc["name"], file_bin)
        page_size = doc["parser_config"].get("task_page_size", 12)
        if doc["parser_id"] == "paper":
            page_size = doc["parser_config"].get("task_page_size", 22)
        if doc["parser_id"] in ["one", "knowledge_graph"] or not do_layout:
        if doc["parser_id"] in ["one", "knowledge_graph"] or do_layout != "DeepDOC":
            page_size = 10 ** 9
        page_ranges = doc["parser_config"].get("pages") or [(1, 10 ** 5)]
        for s, e in page_ranges:
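`queue_tasks` splits a PDF into fixed-size page windows per configured range, one parse task each; `page_size` is set to 10 ** 9 when DeepDOC layout analysis is not used, so the whole document becomes a single task. A hedged sketch of that partitioning arithmetic:

```python
# Hedged sketch of the page-window partitioning used by queue_tasks:
# each configured (start, end) range is cut into page_size windows.
def page_windows(page_ranges, total_pages, page_size):
    windows = []
    for s, e in page_ranges:
        s -= 1                         # ranges are 1-based in the config
        s = max(0, s)
        e = min(e - 1, total_pages)
        for p in range(s, e, page_size):
            windows.append((p, min(p + page_size, e)))
    return windows

# 30 pages, default range, windows of 12 pages -> 3 tasks.
assert page_windows([(1, 10 ** 5)], 30, 12) == [(0, 12), (12, 24), (24, 30)]
# Layout analysis off -> page_size 10**9 -> one task covers everything.
assert page_windows([(1, 10 ** 5)], 30, 10 ** 9) == [(0, 30)]
```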
@@ -159,7 +225,7 @@ def queue_tasks(doc: dict, bucket: str, name: str):
            task = new_task()
            task["from_page"] = p
            task["to_page"] = min(p + page_size, e)
            tsks.append(task)
            parse_task_array.append(task)

    elif doc["parser_id"] == "table":
        file_bin = STORAGE_IMPL.get(bucket, name)
@@ -168,12 +234,72 @@ def queue_tasks(doc: dict, bucket: str, name: str):
            task = new_task()
            task["from_page"] = i
            task["to_page"] = min(i + 3000, rn)
            tsks.append(task)
            parse_task_array.append(task)
    else:
        tsks.append(new_task())
        parse_task_array.append(new_task())

    bulk_insert_into_db(Task, tsks, True)
    chunking_config = DocumentService.get_chunking_config(doc["id"])
    for task in parse_task_array:
        hasher = xxhash.xxh64()
        for field in sorted(chunking_config.keys()):
            if field == "parser_config":
                for k in ["raptor", "graphrag"]:
                    if k in chunking_config[field]:
                        del chunking_config[field][k]
            hasher.update(str(chunking_config[field]).encode("utf-8"))
        for field in ["doc_id", "from_page", "to_page"]:
            hasher.update(str(task.get(field, "")).encode("utf-8"))
        task_digest = hasher.hexdigest()
        task["digest"] = task_digest
        task["progress"] = 0.0

    prev_tasks = TaskService.get_tasks(doc["id"])
    ck_num = 0
    if prev_tasks:
        for task in parse_task_array:
            ck_num += reuse_prev_task_chunks(task, prev_tasks, chunking_config)
        TaskService.filter_delete([Task.doc_id == doc["id"]])
        chunk_ids = []
        for task in prev_tasks:
            if task["chunk_ids"]:
                chunk_ids.extend(task["chunk_ids"].split())
        if chunk_ids:
            settings.docStoreConn.delete({"id": chunk_ids}, search.index_name(chunking_config["tenant_id"]),
                                         chunking_config["kb_id"])
        DocumentService.update_by_id(doc["id"], {"chunk_num": ck_num})

    bulk_insert_into_db(Task, parse_task_array, True)
    DocumentService.begin2parse(doc["id"])

    for t in tsks:
        assert REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=t), "Can't access Redis. Please check the Redis' status."
    unfinished_task_array = [task for task in parse_task_array if task["progress"] < 1.0]
    for unfinished_task in unfinished_task_array:
        assert REDIS_CONN.queue_product(
            SVR_QUEUE_NAME, message=unfinished_task
        ), "Can't access Redis. Please check the Redis' status."


def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
    idx = 0
    while idx < len(prev_tasks):
        prev_task = prev_tasks[idx]
        if prev_task.get("from_page", 0) == task.get("from_page", 0) \
                and prev_task.get("digest", 0) == task.get("digest", ""):
            break
        idx += 1

    if idx >= len(prev_tasks):
        return 0
    prev_task = prev_tasks[idx]
    if prev_task["progress"] < 1.0 or not prev_task["chunk_ids"]:
        return 0
    task["chunk_ids"] = prev_task["chunk_ids"]
    task["progress"] = 1.0
    if "from_page" in task and "to_page" in task and int(task['to_page']) - int(task['from_page']) >= 10 ** 6:
        task["progress_msg"] = f"Page({task['from_page']}~{task['to_page']}): "
    else:
        task["progress_msg"] = ""
    task["progress_msg"] = " ".join(
        [datetime.now().strftime("%H:%M:%S"), task["progress_msg"], "Reused previous task's chunks."])
    prev_task["chunk_ids"] = ""

    return len(task["chunk_ids"].split())
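The net effect: on re-parse, a new task whose digest and `from_page` match a finished previous task inherits that task's `chunk_ids` and starts at progress 1.0, so only genuinely changed page windows are re-queued (the raptor/graphrag keys are excluded from the digest precisely so toggling them does not invalidate parse results). A toy run of that matching logic:

```python
# Toy run of the digest-based reuse rule: a new task inherits chunks only
# from a *finished* previous task with the same from_page and digest.
def reuse(task, prev_tasks):
    for prev in prev_tasks:
        if (prev["from_page"] == task["from_page"]
                and prev["digest"] == task["digest"]
                and prev["progress"] >= 1.0 and prev["chunk_ids"]):
            task["chunk_ids"], task["progress"] = prev["chunk_ids"], 1.0
            prev["chunk_ids"] = ""      # claimed: don't delete these chunks
            return len(task["chunk_ids"].split())
    return 0

prev = [{"from_page": 0, "digest": "abc", "progress": 1.0, "chunk_ids": "c1 c2"}]
new = {"from_page": 0, "digest": "abc", "progress": 0.0, "chunk_ids": ""}
assert reuse(new, prev) == 2 and new["progress"] == 1.0
```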
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
from datetime import datetime

import peewee
@@ -22,8 +23,9 @@ from api.db import UserTenantRole
from api.db.db_models import DB, UserTenant
from api.db.db_models import User, Tenant
from api.db.services.common_service import CommonService
from api.utils import get_uuid, get_format_time, current_timestamp, datetime_format
from api.utils import get_uuid, current_timestamp, datetime_format
from api.db import StatusEnum
from rag.settings import MINIO


class UserService(CommonService):
@@ -126,6 +128,12 @@ class TenantService(CommonService):
        if num == 0:
            raise LookupError("Tenant not found which is supposed to be there")

    @classmethod
    @DB.connection_context()
    def user_gateway(cls, tenant_id):
        hashobj = hashlib.sha256(tenant_id.encode("utf-8"))
        return int(hashobj.hexdigest(), 16)%len(MINIO)


class UserTenantService(CommonService):
    model = UserTenant
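The new `user_gateway` deterministically maps a tenant to one of the configured MINIO endpoints by hashing the tenant id and taking it modulo the endpoint count, so the same tenant always lands on the same storage gateway with no lookup table. In miniature:

```python
# Miniature of the user_gateway mapping: hash the tenant id, mod the
# number of gateways; deterministic, stateless, evenly spread.
import hashlib

GATEWAYS = ["minio-0:9000", "minio-1:9000", "minio-2:9000"]  # stand-in list

def user_gateway(tenant_id: str) -> str:
    digest = hashlib.sha256(tenant_id.encode("utf-8")).hexdigest()
    return GATEWAYS[int(digest, 16) % len(GATEWAYS)]

# Same tenant -> same gateway on every call.
assert user_gateway("tenant-42") == user_gateway("tenant-42")
```

A plain modulo mapping does reshuffle tenants if the gateway list changes size; consistent hashing would avoid that, but for a fixed MINIO list the simple form suffices.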
@@ -18,23 +18,17 @@
# from beartype.claw import beartype_all  #  <-- you didn't sign up for this
# beartype_all(conf=BeartypeConf(violation_type=UserWarning))  # <-- emit warnings from all code

import logging
from api.utils.log_utils import initRootLogger
initRootLogger("ragflow_server")
for module in ["pdfminer"]:
    module_logger = logging.getLogger(module)
    module_logger.setLevel(logging.WARNING)
for module in ["peewee"]:
    module_logger = logging.getLogger(module)
    module_logger.handlers.clear()
    module_logger.propagate = True

import logging
import os
import signal
import sys
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
import threading

from werkzeug.serving import run_simple
from api import settings
@@ -47,16 +41,29 @@ from api.db.db_models import init_database_tables as init_web_db
from api.db.init_data import init_web_data
from api.versions import get_ragflow_version
from api.utils import show_configs
from rag.settings import print_rag_settings
from rag.utils.redis_conn import RedisDistributedLock

stop_event = threading.Event()

def update_progress():
    while True:
        time.sleep(3)
    redis_lock = RedisDistributedLock("update_progress", timeout=60)
    while not stop_event.is_set():
        try:
            if not redis_lock.acquire():
                continue
            DocumentService.update_progress()
            stop_event.wait(6)
        except Exception:
            logging.exception("update_progress exception")
        finally:
            redis_lock.release()

def signal_handler(sig, frame):
    logging.info("Received interrupt signal, shutting down...")
    stop_event.set()
    time.sleep(1)
    sys.exit(0)

if __name__ == '__main__':
    logging.info(r"""
@@ -75,6 +82,7 @@ if __name__ == '__main__':
    )
    show_configs()
    settings.init_settings()
    print_rag_settings()

    # init db
    init_web_db()
@@ -101,6 +109,9 @@ if __name__ == '__main__':
    RuntimeConfig.init_env()
    RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    thread = ThreadPoolExecutor(max_workers=1)
    thread.submit(update_progress)

@@ -117,4 +128,6 @@ if __name__ == '__main__':
        )
    except Exception:
        traceback.print_exc()
        stop_event.set()
        time.sleep(1)
        os.kill(os.getpid(), signal.SIGKILL)
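`update_progress` now runs as a lock-guarded singleton: whichever server instance acquires the Redis lock performs the progress sweep, the others skip that round, and `stop_event` lets the loop shut down cleanly on SIGINT/SIGTERM. A hedged sketch of the loop's shape with a dummy lock (RedisDistributedLock's exact acquire/release semantics are assumed here):

```python
# Hedged sketch of the lock-guarded worker loop above; DummyLock stands in
# for RedisDistributedLock, whose semantics are assumed, not reproduced.
import threading

class DummyLock:
    def acquire(self) -> bool:
        return True   # pretend we won the lock this round
    def release(self):
        pass

stop_event = threading.Event()

def worker(sweep, lock, interval=6.0):
    while not stop_event.is_set():
        try:
            if not lock.acquire():
                continue                 # another instance owns this round
            sweep()                      # do the actual periodic work
            stop_event.wait(interval)    # interruptible sleep
        finally:
            lock.release()

# stop_event.set() from a signal handler ends the loop promptly.
```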