Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)

Compare commits (1028 commits)
.github/ISSUE_TEMPLATE/agent_scenario_request.yml (new file, 46 lines)

@@ -0,0 +1,46 @@
+name: "❤️🔥ᴬᴳᴱᴺᵀ Agent scenario request"
+description: Propose a agent scenario request for RAGFlow.
+title: "[Agent Scenario Request]: "
+labels: ["❤️🔥ᴬᴳᴱᴺᵀ agent scenario"]
+body:
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
+  - type: textarea
+    attributes:
+      label: Is your feature request related to a scenario?
+      description: |
+        A clear and concise description of what the scenario is. Ex. I'm always frustrated when [...]
+      render: Markdown
+    validations:
+      required: false
+  - type: textarea
+    attributes:
+      label: Describe the feature you'd like
+      description: A clear and concise description of what you want to happen.
+    validations:
+      required: true
+  - type: textarea
+    attributes:
+      label: Documentation, adoption, use case
+      description: If you can, explain some scenarios how users might use this, situations it would be helpful in. Any API designs, mockups, or diagrams are also helpful.
+      render: Markdown
+    validations:
+      required: false
+  - type: textarea
+    attributes:
+      label: Additional information
+      description: |
+        Add any other context or screenshots about the feature request here.
+    validations:
+      required: false
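GitHub renders the file above as a structured issue form. A quick local sanity check before pushing is an ordinary YAML lint pass; a minimal sketch, assuming `yamllint` is installed (it validates YAML syntax only, not GitHub's issue-form schema):

```bash
# Catch indentation or quoting mistakes in the template before committing.
yamllint .github/ISSUE_TEMPLATE/agent_scenario_request.yml
```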
.github/workflows/release.yml (82 lines changed)

@@ -16,52 +16,52 @@ concurrency:

 jobs:
   release:
-    runs-on: [ "self-hosted", "overseas" ]
+    runs-on: [ "self-hosted", "ragflow-test" ]
     steps:
       - name: Ensure workspace ownership
-        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
+        run: echo "chown -R ${USER} ${GITHUB_WORKSPACE}" && sudo chown -R ${USER} ${GITHUB_WORKSPACE}

       # https://github.com/actions/checkout/blob/v3/README.md
       - name: Check out code
         uses: actions/checkout@v4
         with:
-          token: ${{ secrets.MY_GITHUB_TOKEN }} # Use the secret as an environment variable
+          token: ${{ secrets.GITHUB_TOKEN }} # Use the secret as an environment variable
           fetch-depth: 0
           fetch-tags: true

       - name: Prepare release body
         run: |
-          if [[ $GITHUB_EVENT_NAME == 'create' ]]; then
+          if [[ ${GITHUB_EVENT_NAME} == "create" ]]; then
             RELEASE_TAG=${GITHUB_REF#refs/tags/}
-            if [[ $RELEASE_TAG == 'nightly' ]]; then
+            if [[ ${RELEASE_TAG} == "nightly" ]]; then
              PRERELEASE=true
             else
              PRERELEASE=false
             fi
-            echo "Workflow triggered by create tag: $RELEASE_TAG"
+            echo "Workflow triggered by create tag: ${RELEASE_TAG}"
           else
             RELEASE_TAG=nightly
             PRERELEASE=true
             echo "Workflow triggered by schedule"
           fi
-          echo "RELEASE_TAG=$RELEASE_TAG" >> $GITHUB_ENV
-          echo "PRERELEASE=$PRERELEASE" >> $GITHUB_ENV
+          echo "RELEASE_TAG=${RELEASE_TAG}" >> ${GITHUB_ENV}
+          echo "PRERELEASE=${PRERELEASE}" >> ${GITHUB_ENV}
           RELEASE_DATETIME=$(date --rfc-3339=seconds)
-          echo Release $RELEASE_TAG created from $GITHUB_SHA at $RELEASE_DATETIME > release_body.md
+          echo Release ${RELEASE_TAG} created from ${GITHUB_SHA} at ${RELEASE_DATETIME} > release_body.md

       - name: Move the existing mutable tag
         # https://github.com/softprops/action-gh-release/issues/171
         run: |
           git fetch --tags
-          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+          if [[ ${GITHUB_EVENT_NAME} == "schedule" ]]; then
             # Determine if a given tag exists and matches a specific Git commit.
             # actions/checkout@v4 fetch-tags doesn't work when triggered by schedule
-            if [ "$(git rev-parse -q --verify "refs/tags/$RELEASE_TAG")" = "$GITHUB_SHA" ]; then
-              echo "mutable tag $RELEASE_TAG exists and matches $GITHUB_SHA"
+            if [ "$(git rev-parse -q --verify "refs/tags/${RELEASE_TAG}")" = "${GITHUB_SHA}" ]; then
+              echo "mutable tag ${RELEASE_TAG} exists and matches ${GITHUB_SHA}"
             else
-              git tag -f $RELEASE_TAG $GITHUB_SHA
-              git push -f origin $RELEASE_TAG:refs/tags/$RELEASE_TAG
-              echo "created/moved mutable tag $RELEASE_TAG to $GITHUB_SHA"
+              git tag -f ${RELEASE_TAG} ${GITHUB_SHA}
+              git push -f origin ${RELEASE_TAG}:refs/tags/${RELEASE_TAG}
+              echo "created/moved mutable tag ${RELEASE_TAG} to ${GITHUB_SHA}"
             fi
           fi

@@ -69,50 +69,26 @@ jobs:
       # https://github.com/actions/upload-release-asset has been replaced by https://github.com/softprops/action-gh-release
         uses: softprops/action-gh-release@v2
         with:
-          token: ${{ secrets.MY_GITHUB_TOKEN }} # Use the secret as an environment variable
+          token: ${{ secrets.GITHUB_TOKEN }} # Use the secret as an environment variable
           prerelease: ${{ env.PRERELEASE }}
           tag_name: ${{ env.RELEASE_TAG }}
           # The body field does not support environment variable substitution directly.
           body_path: release_body.md

-      # https://github.com/marketplace/actions/docker-login
-      - name: Login to Docker Hub
-        uses: docker/login-action@v3
-        with:
-          username: infiniflow
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
-      # https://github.com/marketplace/actions/build-and-push-docker-images
-      - name: Build and push full image
-        uses: docker/build-push-action@v6
-        with:
-          context: .
-          push: true
-          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}
-          file: Dockerfile
-          platforms: linux/amd64
-
-      # https://github.com/marketplace/actions/build-and-push-docker-images
-      - name: Build and push slim image
-        uses: docker/build-push-action@v6
-        with:
-          context: .
-          push: true
-          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
-          file: Dockerfile
-          build-args: LIGHTEN=1
-          platforms: linux/amd64
-
-      - name: Build ragflow-sdk
+      - name: Build and push ragflow-sdk
         if: startsWith(github.ref, 'refs/tags/v')
         run: |
-          cd sdk/python && \
-          uv build
+          cd sdk/python && uv build && uv publish --token ${{ secrets.PYPI_API_TOKEN }}

-      - name: Publish package distributions to PyPI
+      - name: Build and push ragflow-cli
         if: startsWith(github.ref, 'refs/tags/v')
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          packages-dir: sdk/python/dist/
-          password: ${{ secrets.PYPI_API_TOKEN }}
-          verbose: true
+        run: |
+          cd admin/client && uv build && uv publish --token ${{ secrets.PYPI_API_TOKEN }}
+
+      - name: Build and push image
+        run: |
+          sudo docker login --username infiniflow --password-stdin <<< ${{ secrets.DOCKERHUB_TOKEN }}
+          sudo docker build --build-arg NEED_MIRROR=1 -t infiniflow/ragflow:${RELEASE_TAG} -f Dockerfile .
+          sudo docker tag infiniflow/ragflow:${RELEASE_TAG} infiniflow/ragflow:latest
+          sudo docker push infiniflow/ragflow:${RELEASE_TAG}
+          sudo docker push infiniflow/ragflow:latest
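The reworked job publishes the SDK and the new ragflow-cli with plain `uv build && uv publish` instead of the pypa publish action. A minimal local sketch of the same path that stops short of uploading (assumes `uv` is installed; `PYPI_API_TOKEN` stands in for the repository secret):

```bash
# Build the SDK the same way the workflow does and inspect the artifacts.
cd sdk/python
uv build     # writes the sdist and wheel into dist/
ls dist/
# uv publish --token "${PYPI_API_TOKEN}"   # the workflow's actual upload step
```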
.github/workflows/tests.yml (234 lines changed)

@@ -9,8 +9,11 @@ on:
       - 'docs/**'
       - '*.md'
       - '*.mdx'
+  # The only difference between pull_request and pull_request_target is the context in which the workflow runs:
+  # — pull_request_target workflows use the workflow files from the default branch, and secrets are available.
+  # — pull_request workflows use the workflow files from the pull request branch, and secrets are unavailable.
   pull_request:
-    types: [ opened, synchronize, reopened, labeled ]
+    types: [ synchronize, ready_for_review ]
     paths-ignore:
       - 'docs/**'
       - '*.md'
@@ -28,26 +31,63 @@ jobs:
     name: ragflow_tests
    # https://docs.github.com/en/actions/using-jobs/using-conditions-to-control-job-execution
    # https://github.com/orgs/community/discussions/26261
-    if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'ci') }}
+    if: ${{ github.event_name != 'pull_request' || (github.event.pull_request.draft == false && contains(github.event.pull_request.labels.*.name, 'ci')) }}
-    runs-on: [ "self-hosted", "debug" ]
+    runs-on: [ "self-hosted", "ragflow-test" ]
     steps:
      # https://github.com/hmarr/debug-action
      #- uses: hmarr/debug-action@v2

-      - name: Show who triggered this workflow
+      - name: Ensure workspace ownership
         run: |
           echo "Workflow triggered by ${{ github.event_name }}"
+          echo "chown -R ${USER} ${GITHUB_WORKSPACE}" && sudo chown -R ${USER} ${GITHUB_WORKSPACE}
-
-      - name: Ensure workspace ownership
-        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

       # https://github.com/actions/checkout/issues/1781
       - name: Check out code
         uses: actions/checkout@v4
         with:
+          ref: ${{ (github.event_name == 'pull_request' || github.event_name == 'pull_request_target') && format('refs/pull/{0}/merge', github.event.pull_request.number) || github.sha }}
           fetch-depth: 0
           fetch-tags: true

+      - name: Check workflow duplication
+        if: ${{ !cancelled() && !failure() }}
+        run: |
+          if [[ ${GITHUB_EVENT_NAME} != "pull_request" && ${GITHUB_EVENT_NAME} != "schedule" ]]; then
+            HEAD=$(git rev-parse HEAD)
+            # Find a PR that introduced a given commit
+            gh auth login --with-token <<< "${{ secrets.GITHUB_TOKEN }}"
+            PR_NUMBER=$(gh pr list --search ${HEAD} --state merged --json number --jq .[0].number)
+            echo "HEAD=${HEAD}"
+            echo "PR_NUMBER=${PR_NUMBER}"
+            if [[ -n "${PR_NUMBER}" ]]; then
+              PR_SHA_FP=${RUNNER_WORKSPACE_PREFIX}/artifacts/${GITHUB_REPOSITORY}/PR_${PR_NUMBER}
+              if [[ -f "${PR_SHA_FP}" ]]; then
+                read -r PR_SHA PR_RUN_ID < "${PR_SHA_FP}"
+                # Calculate the hash of the current workspace content
+                HEAD_SHA=$(git rev-parse HEAD^{tree})
+                if [[ "${HEAD_SHA}" == "${PR_SHA}" ]]; then
+                  echo "Cancel myself since the workspace content hash is the same with PR #${PR_NUMBER} merged. See ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${PR_RUN_ID} for details."
+                  gh run cancel ${GITHUB_RUN_ID}
+                  while true; do
+                    status=$(gh run view ${GITHUB_RUN_ID} --json status -q .status)
+                    [ "${status}" = "completed" ] && break
+                    sleep 5
+                  done
+                  exit 1
+                fi
+              fi
+            fi
+          elif [[ ${GITHUB_EVENT_NAME} == "pull_request" ]]; then
+            PR_NUMBER=${{ github.event.pull_request.number }}
+            PR_SHA_FP=${RUNNER_WORKSPACE_PREFIX}/artifacts/${GITHUB_REPOSITORY}/PR_${PR_NUMBER}
+            # Calculate the hash of the current workspace content
+            PR_SHA=$(git rev-parse HEAD^{tree})
+            echo "PR #${PR_NUMBER} workspace content hash: ${PR_SHA}"
+            mkdir -p ${RUNNER_WORKSPACE_PREFIX}/artifacts/${GITHUB_REPOSITORY}
+            echo "${PR_SHA} ${GITHUB_RUN_ID}" > ${PR_SHA_FP}
+          fi

       # https://github.com/astral-sh/ruff-action
       - name: Static check with Ruff
         uses: astral-sh/ruff-action@v3
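The duplication check works because it compares tree hashes, not commit hashes: a squash merge rewrites the commit (new date, message, and parent), but if the resulting content is byte-identical to the already-tested PR head, both commits point at the same tree object. Illustrated in any git checkout:

```bash
# The commit hash changes whenever metadata changes.
git rev-parse HEAD
# The tree hash depends only on the checked-in content, so an
# already-tested snapshot is recognizable even after a squash merge.
git rev-parse 'HEAD^{tree}'
```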
@@ -55,121 +95,185 @@ jobs:
           version: ">=0.11.x"
           args: "check"

-      - name: Build ragflow:nightly-slim
+      - name: Check comments of changed Python files
+        if: ${{ false }}
         run: |
-          RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
-          sudo docker pull ubuntu:22.04
-          sudo docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
+          if [[ ${{ github.event_name }} == 'pull_request' || ${{ github.event_name }} == 'pull_request_target' ]]; then
+            CHANGED_FILES=$(git diff --name-only ${{ github.event.pull_request.base.sha }}...${{ github.event.pull_request.head.sha }} \
+              | grep -E '\.(py)$' || true)
+
+            if [ -n "$CHANGED_FILES" ]; then
+              echo "Check comments of changed Python files with check_comment_ascii.py"
+
+              readarray -t files <<< "$CHANGED_FILES"
+              HAS_ERROR=0
+
+              for file in "${files[@]}"; do
+                if [ -f "$file" ]; then
+                  if python3 check_comment_ascii.py "$file"; then
+                    echo "✅ $file"
+                  else
+                    echo "❌ $file"
+                    HAS_ERROR=1
+                  fi
+                fi
+              done
+
+              if [ $HAS_ERROR -ne 0 ]; then
+                exit 1
+              fi
+            else
+              echo "No Python files changed"
+            fi
+          fi
+
+      - name: Run unit test
+        run: |
+          uv sync --python 3.10 --group test --frozen
+          source .venv/bin/activate
+          which pytest || echo "pytest not in PATH"
+          echo "Start to run unit test"
+          python3 run_tests.py

       - name: Build ragflow:nightly
         run: |
-          sudo docker build --progress=plain --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
+          RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-${HOME}}
+          RAGFLOW_IMAGE=infiniflow/ragflow:${GITHUB_RUN_ID}
+          echo "RAGFLOW_IMAGE=${RAGFLOW_IMAGE}" >> ${GITHUB_ENV}
+          sudo docker pull ubuntu:22.04
+          sudo DOCKER_BUILDKIT=1 docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t ${RAGFLOW_IMAGE} .
+          if [[ ${GITHUB_EVENT_NAME} == "schedule" ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          echo "HTTP_API_TEST_LEVEL=${HTTP_API_TEST_LEVEL}" >> ${GITHUB_ENV}
+          echo "RAGFLOW_CONTAINER=${GITHUB_RUN_ID}-ragflow-cpu-1" >> ${GITHUB_ENV}

-      - name: Start ragflow:nightly-slim
-        run: |
-          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
-          sudo docker compose -f docker/docker-compose.yml up -d
-
-      - name: Stop ragflow:nightly-slim
-        if: always() # always run this step even if previous steps failed
-        run: |
-          sudo docker compose -f docker/docker-compose.yml down -v
-
       - name: Start ragflow:nightly
         run: |
-          echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly" >> docker/.env
-          sudo docker compose -f docker/docker-compose.yml up -d
+          # Determine runner number (default to 1 if not found)
+          RUNNER_NUM=$(sudo docker inspect $(hostname) --format '{{index .Config.Labels "com.docker.compose.container-number"}}' 2>/dev/null || true)
+          RUNNER_NUM=${RUNNER_NUM:-1}
+
+          # Compute port numbers using bash arithmetic
+          ES_PORT=$((1200 + RUNNER_NUM * 10))
+          OS_PORT=$((1201 + RUNNER_NUM * 10))
+          INFINITY_THRIFT_PORT=$((23817 + RUNNER_NUM * 10))
+          INFINITY_HTTP_PORT=$((23820 + RUNNER_NUM * 10))
+          INFINITY_PSQL_PORT=$((5432 + RUNNER_NUM * 10))
+          MYSQL_PORT=$((5455 + RUNNER_NUM * 10))
+          MINIO_PORT=$((9000 + RUNNER_NUM * 10))
+          MINIO_CONSOLE_PORT=$((9001 + RUNNER_NUM * 10))
+          REDIS_PORT=$((6379 + RUNNER_NUM * 10))
+          TEI_PORT=$((6380 + RUNNER_NUM * 10))
+          KIBANA_PORT=$((6601 + RUNNER_NUM * 10))
+          SVR_HTTP_PORT=$((9380 + RUNNER_NUM * 10))
+          ADMIN_SVR_HTTP_PORT=$((9381 + RUNNER_NUM * 10))
+          SVR_MCP_PORT=$((9382 + RUNNER_NUM * 10))
+          SANDBOX_EXECUTOR_MANAGER_PORT=$((9385 + RUNNER_NUM * 10))
+          SVR_WEB_HTTP_PORT=$((80 + RUNNER_NUM * 10))
+          SVR_WEB_HTTPS_PORT=$((443 + RUNNER_NUM * 10))
+
+          # Persist computed ports into docker/.env so docker-compose uses the correct host bindings
+          echo "" >> docker/.env
+          echo -e "ES_PORT=${ES_PORT}" >> docker/.env
+          echo -e "OS_PORT=${OS_PORT}" >> docker/.env
+          echo -e "INFINITY_THRIFT_PORT=${INFINITY_THRIFT_PORT}" >> docker/.env
+          echo -e "INFINITY_HTTP_PORT=${INFINITY_HTTP_PORT}" >> docker/.env
+          echo -e "INFINITY_PSQL_PORT=${INFINITY_PSQL_PORT}" >> docker/.env
+          echo -e "MYSQL_PORT=${MYSQL_PORT}" >> docker/.env
+          echo -e "MINIO_PORT=${MINIO_PORT}" >> docker/.env
+          echo -e "MINIO_CONSOLE_PORT=${MINIO_CONSOLE_PORT}" >> docker/.env
+          echo -e "REDIS_PORT=${REDIS_PORT}" >> docker/.env
+          echo -e "TEI_PORT=${TEI_PORT}" >> docker/.env
+          echo -e "KIBANA_PORT=${KIBANA_PORT}" >> docker/.env
+          echo -e "SVR_HTTP_PORT=${SVR_HTTP_PORT}" >> docker/.env
+          echo -e "ADMIN_SVR_HTTP_PORT=${ADMIN_SVR_HTTP_PORT}" >> docker/.env
+          echo -e "SVR_MCP_PORT=${SVR_MCP_PORT}" >> docker/.env
+          echo -e "SANDBOX_EXECUTOR_MANAGER_PORT=${SANDBOX_EXECUTOR_MANAGER_PORT}" >> docker/.env
+          echo -e "SVR_WEB_HTTP_PORT=${SVR_WEB_HTTP_PORT}" >> docker/.env
+          echo -e "SVR_WEB_HTTPS_PORT=${SVR_WEB_HTTPS_PORT}" >> docker/.env
+
+          echo -e "COMPOSE_PROFILES=\${COMPOSE_PROFILES},tei-cpu" >> docker/.env
+          echo -e "TEI_MODEL=BAAI/bge-small-en-v1.5" >> docker/.env
+          echo -e "RAGFLOW_IMAGE=${RAGFLOW_IMAGE}" >> docker/.env
+          echo "HOST_ADDRESS=http://host.docker.internal:${SVR_HTTP_PORT}" >> ${GITHUB_ENV}
+
+          sudo docker compose -f docker/docker-compose.yml -p ${GITHUB_RUN_ID} up -d
+          uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python --group test

       - name: Run sdk tests against Elasticsearch
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
-          export HOST_ADDRESS=http://host.docker.internal:9380
-          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          until sudo docker exec ${RAGFLOW_CONTAINER} curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
             echo "Waiting for service to be available..."
             sleep 5
           done
-          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
-            export HTTP_API_TEST_LEVEL=p3
-          else
-            export HTTP_API_TEST_LEVEL=p2
-          fi
-          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python && uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api
+          source .venv/bin/activate && pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api

       - name: Run frontend api tests against Elasticsearch
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
-          export HOST_ADDRESS=http://host.docker.internal:9380
-          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          until sudo docker exec ${RAGFLOW_CONTAINER} curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && UV_LINK_MODE=copy uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          source .venv/bin/activate && pytest -s --tb=short sdk/python/test/test_frontend_api/get_email.py sdk/python/test/test_frontend_api/test_dataset.py

       - name: Run http api tests against Elasticsearch
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
-          export HOST_ADDRESS=http://host.docker.internal:9380
-          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          until sudo docker exec ${RAGFLOW_CONTAINER} curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
             echo "Waiting for service to be available..."
             sleep 5
           done
-          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
-            export HTTP_API_TEST_LEVEL=p3
-          else
-            export HTTP_API_TEST_LEVEL=p2
-          fi
-          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api
+          source .venv/bin/activate && pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api

       - name: Stop ragflow:nightly
         if: always() # always run this step even if previous steps failed
         run: |
-          sudo docker compose -f docker/docker-compose.yml down -v
+          sudo docker compose -f docker/docker-compose.yml -p ${GITHUB_RUN_ID} down -v || true
+          sudo docker ps -a --filter "label=com.docker.compose.project=${GITHUB_RUN_ID}" -q | xargs -r sudo docker rm -f

       - name: Start ragflow:nightly
         run: |
-          sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml up -d
+          sed -i '1i DOC_ENGINE=infinity' docker/.env
+          sudo docker compose -f docker/docker-compose.yml -p ${GITHUB_RUN_ID} up -d

       - name: Run sdk tests against Infinity
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
-          export HOST_ADDRESS=http://host.docker.internal:9380
-          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          until sudo docker exec ${RAGFLOW_CONTAINER} curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
             echo "Waiting for service to be available..."
             sleep 5
           done
-          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
-            export HTTP_API_TEST_LEVEL=p3
-          else
-            export HTTP_API_TEST_LEVEL=p2
-          fi
-          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python && DOC_ENGINE=infinity uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api
+          source .venv/bin/activate && DOC_ENGINE=infinity pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api

       - name: Run frontend api tests against Infinity
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
-          export HOST_ADDRESS=http://host.docker.internal:9380
-          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          until sudo docker exec ${RAGFLOW_CONTAINER} curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && UV_LINK_MODE=copy uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          source .venv/bin/activate && DOC_ENGINE=infinity pytest -s --tb=short sdk/python/test/test_frontend_api/get_email.py sdk/python/test/test_frontend_api/test_dataset.py

       - name: Run http api tests against Infinity
         run: |
           export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
-          export HOST_ADDRESS=http://host.docker.internal:9380
-          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+          until sudo docker exec ${RAGFLOW_CONTAINER} curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
             echo "Waiting for service to be available..."
             sleep 5
           done
-          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
-            export HTTP_API_TEST_LEVEL=p3
-          else
-            export HTTP_API_TEST_LEVEL=p2
-          fi
-          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && DOC_ENGINE=infinity uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api
+          source .venv/bin/activate && DOC_ENGINE=infinity pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api

       - name: Stop ragflow:nightly
         if: always() # always run this step even if previous steps failed
         run: |
-          sudo DOC_ENGINE=infinity docker compose -f docker/docker-compose.yml down -v
+          # Sometimes `docker compose down` fail due to hang container, heavy load etc. Need to remove such containers to release resources(for example, listen ports).
+          sudo docker compose -f docker/docker-compose.yml -p ${GITHUB_RUN_ID} down -v || true
+          sudo docker ps -a --filter "label=com.docker.compose.project=${GITHUB_RUN_ID}" -q | xargs -r sudo docker rm -f
+          if [[ -n ${RAGFLOW_IMAGE} ]]; then
+            sudo docker rmi -f ${RAGFLOW_IMAGE}
+          fi
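The port scheme above exists so that several compose projects can share one host: every service port is offset by `RUNNER_NUM * 10`, and the unique `-p ${GITHUB_RUN_ID}` project name keeps container names from clashing. Evaluating the same arithmetic for a hypothetical second runner:

```bash
RUNNER_NUM=2
echo "ES_PORT=$((1200 + RUNNER_NUM * 10))"        # 1220
echo "MYSQL_PORT=$((5455 + RUNNER_NUM * 10))"     # 5475
echo "SVR_HTTP_PORT=$((9380 + RUNNER_NUM * 10))"  # 9400
```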
.gitignore (2 lines changed)

@@ -149,7 +149,7 @@ out
 # Nuxt.js build / generate output
 .nuxt
 dist
+ragflow_cli.egg-info
 # Gatsby files
 .cache/
 # Comment in the public line in if your project uses Gatsby and not Next.js
CLAUDE.md (new file, 116 lines)

@@ -0,0 +1,116 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+RAGFlow is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It's a full-stack application with:
+- Python backend (Flask-based API server)
+- React/TypeScript frontend (built with UmiJS)
+- Microservices architecture with Docker deployment
+- Multiple data stores (MySQL, Elasticsearch/Infinity, Redis, MinIO)
+
+## Architecture
+
+### Backend (`/api/`)
+- **Main Server**: `api/ragflow_server.py` - Flask application entry point
+- **Apps**: Modular Flask blueprints in `api/apps/` for different functionalities:
+  - `kb_app.py` - Knowledge base management
+  - `dialog_app.py` - Chat/conversation handling
+  - `document_app.py` - Document processing
+  - `canvas_app.py` - Agent workflow canvas
+  - `file_app.py` - File upload/management
+- **Services**: Business logic in `api/db/services/`
+- **Models**: Database models in `api/db/db_models.py`
+
+### Core Processing (`/rag/`)
+- **Document Processing**: `deepdoc/` - PDF parsing, OCR, layout analysis
+- **LLM Integration**: `rag/llm/` - Model abstractions for chat, embedding, reranking
+- **RAG Pipeline**: `rag/flow/` - Chunking, parsing, tokenization
+- **Graph RAG**: `graphrag/` - Knowledge graph construction and querying
+
+### Agent System (`/agent/`)
+- **Components**: Modular workflow components (LLM, retrieval, categorize, etc.)
+- **Templates**: Pre-built agent workflows in `agent/templates/`
+- **Tools**: External API integrations (Tavily, Wikipedia, SQL execution, etc.)
+
+### Frontend (`/web/`)
+- React/TypeScript with UmiJS framework
+- Ant Design + shadcn/ui components
+- State management with Zustand
+- Tailwind CSS for styling
+
+## Common Development Commands
+
+### Backend Development
+```bash
+# Install Python dependencies
+uv sync --python 3.10 --all-extras
+uv run download_deps.py
+pre-commit install
+
+# Start dependent services
+docker compose -f docker/docker-compose-base.yml up -d
+
+# Run backend (requires services to be running)
+source .venv/bin/activate
+export PYTHONPATH=$(pwd)
+bash docker/launch_backend_service.sh
+
+# Run tests
+uv run pytest
+
+# Linting
+ruff check
+ruff format
+```
+
+### Frontend Development
+```bash
+cd web
+npm install
+npm run dev    # Development server
+npm run build  # Production build
+npm run lint   # ESLint
+npm run test   # Jest tests
+```
+
+### Docker Development
+```bash
+# Full stack with Docker
+cd docker
+docker compose -f docker-compose.yml up -d
+
+# Check server status
+docker logs -f ragflow-server
+
+# Rebuild images
+docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
+```
+
+## Key Configuration Files
+
+- `docker/.env` - Environment variables for Docker deployment
+- `docker/service_conf.yaml.template` - Backend service configuration
+- `pyproject.toml` - Python dependencies and project configuration
+- `web/package.json` - Frontend dependencies and scripts
+
+## Testing
+
+- **Python**: pytest with markers (p1/p2/p3 priority levels)
+- **Frontend**: Jest with React Testing Library
+- **API Tests**: HTTP API and SDK tests in `test/` and `sdk/python/test/`
+
+## Database Engines
+
+RAGFlow supports switching between Elasticsearch (default) and Infinity:
+- Set `DOC_ENGINE=infinity` in `docker/.env` to use Infinity
+- Requires container restart: `docker compose down -v && docker compose up -d`
+
+## Development Environment Requirements
+
+- Python 3.10-3.12
+- Node.js >=18.20.4
+- Docker & Docker Compose
+- uv package manager
+- 16GB+ RAM, 50GB+ disk space
Dockerfile
44
Dockerfile
@ -4,26 +4,16 @@ USER root
|
|||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
|
||||||
ARG NEED_MIRROR=0
|
ARG NEED_MIRROR=0
|
||||||
ARG LIGHTEN=0
|
|
||||||
ENV LIGHTEN=${LIGHTEN}
|
|
||||||
|
|
||||||
WORKDIR /ragflow
|
WORKDIR /ragflow
|
||||||
|
|
||||||
# Copy models downloaded via download_deps.py
|
# Copy models downloaded via download_deps.py
|
||||||
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
|
RUN mkdir -p /ragflow/rag/res/deepdoc /root/.ragflow
|
||||||
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
|
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
|
||||||
cp /huggingface.co/InfiniFlow/huqie/huqie.txt.trie /ragflow/rag/res/ && \
|
|
||||||
tar --exclude='.*' -cf - \
|
tar --exclude='.*' -cf - \
|
||||||
/huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
|
/huggingface.co/InfiniFlow/text_concat_xgb_v1.0 \
|
||||||
/huggingface.co/InfiniFlow/deepdoc \
|
/huggingface.co/InfiniFlow/deepdoc \
|
||||||
| tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
|
| tar -xf - --strip-components=3 -C /ragflow/rag/res/deepdoc
|
||||||
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co,target=/huggingface.co \
|
|
||||||
if [ "$LIGHTEN" != "1" ]; then \
|
|
||||||
(tar -cf - \
|
|
||||||
/huggingface.co/BAAI/bge-large-zh-v1.5 \
|
|
||||||
/huggingface.co/maidalun1020/bce-embedding-base_v1 \
|
|
||||||
| tar -xf - --strip-components=2 -C /root/.ragflow) \
|
|
||||||
fi
|
|
||||||
|
|
||||||
# https://github.com/chrismattmann/tika-python
|
# https://github.com/chrismattmann/tika-python
|
||||||
# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
|
# This is the only way to run python-tika without internet access. Without this set, the default is to check the tika version and pull latest every time from Apache.
|
||||||
@ -60,14 +50,16 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
|
|||||||
apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
|
apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
|
||||||
apt install -y libjemalloc-dev && \
|
apt install -y libjemalloc-dev && \
|
||||||
apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
|
apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
|
||||||
apt install -y ghostscript
|
apt install -y ghostscript && \
|
||||||
|
apt install -y pandoc && \
|
||||||
|
apt install -y texlive
|
||||||
|
|
||||||
RUN if [ "$NEED_MIRROR" == "1" ]; then \
|
RUN if [ "$NEED_MIRROR" == "1" ]; then \
|
||||||
pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
|
pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \
|
||||||
pip3 config set global.trusted-host mirrors.aliyun.com; \
|
pip3 config set global.trusted-host pypi.tuna.tsinghua.edu.cn; \
|
||||||
mkdir -p /etc/uv && \
|
mkdir -p /etc/uv && \
|
||||||
echo "[[index]]" > /etc/uv/uv.toml && \
|
echo "[[index]]" > /etc/uv/uv.toml && \
|
||||||
echo 'url = "https://mirrors.aliyun.com/pypi/simple"' >> /etc/uv/uv.toml && \
|
echo 'url = "https://pypi.tuna.tsinghua.edu.cn/simple"' >> /etc/uv/uv.toml && \
|
||||||
echo "default = true" >> /etc/uv/uv.toml; \
|
echo "default = true" >> /etc/uv/uv.toml; \
|
||||||
fi; \
|
fi; \
|
||||||
pipx install uv
|
pipx install uv
|
||||||
@ -86,12 +78,12 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
|
|||||||
# A modern version of cargo is needed for the latest version of the Rust compiler.
|
# A modern version of cargo is needed for the latest version of the Rust compiler.
|
||||||
RUN apt update && apt install -y curl build-essential \
|
RUN apt update && apt install -y curl build-essential \
|
||||||
&& if [ "$NEED_MIRROR" == "1" ]; then \
|
&& if [ "$NEED_MIRROR" == "1" ]; then \
|
||||||
# Use TUNA mirrors for rustup/rust dist files
|
# Use TUNA mirrors for rustup/rust dist files \
|
||||||
export RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup"; \
|
export RUSTUP_DIST_SERVER="https://mirrors.tuna.tsinghua.edu.cn/rustup"; \
|
||||||
export RUSTUP_UPDATE_ROOT="https://mirrors.tuna.tsinghua.edu.cn/rustup/rustup"; \
|
export RUSTUP_UPDATE_ROOT="https://mirrors.tuna.tsinghua.edu.cn/rustup/rustup"; \
|
||||||
echo "Using TUNA mirrors for Rustup."; \
|
echo "Using TUNA mirrors for Rustup."; \
|
||||||
fi; \
|
fi; \
|
||||||
# Force curl to use HTTP/1.1
|
# Force curl to use HTTP/1.1 \
|
||||||
curl --proto '=https' --tlsv1.2 --http1.1 -sSf https://sh.rustup.rs | bash -s -- -y --profile minimal \
|
curl --proto '=https' --tlsv1.2 --http1.1 -sSf https://sh.rustup.rs | bash -s -- -y --profile minimal \
|
||||||
&& echo 'export PATH="/root/.cargo/bin:${PATH}"' >> /root/.bashrc
|
&& echo 'export PATH="/root/.cargo/bin:${PATH}"' >> /root/.bashrc
|
||||||
|
|
||||||
@ -151,15 +143,11 @@ COPY pyproject.toml uv.lock ./
|
|||||||
# uv records index url into uv.lock but doesn't failover among multiple indexes
|
# uv records index url into uv.lock but doesn't failover among multiple indexes
|
||||||
RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
|
RUN --mount=type=cache,id=ragflow_uv,target=/root/.cache/uv,sharing=locked \
|
||||||
if [ "$NEED_MIRROR" == "1" ]; then \
|
if [ "$NEED_MIRROR" == "1" ]; then \
|
||||||
sed -i 's|pypi.org|mirrors.aliyun.com/pypi|g' uv.lock; \
|
sed -i 's|pypi.org|pypi.tuna.tsinghua.edu.cn|g' uv.lock; \
|
||||||
else \
|
else \
|
||||||
sed -i 's|mirrors.aliyun.com/pypi|pypi.org|g' uv.lock; \
|
sed -i 's|pypi.tuna.tsinghua.edu.cn|pypi.org|g' uv.lock; \
|
||||||
fi; \
|
fi; \
|
||||||
if [ "$LIGHTEN" == "1" ]; then \
|
uv sync --python 3.10 --frozen
|
||||||
uv sync --python 3.10 --frozen; \
|
|
||||||
else \
|
|
||||||
uv sync --python 3.10 --frozen --all-extras; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
COPY web web
|
COPY web web
|
||||||
COPY docs docs
|
COPY docs docs
|
||||||
@ -169,11 +157,7 @@ RUN --mount=type=cache,id=ragflow_npm,target=/root/.npm,sharing=locked \
|
|||||||
COPY .git /ragflow/.git
|
COPY .git /ragflow/.git
|
||||||
|
|
||||||
RUN version_info=$(git describe --tags --match=v* --first-parent --always); \
|
RUN version_info=$(git describe --tags --match=v* --first-parent --always); \
|
||||||
if [ "$LIGHTEN" == "1" ]; then \
|
version_info="$version_info"; \
|
||||||
version_info="$version_info slim"; \
|
|
||||||
else \
|
|
||||||
version_info="$version_info full"; \
|
|
||||||
fi; \
|
|
||||||
echo "RAGFlow version: $version_info"; \
|
echo "RAGFlow version: $version_info"; \
|
||||||
echo $version_info > /ragflow/VERSION
|
echo $version_info > /ragflow/VERSION
|
||||||
|
|
||||||
@ -191,6 +175,7 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
|
|||||||
ENV PYTHONPATH=/ragflow/
|
ENV PYTHONPATH=/ragflow/
|
||||||
|
|
||||||
COPY web web
|
COPY web web
|
||||||
|
COPY admin admin
|
||||||
COPY api api
|
COPY api api
|
||||||
COPY conf conf
|
COPY conf conf
|
||||||
COPY deepdoc deepdoc
|
COPY deepdoc deepdoc
|
||||||
@ -201,6 +186,7 @@ COPY agentic_reasoning agentic_reasoning
|
|||||||
COPY pyproject.toml uv.lock ./
|
COPY pyproject.toml uv.lock ./
|
||||||
COPY mcp mcp
|
COPY mcp mcp
|
||||||
COPY plugin plugin
|
COPY plugin plugin
|
||||||
|
COPY common common
|
||||||
|
|
||||||
COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
|
COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
|
||||||
COPY docker/entrypoint.sh ./
|
COPY docker/entrypoint.sh ./
|
||||||
|
|||||||
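With `LIGHTEN` removed, a single build now produces the only edition. A quick sketch of how this Dockerfile might be invoked, reusing the `nightly` tag that appears elsewhere on this page; the `NEED_MIRROR=1` variant is an assumption based on the build arg shown above:

```bash
# Default build (one edition only; no LIGHTEN build-arg, no -slim tag)
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .

# If PyPI/rustup are slow or unreachable, route them through the TUNA mirrors
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 \
  -f Dockerfile -t infiniflow/ragflow:nightly .
```
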
Dockerfile_tei (new file, 14 lines)

````diff
@@ -0,0 +1,14 @@
+FROM ghcr.io/huggingface/text-embeddings-inference:cpu-1.8
+
+# uv tool install huggingface_hub
+# hf download --local-dir tei_data/BAAI/bge-small-en-v1.5 BAAI/bge-small-en-v1.5
+# hf download --local-dir tei_data/BAAI/bge-m3 BAAI/bge-m3
+# hf download --local-dir tei_data/Qwen/Qwen3-Embedding-0.6B Qwen/Qwen3-Embedding-0.6B
+COPY tei_data /data
+
+# curl -X POST http://localhost:6380/embed -H "Content-Type: application/json" -d '{"inputs": "Hello, world! This is a test sentence."}'
+# curl -X POST http://tei:80/embed -H "Content-Type: application/json" -d '{"inputs": "Hello, world! This is a test sentence."}'
+# [[-0.058816575,0.019564206,0.026697718,...]]
+
+# curl -X POST http://localhost:6380/v1/embeddings -H "Content-Type: application/json" -d '{"input": "Hello, world! This is a test sentence."}'
+# {"object":"list","data":[{"object":"embedding","embedding":[-0.058816575,0.019564206,...],"index":0}],"model":"BAAI/bge-small-en-v1.5","usage":{"prompt_tokens":12,"total_tokens":12}}
````
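A minimal sketch of building and exercising this TEI sidecar locally, pieced together from the comments above; the `ragflow-tei` tag, the 6380-to-80 port mapping, and the `--model-id` flag (a standard text-embeddings-inference option) are assumptions, not commands documented here:

```bash
# Fetch one model into the layout that COPY expects
uv tool install huggingface_hub
hf download --local-dir tei_data/BAAI/bge-small-en-v1.5 BAAI/bge-small-en-v1.5

# Build and run; TEI serves on port 80 inside the container
docker build -f Dockerfile_tei -t ragflow-tei .
docker run -d -p 6380:80 ragflow-tei --model-id /data/BAAI/bge-small-en-v1.5

# Query the native embed endpoint
curl -X POST http://localhost:6380/embed \
  -H "Content-Type: application/json" \
  -d '{"inputs": "Hello, world! This is a test sentence."}'
```
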
README.md (102 lines changed)

````diff
@@ -1,6 +1,6 @@
 <div align="center">
 <a href="https://demo.ragflow.io/">
-<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
+<img src="web/src/assets/logo-with-text.svg" width="520" alt="ragflow logo">
 </a>
 </div>
 
@@ -22,7 +22,7 @@
 <img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
+<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -43,7 +43,9 @@
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
-#
+<div align="center" style="margin-top:20px;margin-bottom:20px;">
+<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
+</div>
 
 <div align="center">
 <a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
@@ -59,8 +61,7 @@
 - 🔎 [System Architecture](#-system-architecture)
 - 🎬 [Get Started](#-get-started)
 - 🔧 [Configurations](#-configurations)
-- 🔧 [Build a docker image without embedding models](#-build-a-docker-image-without-embedding-models)
-- 🔧 [Build a docker image including embedding models](#-build-a-docker-image-including-embedding-models)
+- 🔧 [Build a Docker image](#-build-a-docker-image)
 - 🔨 [Launch service from source for development](#-launch-service-from-source-for-development)
 - 📚 [Documentation](#-documentation)
 - 📜 [Roadmap](#-roadmap)
@@ -71,10 +72,7 @@
 
 ## 💡 What is RAGFlow?
 
-[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document
-understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models)
-to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted
-data.
+[RAGFlow](https://ragflow.io/) is a leading open-source Retrieval-Augmented Generation (RAG) engine that fuses cutting-edge RAG with Agent capabilities to create a superior context layer for LLMs. It offers a streamlined RAG workflow adaptable to enterprises of any scale. Powered by a converged context engine and pre-built agent templates, RAGFlow enables developers to transform complex data into high-fidelity, production-ready AI systems with exceptional efficiency and precision.
 
 ## 🎮 Demo
 
@@ -87,15 +85,15 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
 
 ## 🔥 Latest Updates
 
+- 2025-11-19 Supports Gemini 3 Pro.
+- 2025-11-12 Supports data synchronization from Confluence, S3, Notion, Discord, Google Drive.
+- 2025-10-23 Supports MinerU & Docling as document parsing methods.
+- 2025-10-15 Supports orchestrable ingestion pipeline.
 - 2025-08-08 Supports OpenAI's latest GPT-5 series models.
-- 2025-08-04 Supports new models, including Kimi K2 and Grok 4.
 - 2025-08-01 Supports agentic workflow and MCP.
 - 2025-05-23 Adds a Python/JavaScript code executor component to Agent.
 - 2025-05-05 Supports cross-language query.
 - 2025-03-19 Supports using a multi-modal model to make sense of images within PDF or DOCX files.
-- 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
-- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
-- 2024-08-22 Support text to SQL statements through RAG.
 
 ## 🎉 Stay Tuned
 
@@ -138,7 +136,7 @@ releases! 🌟
 ## 🔎 System Architecture
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
+<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
 </div>
 
 ## 🎬 Get Started
@@ -177,41 +175,48 @@ releases! 🌟
 > ```bash
 > vm.max_map_count=262144
 > ```
+>
 2. Clone the repo:
 
    ```bash
   $ git clone https://github.com/infiniflow/ragflow.git
   ```
 
 3. Start up the server using the pre-built Docker images:
 
 > [!CAUTION]
 > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
 > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.
 
-> The command below downloads the `v0.20.1-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.20.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1` for the full edition `v0.20.1`.
+> The command below downloads the `v0.22.1` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.22.1`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server.
 
   ```bash
  $ cd ragflow/docker
-  # Use CPU for embedding and DeepDoc tasks:
+  # Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases)
+  # This step ensures the **entrypoint.sh** file in the code matches the Docker image version.
+  # git checkout v0.22.1
+
+  # Use CPU for DeepDoc tasks:
  $ docker compose -f docker-compose.yml up -d
 
-  # To use GPU to accelerate embedding and DeepDoc tasks:
-  # docker compose -f docker-compose-gpu.yml up -d
+  # To use GPU to accelerate DeepDoc tasks:
+  # sed -i '1i DEVICE=gpu' .env
+  # docker compose -f docker-compose.yml up -d
  ```
 
-| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
-|-------------------|-----------------|-----------------------|--------------------------|
-| v0.20.1           | ≈9              | :heavy_check_mark:    | Stable release           |
-| v0.20.1-slim      | ≈2              | ❌                     | Stable release           |
-| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
-| nightly-slim      | ≈2              | ❌                     | _Unstable_ nightly build |
+> Note: Prior to `v0.22.0`, we provided both images with embedding models and slim images without embedding models. Details as follows:
+
+| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
+| ----------------- | --------------- | --------------------- | ------------------------ |
+| v0.21.1           | ≈9              | ✔️                     | Stable release           |
+| v0.21.1-slim      | ≈2              | ❌                     | Stable release           |
+
+> Starting with `v0.22.0`, we ship only the slim edition and no longer append the **-slim** suffix to the image tag.
 
 4. Check the server status after having the server up and running:
 
   ```bash
-  $ docker logs -f ragflow-server
+  $ docker logs -f docker-ragflow-cpu-1
  ```
 
 _The following output confirms a successful launch of the system:_
@@ -229,14 +234,17 @@
 
 > If you skip this confirmation step and directly log in to RAGFlow, your browser may prompt a `network anormal`
 > error because, at that moment, your RAGFlow may not be fully initialized.
+>
 5. In your web browser, enter the IP address of your server and log in to RAGFlow.
 
 > With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**sans** port number) as the default
 > HTTP serving port `80` can be omitted when using the default configurations.
+>
 6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update
    the `API_KEY` field with the corresponding API key.
 
 > See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.
+>
 
 _The show is on!_
 
@@ -275,7 +283,6 @@ RAGFlow uses Elasticsearch by default for storing full text and vectors. To swit
 > `-v` will delete the docker container volumes, and the existing data will be cleared.
-
 2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.
 
 3. Start the containers:
 
 ```bash
@@ -285,20 +292,10 @@
 > [!WARNING]
 > Switching to Infinity on a Linux/arm64 machine is not yet officially supported.
 
-## 🔧 Build a Docker image without embedding models
+## 🔧 Build a Docker image
 
 This image is approximately 2 GB in size and relies on external LLM and embedding services.
 
-```bash
-git clone https://github.com/infiniflow/ragflow.git
-cd ragflow/
-docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
-```
-
-## 🔧 Build a Docker image including embedding models
-
-This image is approximately 9 GB in size. As it includes embedding models, it relies on external LLM services only.
-
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
@@ -307,22 +304,20 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 
 ## 🔨 Launch service from source for development
 
-1. Install uv, or skip this step if it is already installed:
+1. Install `uv` and `pre-commit`, or skip this step if they are already installed:
 
   ```bash
  pipx install uv pre-commit
  ```
 
 2. Clone the source code and install Python dependencies:
 
  ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+uv sync --python 3.10 # install RAGFlow dependent python modules
 uv run download_deps.py
 pre-commit install
 ```
 
 3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:
 
 ```bash
@@ -334,22 +329,23 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 ```
 127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
 ```
 
 4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
 
 ```bash
 export HF_ENDPOINT=https://hf-mirror.com
 ```
 
 5. If your operating system does not have jemalloc, please install it as follows:
 
 ```bash
-# ubuntu
+# Ubuntu
 sudo apt-get install libjemalloc-dev
-# centos
+# CentOS
 sudo yum install jemalloc
+# OpenSUSE
+sudo zypper install jemalloc
+# macOS
+sudo brew install jemalloc
 ```
 
 6. Launch backend service:
 
 ```bash
@@ -357,14 +353,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 export PYTHONPATH=$(pwd)
 bash docker/launch_backend_service.sh
 ```
 
 7. Install frontend dependencies:
 
 ```bash
 cd web
 npm install
 ```
 
 8. Launch frontend service:
 
 ```bash
@@ -374,14 +368,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 _The following output confirms a successful launch of the system:_
 
 
 9. Stop RAGFlow front-end and back-end service after development is complete:
 
 ```bash
 pkill -f "ragflow_server.py|task_executor.py"
 ```
 
 ## 📚 Documentation
 
 - [Quickstart](https://ragflow.io/docs/dev/)
````
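One behavioral change worth calling out from the README diff above: GPU acceleration no longer uses a separate `docker-compose-gpu.yml`; instead a `DEVICE=gpu` entry in `docker/.env` drives the same compose file. A sketch of the new flow exactly as the diff describes it:

```bash
cd ragflow/docker
# Prepend DEVICE=gpu to .env to enable GPU-accelerated DeepDoc tasks
sed -i '1i DEVICE=gpu' .env
docker compose -f docker-compose.yml up -d
```
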
README_id.md (91 lines changed)

````diff
@@ -1,6 +1,6 @@
 <div align="center">
 <a href="https://demo.ragflow.io/">
-<img src="web/src/assets/logo-with-text.png" width="520" alt="Logo ragflow">
+<img src="web/src/assets/logo-with-text.svg" width="520" alt="Logo ragflow">
 </a>
 </div>
 
@@ -22,7 +22,7 @@
 <img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
 </a>
 <a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
-<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
+<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
 </a>
 <a href="https://github.com/infiniflow/ragflow/releases/latest">
 <img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
@@ -43,7 +43,13 @@
 <a href="https://demo.ragflow.io">Demo</a>
 </h4>
 
-#
+<div align="center" style="margin-top:20px;margin-bottom:20px;">
+<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
+</div>
+
+<div align="center">
+<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+</div>
 
 <details open>
 <summary><b>📕 Daftar Isi </b> </summary>
@@ -55,8 +61,7 @@
 - 🔎 [Arsitektur Sistem](#-arsitektur-sistem)
 - 🎬 [Mulai](#-mulai)
 - 🔧 [Konfigurasi](#-konfigurasi)
-- 🔧 [Membangun Image Docker tanpa Model Embedding](#-membangun-image-docker-tanpa-model-embedding)
-- 🔧 [Membangun Image Docker dengan Model Embedding](#-membangun-image-docker-dengan-model-embedding)
+- 🔧 [Membangun Image Docker](#-membangun-docker-image)
 - 🔨 [Meluncurkan aplikasi dari Sumber untuk Pengembangan](#-meluncurkan-aplikasi-dari-sumber-untuk-pengembangan)
 - 📚 [Dokumentasi](#-dokumentasi)
 - 📜 [Peta Jalan](#-peta-jalan)
@@ -67,7 +72,7 @@
 
 ## 💡 Apa Itu RAGFlow?
 
-[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source berbasis pemahaman dokumen yang mendalam. Platform ini menyediakan alur kerja RAG yang efisien untuk bisnis dengan berbagai skala, menggabungkan LLM (Large Language Models) untuk menyediakan kemampuan tanya-jawab yang benar dan didukung oleh referensi dari data terstruktur kompleks.
+[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source terkemuka yang mengintegrasikan teknologi RAG mutakhir dengan kemampuan Agent untuk menciptakan lapisan kontekstual superior bagi LLM. Menyediakan alur kerja RAG yang efisien dan dapat diadaptasi untuk perusahaan segala skala. Didukung oleh mesin konteks terkonvergensi dan template Agent yang telah dipra-bangun, RAGFlow memungkinkan pengembang mengubah data kompleks menjadi sistem AI kesetiaan-tinggi dan siap-produksi dengan efisiensi dan presisi yang luar biasa.
 
 ## 🎮 Demo
 
@@ -80,13 +85,15 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 
 ## 🔥 Pembaruan Terbaru
 
+- 2025-11-19 Mendukung Gemini 3 Pro.
+- 2025-11-12 Mendukung sinkronisasi data dari Confluence, S3, Notion, Discord, Google Drive.
+- 2025-10-23 Mendukung MinerU & Docling sebagai metode penguraian dokumen.
+- 2025-10-15 Dukungan untuk jalur data yang terorkestrasi.
 - 2025-08-08 Mendukung model seri GPT-5 terbaru dari OpenAI.
-- 2025-08-04 Mendukung model baru, termasuk Kimi K2 dan Grok 4.
 - 2025-08-01 Mendukung alur kerja agen dan MCP.
 - 2025-05-23 Menambahkan komponen pelaksana kode Python/JS ke Agen.
 - 2025-05-05 Mendukung kueri lintas bahasa.
 - 2025-03-19 Mendukung penggunaan model multi-modal untuk memahami gambar di dalam file PDF atau DOCX.
-- 2025-02-28 dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
 - 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
 - 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.
 
@@ -129,7 +136,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 ## 🔎 Arsitektur Sistem
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
-<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
+<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
 </div>
 
 ## 🎬 Mulai
@@ -168,41 +175,48 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
 > ```bash
 > vm.max_map_count=262144
 > ```
+>
 2. Clone repositori:
 
   ```bash
  $ git clone https://github.com/infiniflow/ragflow.git
  ```
 
 3. Bangun image Docker pre-built dan jalankan server:
 
 > [!CAUTION]
 > Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
 > Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).
 
-> Perintah di bawah ini mengunduh edisi v0.20.1-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.20.1-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1 untuk edisi lengkap v0.20.1.
+> Perintah di bawah ini mengunduh edisi v0.22.1 dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.22.1, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server.
 
  ```bash
 $ cd ragflow/docker
-# Use CPU for embedding and DeepDoc tasks:
+# Opsional: gunakan tag stabil (lihat releases: https://github.com/infiniflow/ragflow/releases)
+# This step ensures the **entrypoint.sh** file in the code matches the Docker image version.
+# git checkout v0.22.1
+
+# Use CPU for DeepDoc tasks:
 $ docker compose -f docker-compose.yml up -d
 
-# To use GPU to accelerate embedding and DeepDoc tasks:
-# docker compose -f docker-compose-gpu.yml up -d
+# To use GPU to accelerate DeepDoc tasks:
+# sed -i '1i DEVICE=gpu' .env
+# docker compose -f docker-compose.yml up -d
 ```
 
+> Catatan: Sebelum `v0.22.0`, kami menyediakan image dengan model embedding dan image slim tanpa model embedding. Detailnya sebagai berikut:
+
 | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
 | ----------------- | --------------- | --------------------- | ------------------------ |
-| v0.20.1           | ≈9              | :heavy_check_mark:    | Stable release           |
-| v0.20.1-slim      | ≈2              | ❌                     | Stable release           |
-| nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
-| nightly-slim      | ≈2              | ❌                     | _Unstable_ nightly build |
+| v0.21.1           | ≈9              | ✔️                     | Stable release           |
+| v0.21.1-slim      | ≈2              | ❌                     | Stable release           |
+
+> Mulai dari `v0.22.0`, kami hanya menyediakan edisi slim dan tidak lagi menambahkan akhiran **-slim** pada tag image.
 
 1. Periksa status server setelah server aktif dan berjalan:
 
  ```bash
-$ docker logs -f ragflow-server
+$ docker logs -f docker-ragflow-cpu-1
 ```
 
 _Output berikut menandakan bahwa sistem berhasil diluncurkan:_
@@ -220,14 +234,17 @@ $ docker compose -f docker-compose.yml up -d
 
 > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
 > karena RAGFlow mungkin belum sepenuhnya siap.
+>
 2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
 
 > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
 > port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
+>
 3. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
    bidang `API_KEY` dengan kunci API yang sesuai.
 
 > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.
+>
 
 _Sistem telah siap digunakan!_
 
@@ -249,20 +266,10 @@ Pembaruan konfigurasi ini memerlukan reboot semua kontainer agar efektif:
 > $ docker compose -f docker-compose.yml up -d
 > ```
 
-## 🔧 Membangun Docker Image tanpa Model Embedding
+## 🔧 Membangun Docker Image
 
 Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan embedding.
 
-```bash
-git clone https://github.com/infiniflow/ragflow.git
-cd ragflow/
-docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
-```
-
-## 🔧 Membangun Docker Image Termasuk Model Embedding
-
-Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hanya bergantung pada aplikasi LLM eksternal.
-
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
@@ -271,22 +278,20 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 
 ## 🔨 Menjalankan Aplikasi dari untuk Pengembangan
 
-1. Instal uv, atau lewati langkah ini jika sudah terinstal:
+1. Instal `uv` dan `pre-commit`, atau lewati langkah ini jika sudah terinstal:
 
  ```bash
 pipx install uv pre-commit
 ```
 
 2. Clone kode sumber dan instal dependensi Python:
 
 ```bash
 git clone https://github.com/infiniflow/ragflow.git
 cd ragflow/
-uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+uv sync --python 3.10 # install RAGFlow dependent python modules
 uv run download_deps.py
 pre-commit install
 ```
 
 3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:
 
 ```bash
@@ -298,13 +303,11 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 ```
 127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
 ```
 
 4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:
 
 ```bash
 export HF_ENDPOINT=https://hf-mirror.com
 ```
 
 5. Jika sistem operasi Anda tidak memiliki jemalloc, instal sebagai berikut:
 
 ```bash
@@ -312,8 +315,9 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 sudo apt-get install libjemalloc-dev
 # centos
 sudo yum install jemalloc
+# mac
+sudo brew install jemalloc
 ```
 
 6. Jalankan aplikasi backend:
 
 ```bash
@@ -321,14 +325,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 export PYTHONPATH=$(pwd)
 bash docker/launch_backend_service.sh
 ```
 
 7. Instal dependensi frontend:
 
 ```bash
 cd web
 npm install
 ```
 
 8. Jalankan aplikasi frontend:
 
 ```bash
@@ -338,15 +340,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
 _Output berikut menandakan bahwa sistem berhasil diluncurkan:_
 
 
 
 9. Hentikan layanan front-end dan back-end RAGFlow setelah pengembangan selesai:
 
 ```bash
 pkill -f "ragflow_server.py|task_executor.py"
 ```
 
 ## 📚 Dokumentasi
 
 - [Quickstart](https://ragflow.io/docs/dev/)
````
101
README_ja.md
101
README_ja.md
@ -1,6 +1,6 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
<a href="https://demo.ragflow.io/">
|
<a href="https://demo.ragflow.io/">
|
||||||
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
|
<img src="web/src/assets/logo-with-text.svg" width="350" alt="ragflow logo">
|
||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
@ -22,7 +22,7 @@
|
|||||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
|
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||||
@ -43,11 +43,17 @@
|
|||||||
<a href="https://demo.ragflow.io">Demo</a>
|
<a href="https://demo.ragflow.io">Demo</a>
|
||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
#
|
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||||
|
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||||
|
</div>
|
||||||
|
|
||||||
## 💡 RAGFlow とは?
|
## 💡 RAGFlow とは?
|
||||||
|
|
||||||
[RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
|
[RAGFlow](https://ragflow.io/) は、先進的なRAG(Retrieval-Augmented Generation)技術と Agent 機能を融合し、大規模言語モデル(LLM)に優れたコンテキスト層を構築する最先端のオープンソース RAG エンジンです。あらゆる規模の企業に対応可能な合理化された RAG ワークフローを提供し、統合型コンテキストエンジンと事前構築されたAgentテンプレートにより、開発者が複雑なデータを驚異的な効率性と精度で高精細なプロダクションレディAIシステムへ変換することを可能にします。
|
||||||
|
|
||||||
## 🎮 Demo
|
## 🎮 Demo
|
||||||
|
|
||||||
@ -60,13 +66,15 @@
|
|||||||
|
|
||||||
## 🔥 最新情報
|
## 🔥 最新情報
|
||||||
|
|
||||||
|
- 2025-11-19 Gemini 3 Proをサポートしています
|
||||||
|
- 2025-11-12 Confluence、S3、Notion、Discord、Google Drive からのデータ同期をサポートします。
|
||||||
|
- 2025-10-23 ドキュメント解析方法として MinerU と Docling をサポートします。
|
||||||
|
- 2025-10-15 オーケストレーションされたデータパイプラインのサポート。
|
||||||
- 2025-08-08 OpenAI の最新 GPT-5 シリーズモデルをサポートします。
|
- 2025-08-08 OpenAI の最新 GPT-5 シリーズモデルをサポートします。
|
||||||
- 2025-08-04 新モデル、キミK2およびGrok 4をサポート。
|
|
||||||
- 2025-08-01 エージェントワークフローとMCPをサポート。
|
- 2025-08-01 エージェントワークフローとMCPをサポート。
|
||||||
- 2025-05-23 エージェントに Python/JS コードエグゼキュータコンポーネントを追加しました。
|
- 2025-05-23 エージェントに Python/JS コードエグゼキュータコンポーネントを追加しました。
|
||||||
- 2025-05-05 言語間クエリをサポートしました。
|
- 2025-05-05 言語間クエリをサポートしました。
|
||||||
- 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
|
- 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
|
||||||
- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
|
|
||||||
- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
|
- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
|
||||||
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
|
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。
|
||||||
|
|
||||||
@ -109,7 +117,7 @@
|
|||||||
## 🔎 システム構成
|
## 🔎 システム構成
|
||||||
|
|
||||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## 🎬 初期設定
|
## 🎬 初期設定
|
||||||
@ -147,41 +155,48 @@
|
|||||||
> ```bash
|
> ```bash
|
||||||
> vm.max_map_count=262144
|
> vm.max_map_count=262144
|
||||||
> ```
|
> ```
|
||||||
|
>
|
||||||
2. リポジトリをクローンする:
|
2. リポジトリをクローンする:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ git clone https://github.com/infiniflow/ragflow.git
|
$ git clone https://github.com/infiniflow/ragflow.git
|
||||||
```
|
```
|
||||||
|
|
||||||
3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:
|
3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:
|
||||||
|
|
||||||
> [!CAUTION]
|
> [!CAUTION]
|
||||||
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
|
> 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
|
||||||
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
|
> ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。
|
||||||
|
|
||||||
> 以下のコマンドは、RAGFlow Docker イメージの v0.20.1-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.20.1-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.20.1 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1 と設定します。
|
> 以下のコマンドは、RAGFlow Docker イメージの v0.22.1 エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.22.1 とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ cd ragflow/docker
|
$ cd ragflow/docker
|
||||||
# Use CPU for embedding and DeepDoc tasks:
|
|
||||||
|
# git checkout v0.22.1
|
||||||
|
# 任意: 安定版タグを利用 (一覧: https://github.com/infiniflow/ragflow/releases)
|
||||||
|
# この手順は、コード内の entrypoint.sh ファイルが Docker イメージのバージョンと一致していることを確認します。
|
||||||
|
|
||||||
|
# Use CPU for DeepDoc tasks:
|
||||||
$ docker compose -f docker-compose.yml up -d
|
$ docker compose -f docker-compose.yml up -d
|
||||||
|
|
||||||
# To use GPU to accelerate embedding and DeepDoc tasks:
|
# To use GPU to accelerate DeepDoc tasks:
|
||||||
# docker compose -f docker-compose-gpu.yml up -d
|
# sed -i '1i DEVICE=gpu' .env
|
||||||
```
|
# docker compose -f docker-compose.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
> 注意:`v0.22.0` より前のバージョンでは、embedding モデルを含むイメージと、embedding モデルを含まない slim イメージの両方を提供していました。詳細は以下の通りです:
|
||||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
|
||||||
| v0.20.1 | ≈9 | :heavy_check_mark: | Stable release |
|
|
||||||
| v0.20.1-slim | ≈2 | ❌ | Stable release |
|
|
||||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
|
||||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
|
||||||
|
|
||||||
1. サーバーを立ち上げた後、サーバーの状態を確認する:
|
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||||
|
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||||
|
| v0.21.1 | ≈9 | ✔️ | Stable release |
|
||||||
|
| v0.21.1-slim | ≈2 | ❌ | Stable release |
|
||||||
|
|
||||||
|
> `v0.22.0` 以降、当プロジェクトでは slim エディションのみを提供し、イメージタグに **-slim** サフィックスを付けなくなりました。
|
||||||
|
|
||||||
|
1. サーバーを立ち上げた後、サーバーの状態を確認する:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker logs -f ragflow-server
|
$ docker logs -f docker-ragflow-cpu-1
|
||||||
```
|
```
|
||||||
|
|
||||||
_以下の出力は、システムが正常に起動したことを確認するものです:_
|
_以下の出力は、システムが正常に起動したことを確認するものです:_
|
||||||
@ -197,12 +212,15 @@
|
|||||||
```
|
```
|
||||||
|
|
||||||
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。
|
> もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。
|
||||||
|
>
|
||||||
2. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
|
2. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
|
||||||
|
|
||||||
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
|
> デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
|
||||||
|
>
|
||||||
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
|
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
|
||||||
|
|
||||||
> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
|
> 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。
|
||||||
|
>
|
||||||
|
|
||||||
_これで初期設定完了!ショーの開幕です!_
|
_これで初期設定完了!ショーの開幕です!_
|
||||||
|
|
||||||
@ -231,33 +249,27 @@
|
|||||||
RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトルを保存します。[Infinity]に切り替え(https://github.com/infiniflow/infinity/)、次の手順に従います。
|
RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトルを保存します。[Infinity]に切り替え(https://github.com/infiniflow/infinity/)、次の手順に従います。
|
||||||
|
|
||||||
1. 実行中のすべてのコンテナを停止するには:
|
1. 実行中のすべてのコンテナを停止するには:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker/docker-compose.yml down -v
|
$ docker compose -f docker/docker-compose.yml down -v
|
||||||
```
|
```
|
||||||
|
|
||||||
Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。
|
Note: `-v` は docker コンテナのボリュームを削除し、既存のデータをクリアします。
|
||||||
2. **docker/.env** の「DOC \_ ENGINE」を「infinity」に設定します。
|
2. **docker/.env** の「DOC \_ ENGINE」を「infinity」に設定します。
|
||||||
|
|
||||||
3. 起動コンテナ:
|
3. 起動コンテナ:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker compose -f docker-compose.yml up -d
|
$ docker compose -f docker-compose.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
|
> Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。
|
||||||
|
>
|
||||||
|
|
||||||
## 🔧 ソースコードで Docker イメージを作成(埋め込みモデルなし)
|
## 🔧 ソースコードで Docker イメージを作成
|
||||||
|
|
||||||
この Docker イメージのサイズは約 1GB で、外部の大モデルと埋め込みサービスに依存しています。
|
この Docker イメージのサイズは約 1GB で、外部の大モデルと埋め込みサービスに依存しています。
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
|
||||||
cd ragflow/
|
|
||||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
|
||||||
```
|
|
||||||
|
|
||||||
## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)
|
|
||||||
|
|
||||||
この Docker のサイズは約 9GB で、埋め込みモデルを含むため、外部の大モデルサービスのみが必要です。
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
@ -266,22 +278,20 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
|
|||||||
|
|
||||||
## 🔨 ソースコードからサービスを起動する方法
|
## 🔨 ソースコードからサービスを起動する方法
|
||||||
|
|
||||||
1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:
|
1. `uv` と `pre-commit` をインストールする。すでにインストールされている場合は、このステップをスキップしてください:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pipx install uv pre-commit
|
pipx install uv pre-commit
|
||||||
```
|
```
|
||||||
|
|
||||||
2. ソースコードをクローンし、Python の依存関係をインストールする:
|
2. ソースコードをクローンし、Python の依存関係をインストールする:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/infiniflow/ragflow.git
|
git clone https://github.com/infiniflow/ragflow.git
|
||||||
cd ragflow/
|
cd ragflow/
|
||||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
uv sync --python 3.10 # install RAGFlow dependent python modules
|
||||||
uv run download_deps.py
|
uv run download_deps.py
|
||||||
pre-commit install
|
pre-commit install
|
||||||
```
|
```
|
||||||

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) with Docker Compose:

```bash
@ -293,22 +303,21 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
```
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. If your operating system does not have jemalloc, install it as follows:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
+ # mac
+ brew install jemalloc
```

6. Launch the backend service:

```bash
@ -316,14 +325,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

7. Install the frontend dependencies:

```bash
cd web
npm install
```

8. Launch the frontend service:

```bash
@ -333,14 +340,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
_The following screen indicates that the system started successfully:_

- 
+ 

9. When development is complete, stop the RAGFlow frontend and backend services (a verification sketch follows):

```bash
pkill -f "ragflow_server.py|task_executor.py"
```
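
If you want to confirm the two backend processes are gone before moving on, a quick check with standard `pgrep` (nothing RAGFlow-specific assumed):

```bash
# No output means ragflow_server.py and task_executor.py have stopped
pgrep -fl "ragflow_server.py|task_executor.py"
```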


## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)

75  README_ko.md
@ -1,6 +1,6 @@
<div align="center">
<a href="https://demo.ragflow.io/">
- <img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
+ <img src="web/src/assets/logo-with-text.svg" width="520" alt="ragflow logo">
</a>
</div>

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
- <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
+ <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -43,11 +43,18 @@
<a href="https://demo.ragflow.io">Demo</a>
</h4>

- #
+ <div align="center" style="margin-top:20px;margin-bottom:20px;">
+ <img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
+ </div>
+
+ <div align="center">
+ <a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+ </div>


## 💡 What is RAGFlow?

- [RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. Combined with large language models (LLMs), it provides accurate question answering, backed by citations from reliable sources across a variety of complex data formats. RAGFlow offers an optimized RAG workflow for enterprises of any scale.
+ [RAGFlow](https://ragflow.io/) is a leading open-source RAG (Retrieval-Augmented Generation) engine that fuses cutting-edge RAG with Agent capabilities to create a superior context layer for LLMs. It offers an efficient RAG workflow applicable to enterprises of any scale; powered by a unified context engine and prebuilt Agent templates, it helps developers turn complex data into production-ready, high-fidelity AI systems with exceptional efficiency and precision.

## 🎮 Demo

@ -60,13 +67,15 @@

## 🔥 Updates

+ - 2025-11-19 Supports Gemini 3 Pro.
+ - 2025-11-12 Supports data synchronization from Confluence, S3, Notion, Discord, and Google Drive.
+ - 2025-10-23 Supports MinerU and Docling as document parsing methods.
+ - 2025-10-15 Supports orchestrated data pipelines.
- 2025-08-08 Supports OpenAI's latest GPT-5 series models.
- - 2025-08-04 Adds support for new models, including Kimi K2 and Grok 4.
- 2025-08-01 Supports agentic workflows and MCP.
- 2025-05-23 Adds a Python/JS code executor component to Agent.
- 2025-05-05 Supports cross-language queries.
- 2025-03-19 Supports using multimodal models to understand images inside PDF or DOCX files.
- - 2025-02-28 Combined with internet search (TAVILY), supports deep research for any LLM.
- 2024-12-18 Upgrades the document layout analysis model in DeepDoc.
- 2024-08-22 Supports text-to-SQL statements through RAG.

@ -109,7 +118,7 @@
## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
- <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
+ <img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 Get Started
@ -160,28 +169,36 @@
> All Docker images are built for x86 platforms. We do not currently provide Docker images for ARM64.
> If you are on an ARM64 platform, please use [this guide to build a Docker image compatible with your system](https://ragflow.io/docs/dev/build_docker_image).

- > The command below downloads the v0.20.1-slim edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download a RAGFlow edition other than v0.20.1-slim, update the RAGFLOW_IMAGE variable accordingly in docker/.env, then start the server with docker compose. For example, set RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1 for the full v0.20.1 edition.
+ > The command below downloads the v0.22.1 edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download a RAGFlow edition other than v0.22.1, update the RAGFLOW_IMAGE variable accordingly in docker/.env, then start the server with docker compose.

```bash
$ cd ragflow/docker
- # Use CPU for embedding and DeepDoc tasks:
+
+ # git checkout v0.22.1
+ # Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases)
+ # This step ensures that the entrypoint.sh file in the code matches the Docker image version.
+
+ # Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

- # To use GPU to accelerate embedding and DeepDoc tasks:
+ # To use GPU to accelerate DeepDoc tasks:
- # docker compose -f docker-compose-gpu.yml up -d
+ # sed -i '1i DEVICE=gpu' .env
+ # docker compose -f docker-compose.yml up -d
```

- | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
- | ----------------- | --------------- | --------------------- | ------------------------ |
- | v0.20.1           | ≈9              | :heavy_check_mark:    | Stable release           |
- | v0.20.1-slim      | ≈2              | ❌                    | Stable release           |
- | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
- | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
+ > Note: Before `v0.22.0`, we provided both images that include embedding models and slim images without them. Details:
+
+ | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
+ | ----------------- | --------------- | --------------------- | ------------------------ |
+ | v0.21.1           | ≈9              | ✔️                    | Stable release           |
+ | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
+
+ > Starting with `v0.22.0`, we ship only the slim edition and no longer append the **-slim** suffix to image tags.

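For clarity, the `sed -i '1i DEVICE=gpu' .env` line added above simply prepends a `DEVICE=gpu` entry to **docker/.env**; a quick check, assuming you run it from `ragflow/docker`:

```bash
# The first line of .env should now read DEVICE=gpu
head -n 1 .env
```
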
1. Check the server status after the server starts:

```bash
- $ docker logs -f ragflow-server
+ $ docker logs -f docker-ragflow-cpu-1
```

_The following output confirms that the system started successfully:_

@ -243,20 +260,10 @@ RAGFlow uses Elasticsearch by default to store full text and
> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not officially supported.

- ## 🔧 Build a Docker image from source (without embedding models)
+ ## 🔧 Build a Docker image from source

This Docker image is about 1 GB in size and relies on external LLM and embedding services.

- ```bash
- git clone https://github.com/infiniflow/ragflow.git
- cd ragflow/
- docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
- ```
-
- ## 🔧 Build a Docker image from source (including embedding models)
-
- This Docker image is about 9 GB in size. Since it already includes the embedding models, it only depends on an external LLM service.
-
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
@ -265,7 +272,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

## 🔨 Launch the service from source

- 1. Install uv, or skip this step if it is already installed:
+ 1. Install `uv` and `pre-commit`, or skip this step if they are already installed:

```bash
pipx install uv pre-commit
@ -276,7 +283,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
- uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+ uv sync --python 3.10 # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```
@ -306,6 +313,8 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
+ # mac
+ brew install jemalloc
```

6. Launch the backend service:
@ -339,7 +348,7 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
```bash
pkill -f "ragflow_server.py|task_executor.py"
```


## 📚 Documentation

195  README_pt_br.md
@ -1,6 +1,6 @@
<div align="center">
<a href="https://demo.ragflow.io/">
- <img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
+ <img src="web/src/assets/logo-with-text.svg" width="520" alt="ragflow logo">
</a>
</div>

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
- <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
+ <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Latest Release">
@ -43,7 +43,13 @@
<a href="https://demo.ragflow.io">Demo</a>
</h4>

- #
+ <div align="center" style="margin-top:20px;margin-bottom:20px;">
+ <img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
+ </div>
+
+ <div align="center">
+ <a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
+ </div>

<details open>
<summary><b>📕 Table of Contents</b></summary>
@ -67,7 +73,7 @@

## 💡 What is RAGFlow?

- [RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document understanding. It offers a streamlined RAG workflow for companies of any size, combining LLMs to provide truthful question-answering capabilities backed by well-grounded citations from diverse, complex formatted data.
+ [RAGFlow](https://ragflow.io/) is a leading open-source RAG (Retrieval-Augmented Generation) engine that fuses cutting-edge RAG technology with Agent capabilities to create a superior context layer for LLMs. It offers an optimized RAG workflow adaptable to companies of any scale. Powered by a converged context engine and prebuilt Agent templates, RAGFlow lets developers turn complex data into high-fidelity, production-ready AI systems with exceptional efficiency and precision.

## 🎮 Demo

@ -80,13 +86,15 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

## 🔥 Latest Updates

+ - 19-11-2025 Supports Gemini 3 Pro.
+ - 12-11-2025 Supports data synchronization from Confluence, S3, Notion, Discord, and Google Drive.
+ - 23-10-2025 Supports MinerU and Docling as document parsing methods.
+ - 15-10-2025 Supports orchestrated data pipelines.
- 08-08-2025 Supports OpenAI's latest GPT-5 series.
- - 04-08-2025 Supports new models, including Kimi K2 and Grok 4.
- 01-08-2025 Supports agentic workflows and MCP.
- 23-05-2025 Adds the Python/JS code executor component to the Agent.
- 05-05-2025 Supports cross-language queries.
- 19-03-2025 Supports using a multimodal model to understand images inside PDF or DOCX files.
- - 28-02-2025 Combined with internet search (TAVILY), supports deep research for any LLM.
- 18-12-2024 Upgrades the Document Layout Analysis model in DeepDoc.
- 22-08-2024 Supports text-to-SQL conversion via RAG.

@ -129,7 +137,7 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).
## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
- <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
+ <img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 Get Started
@ -147,84 +155,92 @@ Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

### 🚀 Launch the server

1. Ensure `vm.max_map_count` >= 262144:

> To check the value of `vm.max_map_count`:
>
> ```bash
> $ sysctl vm.max_map_count
> ```
>
> If necessary, reset `vm.max_map_count` to a value of at least 262144:
>
> ```bash
> # In this case, set it to 262144:
> $ sudo sysctl -w vm.max_map_count=262144
> ```
>
> This change is reset after a system reboot. To make it permanent, add or update the `vm.max_map_count` value in **/etc/sysctl.conf** (a reload sketch follows this list):
>
> ```bash
> vm.max_map_count=262144
> ```
+ >

2. Clone the repository:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. Start the server using the pre-built Docker images:

> [!CAUTION]
> All Docker images are built for x86 platforms. We do not currently offer Docker images for ARM64.
> If you are on an ARM64 platform, please use [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

- > The command below downloads the `v0.20.1-slim` edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download a RAGFlow edition other than `v0.20.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1` for the full `v0.20.1` edition.
+ > The command below downloads the `v0.22.1` edition of the RAGFlow Docker image. See the table below for descriptions of the different RAGFlow editions. To download a RAGFlow edition other than `v0.22.1`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server.

```bash
$ cd ragflow/docker
- # Use CPU for embedding and DeepDoc tasks:
+
+ # git checkout v0.22.1
+ # Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases)
+ # This step ensures that the entrypoint.sh file in the code matches the Docker image version.
+
+ # Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

- # To use GPU to accelerate embedding and DeepDoc tasks:
+ # To use GPU to accelerate DeepDoc tasks:
- # docker compose -f docker-compose-gpu.yml up -d
+ # sed -i '1i DEVICE=gpu' .env
+ # docker compose -f docker-compose.yml up -d
```

- | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
- | ----------------- | --------------- | --------------------- | ------------------------ |
- | v0.20.1           | ~9              | :heavy_check_mark:    | Stable release           |
- | v0.20.1-slim      | ~2              | ❌                    | Stable release           |
- | nightly           | ~9              | :heavy_check_mark:    | _Unstable_ nightly build |
- | nightly-slim      | ~2              | ❌                    | _Unstable_ nightly build |
+ > Note: Before `v0.22.0`, we provided both images with embedding models and slim images without them. Details:
+
+ | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
+ | ----------------- | --------------- | --------------------- | ------------------------ |
+ | v0.21.1           | ≈9              | ✔️                    | Stable release           |
+ | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
+
+ > Starting with `v0.22.0`, we distribute only the slim edition and no longer append the **-slim** suffix to image tags.

4. Check the server status after starting it:

```bash
- $ docker logs -f ragflow-server
+ $ docker logs -f docker-ragflow-cpu-1
```

_The following output confirms a successful launch of the system:_

```bash
 ____ ___ ______ ______ __
/ __ \ / | / ____// ____// /____ _ __
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/

* Running on all addresses (0.0.0.0)
```

> If you skip this confirmation step and go straight to RAGFlow, your browser may show a `network anormal` error, because RAGFlow may not be fully initialized at that point.
+ >

5. In your browser, enter the IP address of your server and log in to RAGFlow.

> With the default settings, you only need to enter `http://IP_OF_YOUR_MACHINE` (**without** the port number), since the default HTTP port `80` can be omitted when using the default configuration.
+ >

6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), select the desired LLM factory in `user_default_llm` and update the `API_KEY` field with the corresponding API key.

> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for more information.
+ >

_The show is on!_

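As a follow-up to step 1, the new value in **/etc/sysctl.conf** can be applied without a reboot, assuming a standard Linux `sysctl`:

```bash
# Reload kernel parameters from /etc/sysctl.conf, then verify the new value
sudo sysctl -p
sysctl vm.max_map_count
```
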
@ -255,9 +271,9 @@ RAGFlow uses Elasticsearch by default to store full text and vectors

```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` will delete the container volumes, erasing the existing data.
2. Set `DOC_ENGINE` in **docker/.env** to `infinity`.

3. Start the containers:

```bash
@ -265,22 +281,12 @@ RAGFlow uses Elasticsearch by default to store full text and vectors
```

> [!WARNING]
> Switching to Infinity on a Linux/arm64 machine is not yet officially supported.

- ## 🔧 Build a Docker image without embedding models
+ ## 🔧 Build a Docker image

This image is about 2 GB in size and depends on external LLM and embedding services.

- ```bash
- git clone https://github.com/infiniflow/ragflow.git
- cd ragflow/
- docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
- ```
-
- ## 🔧 Build a Docker image including embedding models
-
- This image is about 9 GB in size. Since it includes embedding models, it depends only on external LLM services.
-
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
@ -289,22 +295,20 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly

## 🔨 Launch the service from source for development

- 1. Install `uv`, or skip this step if it is already installed:
+ 1. Install `uv` and `pre-commit`, or skip this step if they are already installed:

```bash
pipx install uv pre-commit
```

2. Clone the source code and install the Python dependencies:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
- uv sync --python 3.10 --all-extras # install the Python modules RAGFlow depends on
+ uv sync --python 3.10 # install the Python modules RAGFlow depends on
uv run download_deps.py
pre-commit install
```

3. Start the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:

```bash
@ -316,22 +320,21 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
```
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

5. If your operating system does not have jemalloc, install it as follows:

```bash
# ubuntu
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
+ # mac
+ brew install jemalloc
```
6. Launch the backend service:

```bash
@ -339,14 +342,12 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

7. Install the frontend dependencies:

```bash
cd web
npm install
```

8. Launch the frontend service:

```bash
@ -356,13 +357,11 @@ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly
_The following output confirms a successful launch of the system:_

- 
+ 

9. Stop the RAGFlow frontend and backend services once development is complete:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```


## 📚 Documentation

105  README_tzh.md
@ -1,6 +1,6 @@
<div align="center">
<a href="https://demo.ragflow.io/">
- <img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
+ <img src="web/src/assets/logo-with-text.svg" width="350" alt="ragflow logo">
</a>
</div>

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
- <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
+ <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -43,7 +43,9 @@
<a href="https://demo.ragflow.io">Demo</a>
</h4>

- #
+ <div align="center" style="margin-top:20px;margin-bottom:20px;">
+ <img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
+ </div>

<div align="center">
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
@ -70,7 +72,7 @@

## 💡 What is RAGFlow?

- [RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine built on deep document understanding. It provides a streamlined RAG workflow for enterprises and individuals of any scale, combining LLMs to deliver reliable question answering with well-grounded citations across users' complex, variously formatted data.
+ [RAGFlow](https://ragflow.io/) is a leading open-source RAG (Retrieval-Augmented Generation) engine that fuses cutting-edge RAG technology with Agent capabilities to provide a superior context layer for large language models. It offers an end-to-end RAG workflow adaptable to enterprises of any scale; with its converged context engine and prebuilt Agent templates, it helps developers turn complex data into highly reliable, production-grade AI systems with extreme efficiency and precision.

## 🎮 Demo

@ -83,13 +85,15 @@

## 🔥 Recent Updates

+ - 2025-11-19 Supports Gemini 3 Pro.
+ - 2025-11-12 Supports data synchronization from Confluence, S3, Notion, Discord, and Google Drive.
+ - 2025-10-23 Supports MinerU and Docling as document parsing methods.
+ - 2025-10-15 Supports orchestrable data pipelines.
- 2025-08-08 Supports OpenAI's latest GPT-5 series models.
- - 2025-08-04 Supports models such as Kimi K2 and Grok 4.
- 2025-08-01 Supports agentic workflow and MCP.
- 2025-05-23 Adds a Python/JS code executor component to Agent.
- 2025-05-05 Supports cross-language queries.
- 2025-03-19 Supports using multimodal models to parse images in PDF and DOCX files into descriptions.
- - 2025-02-28 Combined with internet search (Tavily), enables Deep Research-style reasoning for any LLM.
- 2024-12-18 Upgrades the document layout analysis model in DeepDoc.
- 2024-08-22 Supports converting natural language to SQL statements with RAG.

@ -132,7 +136,7 @@
## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
- <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
+ <img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 Quick Start
@ -170,47 +174,54 @@
> ```bash
> vm.max_map_count=262144
> ```
+ >

2. Clone the repository:

```bash
$ git clone https://github.com/infiniflow/ragflow.git
```

3. Enter the **docker** folder and start the server with the pre-built Docker images:

> [!CAUTION]
> All Docker images are built for x86 platforms. We do not currently offer Docker images for ARM64.
> If you are on an ARM64 platform, use [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image suited to your system.

- > Running the command below automatically downloads the RAGFlow slim Docker image `v0.20.1-slim`. See the table below for descriptions of the different Docker editions. To download a Docker image other than `v0.20.1-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1` to download the full `v0.20.1` edition of the RAGFlow image.
+ > Running the command below automatically downloads the RAGFlow Docker image `v0.22.1`. See the table below for descriptions of the different Docker editions. To download a Docker image other than `v0.22.1`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`.

```bash
$ cd ragflow/docker
- # Use CPU for embedding and DeepDoc tasks:
+
+ # git checkout v0.22.1
+ # Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases)
+ # This step ensures that the entrypoint.sh file in the code matches the Docker image version.
+
+ # Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

- # To use GPU to accelerate embedding and DeepDoc tasks:
+ # To use GPU to accelerate DeepDoc tasks:
- # docker compose -f docker-compose-gpu.yml up -d
+ # sed -i '1i DEVICE=gpu' .env
+ # docker compose -f docker-compose.yml up -d
```

- | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
- | ----------------- | --------------- | --------------------- | ------------------------ |
- | v0.20.1           | ≈9              | :heavy_check_mark:    | Stable release           |
- | v0.20.1-slim      | ≈2              | ❌                    | Stable release           |
- | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
- | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
+ > Note: Before `v0.22.0`, we provided both images with embedding models and slim images without them. Details:
+
+ | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
+ | ----------------- | --------------- | --------------------- | ------------------------ |
+ | v0.21.1           | ≈9              | ✔️                    | Stable release           |
+ | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
+
+ > Starting with `v0.22.0`, we release only the slim edition and no longer append the **-slim** suffix to image tags.

> [!TIP]
> If you have trouble pulling the Docker image, you can choose the corresponding Huawei Cloud or Alibaba Cloud mirror in **docker/.env**, following the comments for the `RAGFLOW_IMAGE` variable (a sketch of this edit follows).
>
> - Huawei Cloud image name: `swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow`
> - Alibaba Cloud image name: `registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow`

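A hypothetical example of the edit the tip describes, assuming the default **docker/.env** layout (the exact lines to uncomment are documented in that file, so treat the value below as a sketch only):

```bash
# docker/.env (sketch): pull RAGFlow from the Alibaba Cloud mirror instead of Docker Hub
RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:v0.22.1
```
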
4. Confirm the server status again after the server has started:

```bash
- $ docker logs -f ragflow-server
+ $ docker logs -f docker-ragflow-cpu-1
```

_The following output indicates that the server started successfully:_

@ -226,12 +237,15 @@
```

> If you skip this confirmation step and log in to RAGFlow right away, your browser may report `network anormal` (network anomaly), because RAGFlow may not have fully started.
+ >

5. Enter your server's IP address in your browser and log in to RAGFlow.

> In the example above, you only need to enter http://IP_OF_YOUR_MACHINE: with unchanged default settings, the port number can be omitted (default HTTP serving port 80).
+ >

6. In [service_conf.yaml.template](./docker/service_conf.yaml.template), set the LLM factory in the `user_default_llm` field and fill in the `API_KEY` field with the API key for your chosen model.

> See [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) for details.
+ >

_The show begins; strike up the music!_

@ -249,7 +263,7 @@

> [./docker/README](./docker/README.md) explains the environment variable settings and service configuration used by [service_conf.yaml.template](./docker/service_conf.yaml.template).

To change the default HTTP serving port (80), update `80:80` in [docker-compose.yml](./docker/docker-compose.yml) to `<YOUR_SERVING_PORT>:80`.

> All system configuration changes require a system restart to take effect:
>
@ -266,10 +280,9 @@ RAGFlow uses Elasticsearch by default to store text and vector data. To switch
```bash
$ docker compose -f docker/docker-compose.yml down -v
```

Note: `-v` deletes the docker container volumes, so existing data is cleared.

2. Set `DOC_ENGINE` in the **docker/.env** file to `infinity`.

3. Start the containers:

```bash
@ -279,45 +292,33 @@

> [!WARNING]
> Infinity does not yet officially support running on Linux/arm64 machines.

- ## 🔧 Build a Docker image from source (without embedding models)
+ ## 🔧 Build a Docker image from source

This Docker image is about 2 GB in size and depends on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
- docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
+ docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
- ```
-
- ## 🔧 Build a Docker image from source (including embedding models)
-
- This Docker image is about 9 GB in size. Since it already includes the embedding models, it only depends on an external LLM service.
-
- ```bash
- git clone https://github.com/infiniflow/ragflow.git
- cd ragflow/
- docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Launch the service from source

- 1. Install uv. Skip this step if it is already installed:
+ 1. Install `uv` and `pre-commit`. Skip this step if they are already installed:

```bash
pipx install uv pre-commit
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
```

2. Download the source code and install the Python dependencies:

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
- uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
+ uv sync --python 3.10 # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) with Docker Compose:

```bash
@ -329,13 +330,11 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
```
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to the corresponding mirror site:

```bash
export HF_ENDPOINT=https://hf-mirror.com
```

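One nuance worth noting: the variable only affects processes started from the same shell, so it is typically combined with the later launch step. A small sketch using only commands already shown in this README:

```bash
# Make the mirror take effect for the backend launched from this shell
export HF_ENDPOINT=https://hf-mirror.com
bash docker/launch_backend_service.sh
```
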
5. If your operating system does not have jemalloc, install it as follows:

```bash
@ -343,8 +342,9 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
+ # mac
+ brew install jemalloc
```

6. Launch the backend service:

```bash
@ -352,14 +352,12 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
export PYTHONPATH=$(pwd)
bash docker/launch_backend_service.sh
```

7. Install the frontend dependencies:

```bash
cd web
npm install
```

8. Launch the frontend service:

```bash
@ -369,15 +367,16 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i
_The following screen indicates that the system started successfully:_

- 
+ 

```

+ ```
9. Stop the RAGFlow frontend and backend services when development is complete:

```bash
pkill -f "ragflow_server.py|task_executor.py"
```


## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)

66  README_zh.md
@ -1,6 +1,6 @@
<div align="center">
<a href="https://demo.ragflow.io/">
- <img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
+ <img src="web/src/assets/logo-with-text.svg" width="350" alt="ragflow logo">
</a>
</div>

@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
- <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.20.1">
+ <img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.22.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@ -43,7 +43,9 @@
<a href="https://demo.ragflow.io">Demo</a>
</h4>

- #
+ <div align="center" style="margin-top:20px;margin-bottom:20px;">
+ <img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/ragflow-octoverse.png" width="1200"/>
+ </div>

<div align="center">
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
@ -70,7 +72,7 @@

## 💡 What is RAGFlow?

- [RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine built on deep document understanding. It provides a streamlined RAG workflow for enterprises and individuals of any scale, combining LLMs to deliver reliable question answering with well-grounded citations across users' complex, variously formatted data.
+ [RAGFlow](https://ragflow.io/) is a leading open-source Retrieval-Augmented Generation (RAG) engine that fuses cutting-edge RAG technology with Agent capabilities to provide a superior context layer for large language models. It offers an end-to-end RAG workflow adaptable to enterprises of any scale; with its converged context engine and prebuilt Agent templates, it helps developers turn complex data into highly reliable, production-grade AI systems with extreme efficiency and precision.

## 🎮 Demo

@ -83,13 +85,15 @@

## 🔥 Recent Updates

- - 2025-08-08 Supports OpenAI's latest GPT-5 series models.
- - 2025-08-04 Adds support for models such as Kimi K2 and Grok 4.
+ - 2025-11-19 Supports Gemini 3 Pro.
+ - 2025-11-12 Supports data synchronization from Confluence, S3, Notion, Discord, and Google Drive.
+ - 2025-10-23 Supports MinerU and Docling as document parsing methods.
+ - 2025-10-15 Supports orchestrable data pipelines.
+ - 2025-08-08 Supports OpenAI's latest GPT-5 series models.
- 2025-08-01 Supports agentic workflow and MCP.
- 2025-05-23 Adds a Python/JS code executor component to Agent.
- 2025-05-05 Supports cross-language queries.
- 2025-03-19 Supports using multimodal models to parse images in PDF and DOCX files into descriptions.
- - 2025-02-28 Combined with internet search (Tavily), enables Deep Research-style reasoning for any LLM.
- 2024-12-18 Upgrades the document layout analysis model in DeepDoc.
- 2024-08-22 Supports converting natural language to SQL statements with RAG.

@ -132,7 +136,7 @@
## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
- <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
+ <img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 Quick Start
@ -183,23 +187,31 @@
> Note that all officially provided Docker images are built for x86; we do not provide ARM64-based Docker images.
> If your operating system is ARM64, please refer to [this document](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image yourself.

- > Running the command below automatically downloads the RAGFlow slim Docker image `v0.20.1-slim`. See the table below for descriptions of the different Docker editions. To download a Docker image other than `v0.20.1-slim`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`. For example, set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.20.1` to download the full `v0.20.1` edition of the RAGFlow image.
+ > Running the command below automatically downloads the RAGFlow Docker image `v0.22.1`. See the table below for descriptions of the different Docker editions. To download a Docker image other than `v0.22.1`, update the `RAGFLOW_IMAGE` variable in **docker/.env** before starting the service with `docker compose`.

```bash
$ cd ragflow/docker
- # Use CPU for embedding and DeepDoc tasks:
+
+ # git checkout v0.22.1
+ # Optional: use a stable release tag (see releases: https://github.com/infiniflow/ragflow/releases)
+ # This step ensures that the entrypoint.sh file in the code matches the Docker image version.
+
+ # Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

- # To use GPU to accelerate embedding and DeepDoc tasks:
+ # To use GPU to accelerate DeepDoc tasks:
- # docker compose -f docker-compose-gpu.yml up -d
+ # sed -i '1i DEVICE=gpu' .env
+ # docker compose -f docker-compose.yml up -d
```

+ > Note: Before `v0.22.0`, we provided both images with embedding models and slim images without them. Details:

| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
| ----------------- | --------------- | --------------------- | ------------------------ |
- | v0.20.1           | ≈9              | :heavy_check_mark:    | Stable release           |
+ | v0.21.1           | ≈9              | ✔️                    | Stable release           |
- | v0.20.1-slim      | ≈2              | ❌                    | Stable release           |
+ | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
- | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
- | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
+ > Starting with `v0.22.0`, we release only the slim edition and no longer append the **-slim** suffix to image tags.

> [!TIP]
> If you have trouble pulling the Docker image, you can choose the corresponding Huawei Cloud or Alibaba Cloud mirror in **docker/.env**, following the comments for the `RAGFLOW_IMAGE` variable.
@ -210,7 +222,7 @@
4. Check the server status after the server has started:

```bash
- $ docker logs -f ragflow-server
+ $ docker logs -f docker-ragflow-cpu-1
```

_The following output indicates that the server started successfully:_

@ -279,29 +291,19 @@ RAGFlow uses Elasticsearch by default to store text and vector data. To switch
> [!WARNING]
|
> [!WARNING]
|
||||||
> Infinity 目前官方并未正式支持在 Linux/arm64 架构下的机器上运行.
|
> Infinity 目前官方并未正式支持在 Linux/arm64 架构下的机器上运行.
|
||||||
|
|
||||||
## 🔧 Build a Docker Image from Source

This Docker image is about 2 GB in size and relies on external LLM and embedding services.

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```
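To try the freshly built image, one plausible route (a sketch, not an officially documented flow) is to point `RAGFLOW_IMAGE` at the tag used in the build command above and start the stack as usual:

```bash
# docker/.env
RAGFLOW_IMAGE=infiniflow/ragflow:nightly
# then, from ragflow/docker:
docker compose -f docker-compose.yml up -d
```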
## 🔨 Launch the Service from Source

1. Install `uv` and `pre-commit`. Skip this step if they are already installed:

```bash
pipx install uv pre-commit
```
@@ -313,7 +315,7 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
uv sync --python 3.10 # install RAGFlow dependent python modules
uv run download_deps.py
pre-commit install
```
@@ -342,6 +344,8 @@ docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t i

```bash
sudo apt-get install libjemalloc-dev
# centos
sudo yum install jemalloc
# mac
brew install jemalloc
```

6. Launch the backend service:
admin/build_cli_release.sh (new executable file, 47 lines)
@@ -0,0 +1,47 @@
#!/bin/bash

set -e

echo "🚀 Start building..."
echo "================================"

PROJECT_NAME="ragflow-cli"

RELEASE_DIR="release"
BUILD_DIR="dist"
SOURCE_DIR="src"
PACKAGE_DIR="ragflow_cli"

echo "🧹 Clean old build folder..."
rm -rf "$RELEASE_DIR/"

echo "📁 Prepare source code..."
mkdir -p "$RELEASE_DIR/$PROJECT_NAME/$SOURCE_DIR"
cp pyproject.toml "$RELEASE_DIR/$PROJECT_NAME/pyproject.toml"
cp README.md "$RELEASE_DIR/$PROJECT_NAME/README.md"

mkdir -p "$RELEASE_DIR/$PROJECT_NAME/$SOURCE_DIR/$PACKAGE_DIR"
cp admin_client.py "$RELEASE_DIR/$PROJECT_NAME/$SOURCE_DIR/$PACKAGE_DIR/admin_client.py"

if [ -d "$RELEASE_DIR/$PROJECT_NAME/$SOURCE_DIR" ]; then
    echo "✅ source dir: $RELEASE_DIR/$PROJECT_NAME/$SOURCE_DIR"
else
    echo "❌ source dir does not exist: $RELEASE_DIR/$PROJECT_NAME/$SOURCE_DIR"
    exit 1
fi

echo "🔨 Make build file..."
cd "$RELEASE_DIR/$PROJECT_NAME"
export PYTHONPATH=$(pwd)
python -m build

echo "✅ Check build result..."
if [ -d "$BUILD_DIR" ]; then
    echo "📦 Package generated:"
    ls -la "$BUILD_DIR/"
else
    echo "❌ Build failed: $BUILD_DIR does not exist."
    exit 1
fi

echo "🎉 Build finished successfully!"
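The script copies `pyproject.toml`, `README.md`, and `admin_client.py` from the current working directory, so presumably it is meant to be run from `admin/client/` (an assumption read off the `cp` paths above); it also needs the PyPA `build` package for the `python -m build` step. A plausible invocation:

```bash
pip install build              # provides `python -m build`
cd admin/client
bash ../build_cli_release.sh
ls release/ragflow-cli/dist/   # the built wheel and sdist land here
```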
admin/client/README.md (new file, 136 lines)
@@ -0,0 +1,136 @@
# RAGFlow Admin Service & CLI

### Introduction

Admin Service is a dedicated management component designed to monitor, maintain, and administrate the RAGFlow system. It provides comprehensive tools for ensuring system stability, performing operational tasks, and managing users and permissions efficiently.

The service offers real-time monitoring of critical components, including the RAGFlow server, Task Executor processes, and dependent services such as MySQL, Infinity, Elasticsearch, Redis, and MinIO. It automatically checks their health status, resource usage, and uptime, and performs restarts in case of failures to minimize downtime.

For user and system management, it supports listing, creating, modifying, and deleting users and their associated resources, such as knowledge bases and Agents.

Built with scalability and reliability in mind, the Admin Service ensures smooth system operation and simplifies maintenance workflows.

It consists of a server-side service and a command-line client (CLI), both implemented in Python. User commands are parsed with the Lark parsing toolkit; a minimal sketch of that approach follows the component list below.

- **Admin Service**: A backend service that interfaces with the RAGFlow system to execute administrative operations and monitor its status.
- **Admin CLI**: A command-line interface that allows users to connect to the Admin Service and issue commands for system management.
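For readers new to Lark, here is a minimal, self-contained sketch of the parsing approach used by the CLI. It is not the real grammar (that lives in `admin_client.py`); the single rule and the dict shape below are illustrative only:

```python
from lark import Lark, Transformer

# Toy subset of the CLI grammar: one case-insensitive, semicolon-terminated statement.
TOY_GRAMMAR = r"""
start: "LIST"i "USERS"i ";"
%import common.WS
%ignore WS
"""

class ToCommand(Transformer):
    # Lower the parse tree into the dict shape the CLI dispatches on.
    def start(self, items):
        return {"type": "list_users"}

parser = Lark(TOY_GRAMMAR, start="start", parser="lalr", transformer=ToCommand())
print(parser.parse("list users;"))  # -> {'type': 'list_users'}
```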
### Starting the Admin Service

#### Launching from source code

1. Before starting the Admin Service, make sure the RAGFlow system is already running.

2. Launch it from source code:

```bash
python admin/server/admin_server.py
```

The service will start and listen for incoming connections from the CLI on the configured port.

#### Using the Docker image

1. Before startup, configure the `docker_compose.yml` file to enable the admin server (a fuller sketch of this override follows step 2):

```yaml
command:
  - --enable-adminserver
```

2. Start the containers; the service will start and listen for incoming connections from the CLI on the configured port.
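For orientation, here is a sketch of where that `command` override sits in a compose file. The service and image names are assumptions for illustration; match them to the actual entries in your `docker-compose.yml`:

```yaml
services:
  ragflow:                       # hypothetical service name
    image: infiniflow/ragflow:v0.22.1
    command:
      - --enable-adminserver     # turns on the Admin Service in the container
```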
### Using the Admin CLI

1. Ensure the Admin Service is running.
2. Install ragflow-cli.

```bash
pip install ragflow-cli==0.22.1
```

3. Launch the CLI client:

```bash
ragflow-cli -h 127.0.0.1 -p 9381
```

You will be prompted to enter the superuser's password to log in. The default password is `admin`.

**Parameters:**

- `-h`: RAGFlow admin server host address
- `-p`: RAGFlow admin server port
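In addition to `-h` and `-p`, the client's argument parser (see `admin_client.py`) defines a `-w`/`--password` flag and accepts an optional trailing command, so a one-shot, non-interactive call should look roughly like this sketch:

```bash
ragflow-cli -h 127.0.0.1 -p 9381 -w admin "list users;"
```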
## Supported Commands

Commands are case-insensitive and must be terminated with a semicolon (`;`).

### Service Management Commands

- `LIST SERVICES;`
  - Lists all available services within the RAGFlow system.
- `SHOW SERVICE <id>;`
  - Shows detailed status information for the service identified by `<id>`.

### User Management Commands

- `LIST USERS;`
  - Lists all users known to the system.
- `SHOW USER '<username>';`
  - Shows details and permissions for the specified user. The username must be enclosed in single or double quotes.
- `CREATE USER <username> <password>;`
  - Creates a user with the given username and password. Both must be enclosed in single or double quotes.
- `DROP USER '<username>';`
  - Removes the specified user from the system. Use with caution.
- `ALTER USER PASSWORD '<username>' '<new_password>';`
  - Changes the password for the specified user.
- `ALTER USER ACTIVE <username> <on/off>;`
  - Sets the user to active or inactive.

### Data and Agent Commands

- `LIST DATASETS OF '<username>';`
  - Lists the datasets associated with the specified user.
- `LIST AGENTS OF '<username>';`
  - Lists the agents associated with the specified user.

### Meta-Commands

Meta-commands are prefixed with a backslash (`\`).

- `\?` or `\help`
  - Shows help information for the available commands.
- `\q` or `\quit`
  - Exits the CLI application.

## Examples

```commandline
admin> list users;
+-------------------------------+------------------------+-----------+-------------+
| create_date                   | email                  | is_active | nickname    |
+-------------------------------+------------------------+-----------+-------------+
| Fri, 22 Nov 2024 16:03:41 GMT | jeffery@infiniflow.org | 1         | Jeffery     |
| Fri, 22 Nov 2024 16:10:55 GMT | aya@infiniflow.org     | 1         | Waterdancer |
+-------------------------------+------------------------+-----------+-------------+

admin> list services;
+-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+
| extra                                                                                     | host      | id | name          | port  | service_type   |
+-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+
| {}                                                                                        | 0.0.0.0   | 0  | ragflow_0     | 9380  | ragflow_server |
| {'meta_type': 'mysql', 'password': 'infini_rag_flow', 'username': 'root'}                 | localhost | 1  | mysql         | 5455  | meta_data      |
| {'password': 'infini_rag_flow', 'store_type': 'minio', 'user': 'rag_flow'}                | localhost | 2  | minio         | 9000  | file_store     |
| {'password': 'infini_rag_flow', 'retrieval_type': 'elasticsearch', 'username': 'elastic'} | localhost | 3  | elasticsearch | 1200  | retrieval      |
| {'db_name': 'default_db', 'retrieval_type': 'infinity'}                                   | localhost | 4  | infinity      | 23817 | retrieval      |
| {'database': 1, 'mq_type': 'redis', 'password': 'infini_rag_flow'}                        | localhost | 5  | redis         | 6379  | message_queue  |
+-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+
```
admin/client/admin_client.py (new file, 978 lines)
@@ -0,0 +1,978 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import base64
from cmd import Cmd

from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from typing import Dict, List, Any
from lark import Lark, Transformer, Tree
import requests
import getpass

GRAMMAR = r"""
start: command

command: sql_command | meta_command

sql_command: list_services
    | show_service
    | startup_service
    | shutdown_service
    | restart_service
    | list_users
    | show_user
    | drop_user
    | alter_user
    | create_user
    | activate_user
    | list_datasets
    | list_agents
    | create_role
    | drop_role
    | alter_role
    | list_roles
    | show_role
    | grant_permission
    | revoke_permission
    | alter_user_role
    | show_user_permission
    | show_version

// meta command definition
meta_command: "\\" meta_command_name [meta_args]

meta_command_name: /[a-zA-Z?]+/
meta_args: (meta_arg)+

meta_arg: /[^\\s"']+/ | quoted_string

// command definition

LIST: "LIST"i
SERVICES: "SERVICES"i
SHOW: "SHOW"i
CREATE: "CREATE"i
SERVICE: "SERVICE"i
SHUTDOWN: "SHUTDOWN"i
STARTUP: "STARTUP"i
RESTART: "RESTART"i
USERS: "USERS"i
DROP: "DROP"i
USER: "USER"i
ALTER: "ALTER"i
ACTIVE: "ACTIVE"i
PASSWORD: "PASSWORD"i
DATASETS: "DATASETS"i
OF: "OF"i
AGENTS: "AGENTS"i
ROLE: "ROLE"i
ROLES: "ROLES"i
DESCRIPTION: "DESCRIPTION"i
GRANT: "GRANT"i
REVOKE: "REVOKE"i
ALL: "ALL"i
PERMISSION: "PERMISSION"i
TO: "TO"i
FROM: "FROM"i
FOR: "FOR"i
RESOURCES: "RESOURCES"i
ON: "ON"i
SET: "SET"i
VERSION: "VERSION"i

list_services: LIST SERVICES ";"
show_service: SHOW SERVICE NUMBER ";"
startup_service: STARTUP SERVICE NUMBER ";"
shutdown_service: SHUTDOWN SERVICE NUMBER ";"
restart_service: RESTART SERVICE NUMBER ";"

list_users: LIST USERS ";"
drop_user: DROP USER quoted_string ";"
alter_user: ALTER USER PASSWORD quoted_string quoted_string ";"
show_user: SHOW USER quoted_string ";"
create_user: CREATE USER quoted_string quoted_string ";"
activate_user: ALTER USER ACTIVE quoted_string status ";"

list_datasets: LIST DATASETS OF quoted_string ";"
list_agents: LIST AGENTS OF quoted_string ";"

create_role: CREATE ROLE identifier [DESCRIPTION quoted_string] ";"
drop_role: DROP ROLE identifier ";"
alter_role: ALTER ROLE identifier SET DESCRIPTION quoted_string ";"
list_roles: LIST ROLES ";"
show_role: SHOW ROLE identifier ";"

grant_permission: GRANT action_list ON identifier TO ROLE identifier ";"
revoke_permission: REVOKE action_list ON identifier FROM ROLE identifier ";"
alter_user_role: ALTER USER quoted_string SET ROLE identifier ";"
show_user_permission: SHOW USER PERMISSION quoted_string ";"

show_version: SHOW VERSION ";"

action_list: identifier ("," identifier)*

identifier: WORD
quoted_string: QUOTED_STRING
status: WORD

QUOTED_STRING: /'[^']+'/ | /"[^"]+"/
WORD: /[a-zA-Z0-9_\-\.]+/
NUMBER: /[0-9]+/

%import common.WS
%ignore WS
"""
class AdminTransformer(Transformer):
    """Lowers the Lark parse tree into plain command dictionaries for dispatch."""

    def start(self, items):
        return items[0]

    def command(self, items):
        return items[0]

    def list_services(self, items):
        result = {'type': 'list_services'}
        return result

    def show_service(self, items):
        service_id = int(items[2])
        return {"type": "show_service", "number": service_id}

    def startup_service(self, items):
        service_id = int(items[2])
        return {"type": "startup_service", "number": service_id}

    def shutdown_service(self, items):
        service_id = int(items[2])
        return {"type": "shutdown_service", "number": service_id}

    def restart_service(self, items):
        service_id = int(items[2])
        return {"type": "restart_service", "number": service_id}

    def list_users(self, items):
        return {"type": "list_users"}

    def show_user(self, items):
        user_name = items[2]
        return {"type": "show_user", "user_name": user_name}

    def drop_user(self, items):
        user_name = items[2]
        return {"type": "drop_user", "user_name": user_name}

    def alter_user(self, items):
        user_name = items[3]
        new_password = items[4]
        return {"type": "alter_user", "user_name": user_name, "password": new_password}

    def create_user(self, items):
        user_name = items[2]
        password = items[3]
        return {"type": "create_user", "user_name": user_name, "password": password, "role": "user"}

    def activate_user(self, items):
        user_name = items[3]
        activate_status = items[4]
        return {"type": "activate_user", "activate_status": activate_status, "user_name": user_name}

    def list_datasets(self, items):
        user_name = items[3]
        return {"type": "list_datasets", "user_name": user_name}

    def list_agents(self, items):
        user_name = items[3]
        return {"type": "list_agents", "user_name": user_name}

    def create_role(self, items):
        role_name = items[2]
        if len(items) > 4:
            description = items[4]
            return {"type": "create_role", "role_name": role_name, "description": description}
        else:
            return {"type": "create_role", "role_name": role_name}

    def drop_role(self, items):
        role_name = items[2]
        return {"type": "drop_role", "role_name": role_name}

    def alter_role(self, items):
        role_name = items[2]
        description = items[5]
        return {"type": "alter_role", "role_name": role_name, "description": description}

    def list_roles(self, items):
        return {"type": "list_roles"}

    def show_role(self, items):
        role_name = items[2]
        return {"type": "show_role", "role_name": role_name}

    def grant_permission(self, items):
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {"type": "grant_permission", "role_name": role_name, "resource": resource, "actions": action_list}

    def revoke_permission(self, items):
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {
            "type": "revoke_permission",
            "role_name": role_name,
            "resource": resource, "actions": action_list
        }

    def alter_user_role(self, items):
        user_name = items[2]
        role_name = items[5]
        return {"type": "alter_user_role", "user_name": user_name, "role_name": role_name}

    def show_user_permission(self, items):
        user_name = items[3]
        return {"type": "show_user_permission", "user_name": user_name}

    def show_version(self, items):
        return {"type": "show_version"}

    def action_list(self, items):
        return items

    def meta_command(self, items):
        command_name = str(items[0]).lower()
        args = items[1:] if len(items) > 1 else []

        # handle quoted parameter
        parsed_args = []
        for arg in args:
            if hasattr(arg, 'value'):
                parsed_args.append(arg.value)
            else:
                parsed_args.append(str(arg))

        return {'type': 'meta', 'command': command_name, 'args': parsed_args}

    def meta_command_name(self, items):
        return items[0]

    def meta_args(self, items):
        return items
def encrypt(input_string):
    # Fixed RSA public key matching the key pair used by the RAGFlow server's login endpoint.
    pub = '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArq9XTUSeYr2+N1h3Afl/z8Dse/2yD0ZGrKwx+EEEcdsBLca9Ynmx3nIB5obmLlSfmskLpBo0UACBmB5rEjBp2Q2f3AG3Hjd4B+gNCG6BDaawuDlgANIhGnaTLrIqWrrcm4EMzJOnAOI1fgzJRsOOUEfaS318Eq9OVO3apEyCCt0lOQK6PuksduOjVxtltDav+guVAA068NrPYmRNabVKRNLJpL8w4D44sfth5RvZ3q9t+6RTArpEtc5sh5ChzvqPOzKGMXW83C95TxmXqpbK6olN4RevSfVjEAgCydH6HN6OhtOQEcnrU97r9H0iZOWwbw3pVrZiUkuRD1R56Wzs2wIDAQAB\n-----END PUBLIC KEY-----'
    pub_key = RSA.importKey(pub)
    cipher = Cipher_pkcs1_v1_5.new(pub_key)
    # The input is base64-encoded before RSA encryption, matching what the server expects.
    cipher_text = cipher.encrypt(base64.b64encode(input_string.encode('utf-8')))
    return base64.b64encode(cipher_text).decode("utf-8")


def encode_to_base64(input_string):
    base64_encoded = base64.b64encode(input_string.encode('utf-8'))
    return base64_encoded.decode('utf-8')
class AdminCLI(Cmd):
    def __init__(self):
        super().__init__()
        self.parser = Lark(GRAMMAR, start='start', parser='lalr', transformer=AdminTransformer())
        self.command_history = []
        self.is_interactive = False
        self.admin_account = "admin@ragflow.io"
        self.admin_password: str = "admin"
        self.session = requests.Session()
        self.access_token: str = ""
        self.host: str = ""
        self.port: int = 0

    intro = r"""Type "\h" for help."""
    prompt = "admin> "

    def onecmd(self, command: str) -> bool:
        try:
            result = self.parse_command(command)

            if isinstance(result, dict):
                if 'type' in result and result.get('type') == 'empty':
                    return False

            self.execute_command(result)

            if isinstance(result, Tree):
                return False

            if result.get('type') == 'meta' and result.get('command') in ['q', 'quit', 'exit']:
                return True

        except KeyboardInterrupt:
            print("\nUse '\\q' to quit")
        except EOFError:
            print("\nGoodbye!")
            return True
        return False

    def emptyline(self) -> bool:
        return False

    def default(self, line: str) -> bool:
        return self.onecmd(line)

    def parse_command(self, command_str: str) -> dict[str, str]:
        if not command_str.strip():
            return {'type': 'empty'}

        self.command_history.append(command_str)

        try:
            result = self.parser.parse(command_str)
            return result
        except Exception as e:
            return {'type': 'error', 'message': f'Parse error: {str(e)}'}

    def verify_admin(self, arguments: dict, single_command: bool):
        self.host = arguments['host']
        self.port = arguments['port']
        print(f"Attempt to access ip: {self.host}, port: {self.port}")
        url = f"http://{self.host}:{self.port}/api/v1/admin/login"

        # In interactive mode allow a few retries; in single-command mode fail fast.
        attempt_count = 3
        if single_command:
            attempt_count = 1

        try_count = 0
        while True:
            try_count += 1
            if try_count > attempt_count:
                return False

            if single_command:
                admin_passwd = arguments['password']
            else:
                admin_passwd = getpass.getpass(f"password for {self.admin_account}: ").strip()
            try:
                self.admin_password = encrypt(admin_passwd)
                response = self.session.post(url, json={'email': self.admin_account, 'password': self.admin_password})
                if response.status_code == 200:
                    res_json = response.json()
                    error_code = res_json.get('code', -1)
                    if error_code == 0:
                        self.session.headers.update({
                            'Content-Type': 'application/json',
                            'Authorization': response.headers['Authorization'],
                            'User-Agent': 'RAGFlow-CLI/0.22.1'
                        })
                        print("Authentication successful.")
                        return True
                    else:
                        error_message = res_json.get('message', 'Unknown error')
                        print(f"Authentication failed: {error_message}, try again")
                        continue
                else:
                    print(f"Bad response, status: {response.status_code}, password is wrong")
            except Exception as e:
                print(str(e))
                print(f"Can't access {self.host}, port: {self.port}")
    def _format_service_detail_table(self, data):
        if isinstance(data, list):
            return data
        if not all([isinstance(v, list) for v in data.values()]):
            # normal table
            return data
        # handle task_executor heartbeats map, for example {'name': [{'done': 2, 'now': timestamp1}, {'done': 3, 'now': timestamp2}]}
        task_executor_list = []
        for k, v in data.items():
            # display latest status
            heartbeats = sorted(v, key=lambda x: x["now"], reverse=True)
            task_executor_list.append({
                "task_executor_name": k,
                **heartbeats[0],
            } if heartbeats else {"task_executor_name": k})
        return task_executor_list

    def _print_table_simple(self, data):
        if not data:
            print("No data to print")
            return
        if isinstance(data, dict):
            # handle single row data
            data = [data]

        columns = list(set().union(*(d.keys() for d in data)))
        columns.sort()
        col_widths = {}

        def get_string_width(text):
            # Treat ASCII as half-width (1 column) and everything else as full-width (2 columns).
            half_width_chars = (
                " !\"#$%&'()*+,-./0123456789:;<=>?@"
                "ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`"
                "abcdefghijklmnopqrstuvwxyz{|}~"
                "\t\n\r"
            )
            width = 0
            for char in text:
                if char in half_width_chars:
                    width += 1
                else:
                    width += 2
            return width

        for col in columns:
            max_width = get_string_width(str(col))
            for item in data:
                value_len = get_string_width(str(item.get(col, '')))
                if value_len > max_width:
                    max_width = value_len
            col_widths[col] = max(2, max_width)

        # Generate delimiter
        separator = "+" + "+".join(["-" * (col_widths[col] + 2) for col in columns]) + "+"

        # Print header
        print(separator)
        header = "|" + "|".join([f" {col:<{col_widths[col]}} " for col in columns]) + "|"
        print(header)
        print(separator)

        # Print data
        for item in data:
            row = "|"
            for col in columns:
                value = str(item.get(col, ''))
                if get_string_width(value) > col_widths[col]:
                    value = value[:col_widths[col] - 3] + "..."
                # Compensate the pad width for full-width characters counted above.
                row += f" {value:<{col_widths[col] - (get_string_width(value) - len(value))}} |"
            print(row)

        print(separator)

    def run_interactive(self):

        self.is_interactive = True
        print("RAGFlow Admin command line interface - Type '\\?' for help, '\\q' to quit")

        while True:
            try:
                command = input("admin> ").strip()
                if not command:
                    continue

                print(f"command: {command}")
                result = self.parse_command(command)
                self.execute_command(result)

                if isinstance(result, Tree):
                    continue

                if result.get('type') == 'meta' and result.get('command') in ['q', 'quit', 'exit']:
                    break

            except KeyboardInterrupt:
                print("\nUse '\\q' to quit")
            except EOFError:
                print("\nGoodbye!")
                break

    def run_single_command(self, command: str):
        result = self.parse_command(command)
        self.execute_command(result)

    def parse_connection_args(self, args: List[str]) -> Dict[str, Any]:
        # add_help=False frees up -h for the host argument.
        parser = argparse.ArgumentParser(description='Admin CLI Client', add_help=False)
        parser.add_argument('-h', '--host', default='localhost', help='Admin service host')
        parser.add_argument('-p', '--port', type=int, default=9381, help='Admin service port')
        parser.add_argument('-w', '--password', default='admin', type=str, help='Superuser password')
        parser.add_argument('command', nargs='?', help='Single command')
        try:
            parsed_args, remaining_args = parser.parse_known_args(args)
            if remaining_args:
                command = remaining_args[0]
                return {
                    'host': parsed_args.host,
                    'port': parsed_args.port,
                    'password': parsed_args.password,
                    'command': command
                }
            else:
                return {
                    'host': parsed_args.host,
                    'port': parsed_args.port,
                }
        except SystemExit:
            return {'error': 'Invalid connection arguments'}
    def execute_command(self, parsed_command: Dict[str, Any]):

        command_dict: dict
        if isinstance(parsed_command, Tree):
            command_dict = parsed_command.children[0]
        else:
            if parsed_command['type'] == 'error':
                print(f"Error: {parsed_command['message']}")
                return
            else:
                command_dict = parsed_command

        # print(f"Parsed command: {command_dict}")

        command_type = command_dict['type']

        match command_type:
            case 'list_services':
                self._handle_list_services(command_dict)
            case 'show_service':
                self._handle_show_service(command_dict)
            case 'restart_service':
                self._handle_restart_service(command_dict)
            case 'shutdown_service':
                self._handle_shutdown_service(command_dict)
            case 'startup_service':
                self._handle_startup_service(command_dict)
            case 'list_users':
                self._handle_list_users(command_dict)
            case 'show_user':
                self._handle_show_user(command_dict)
            case 'drop_user':
                self._handle_drop_user(command_dict)
            case 'alter_user':
                self._handle_alter_user(command_dict)
            case 'create_user':
                self._handle_create_user(command_dict)
            case 'activate_user':
                self._handle_activate_user(command_dict)
            case 'list_datasets':
                self._handle_list_datasets(command_dict)
            case 'list_agents':
                self._handle_list_agents(command_dict)
            case 'create_role':
                self._create_role(command_dict)
            case 'drop_role':
                self._drop_role(command_dict)
            case 'alter_role':
                self._alter_role(command_dict)
            case 'list_roles':
                self._list_roles(command_dict)
            case 'show_role':
                self._show_role(command_dict)
            case 'grant_permission':
                self._grant_permission(command_dict)
            case 'revoke_permission':
                self._revoke_permission(command_dict)
            case 'alter_user_role':
                self._alter_user_role(command_dict)
            case 'show_user_permission':
                self._show_user_permission(command_dict)
            case 'show_version':
                self._show_version(command_dict)
            case 'meta':
                self._handle_meta_command(command_dict)
            case _:
                print(f"Command '{command_type}' would be executed with API")

    def _handle_list_services(self, command):
        print("Listing all services")

        url = f'http://{self.host}:{self.port}/api/v1/admin/services'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get all services, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_show_service(self, command):
        service_id: int = command['number']
        print(f"Showing service: {service_id}")

        url = f'http://{self.host}:{self.port}/api/v1/admin/services/{service_id}'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            res_data = res_json['data']
            if 'status' in res_data and res_data['status'] == 'alive':
                print(f"Service {res_data['service_name']} is alive, ")
                if isinstance(res_data['message'], str):
                    print(res_data['message'])
                else:
                    data = self._format_service_detail_table(res_data['message'])
                    self._print_table_simple(data)
            else:
                print(f"Service {res_data['service_name']} is down, {res_data['message']}")
        else:
            print(f"Fail to show service, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_restart_service(self, command):
        service_id: int = command['number']
        print(f"Restart service {service_id}")

    def _handle_shutdown_service(self, command):
        service_id: int = command['number']
        print(f"Shutdown service {service_id}")

    def _handle_startup_service(self, command):
        service_id: int = command['number']
        print(f"Startup service {service_id}")

    def _handle_list_users(self, command):
        print("Listing all users")

        url = f'http://{self.host}:{self.port}/api/v1/admin/users'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get all users, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_show_user(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Showing user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            table_data = res_json['data']
            table_data.pop('avatar')
            self._print_table_simple(table_data)
        else:
            print(f"Fail to get user {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_drop_user(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Drop user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}'
        response = self.session.delete(url)
        res_json = response.json()
        if response.status_code == 200:
            print(res_json["message"])
        else:
            print(f"Fail to drop user, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_alter_user(self, command):
        user_name_tree: Tree = command['user_name']
        user_name: str = user_name_tree.children[0].strip("'\"")
        password_tree: Tree = command['password']
        password: str = password_tree.children[0].strip("'\"")
        print(f"Alter user: {user_name}, password: {password}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/password'
        response = self.session.put(url, json={'new_password': encrypt(password)})
        res_json = response.json()
        if response.status_code == 200:
            print(res_json["message"])
        else:
            print(f"Fail to alter password, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_create_user(self, command):
        user_name_tree: Tree = command['user_name']
        user_name: str = user_name_tree.children[0].strip("'\"")
        password_tree: Tree = command['password']
        password: str = password_tree.children[0].strip("'\"")
        role: str = command['role']
        print(f"Create user: {user_name}, password: {password}, role: {role}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users'
        response = self.session.post(
            url,
            json={'user_name': user_name, 'password': encrypt(password), 'role': role}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to create user {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_activate_user(self, command):
        user_name_tree: Tree = command['user_name']
        user_name: str = user_name_tree.children[0].strip("'\"")
        activate_tree: Tree = command['activate_status']
        activate_status: str = activate_tree.children[0].strip("'\"")
        if activate_status.lower() in ['on', 'off']:
            print(f"Alter user {user_name} activate status, turn {activate_status.lower()}.")
            url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/activate'
            response = self.session.put(url, json={'activate_status': activate_status})
            res_json = response.json()
            if response.status_code == 200:
                print(res_json["message"])
            else:
                print(f"Fail to alter activate status, code: {res_json['code']}, message: {res_json['message']}")
        else:
            print(f"Unknown activate status: {activate_status}.")

    def _handle_list_datasets(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Listing all datasets of user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/datasets'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            table_data = res_json['data']
            for t in table_data:
                t.pop('avatar')
            self._print_table_simple(table_data)
        else:
            print(f"Fail to get all datasets of {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_list_agents(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Listing all agents of user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/agents'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            table_data = res_json['data']
            for t in table_data:
                t.pop('avatar')
            self._print_table_simple(table_data)
        else:
            print(f"Fail to get all agents of {user_name}, code: {res_json['code']}, message: {res_json['message']}")
    def _create_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        desc_str: str = ''
        if 'description' in command:
            desc_tree: Tree = command['description']
            desc_str = desc_tree.children[0].strip("'\"")

        print(f"create role name: {role_name}, description: {desc_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles'
        response = self.session.post(
            url,
            json={'role_name': role_name, 'description': desc_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to create role {role_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _drop_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        print(f"drop role name: {role_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name}'
        response = self.session.delete(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to drop role {role_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _alter_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        desc_tree: Tree = command['description']
        desc_str: str = desc_tree.children[0].strip("'\"")

        print(f"alter role name: {role_name}, description: {desc_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name}'
        response = self.session.put(
            url,
            json={'description': desc_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to update role {role_name} with description: {desc_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _list_roles(self, command):
        print("Listing all roles")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to list roles, code: {res_json['code']}, message: {res_json['message']}")

    def _show_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        print(f"show role: {role_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name}/permission'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to list roles, code: {res_json['code']}, message: {res_json['message']}")

    def _grant_permission(self, command):
        role_name_tree: Tree = command['role_name']
        role_name_str: str = role_name_tree.children[0].strip("'\"")
        resource_tree: Tree = command['resource']
        resource_str: str = resource_tree.children[0].strip("'\"")
        action_tree_list: list = command['actions']
        actions: list = []
        for action_tree in action_tree_list:
            action_str: str = action_tree.children[0].strip("'\"")
            actions.append(action_str)
        print(f"grant role_name: {role_name_str}, resource: {resource_str}, actions: {actions}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name_str}/permission'
        response = self.session.post(
            url,
            json={'actions': actions, 'resource': resource_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to grant role {role_name_str} with {actions} on {resource_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _revoke_permission(self, command):
        role_name_tree: Tree = command['role_name']
        role_name_str: str = role_name_tree.children[0].strip("'\"")
        resource_tree: Tree = command['resource']
        resource_str: str = resource_tree.children[0].strip("'\"")
        action_tree_list: list = command['actions']
        actions: list = []
        for action_tree in action_tree_list:
            action_str: str = action_tree.children[0].strip("'\"")
            actions.append(action_str)
        print(f"revoke role_name: {role_name_str}, resource: {resource_str}, actions: {actions}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name_str}/permission'
        response = self.session.delete(
            url,
            json={'actions': actions, 'resource': resource_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to revoke role {role_name_str} with {actions} on {resource_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _alter_user_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name_str: str = role_name_tree.children[0].strip("'\"")
        user_name_tree: Tree = command['user_name']
        user_name_str: str = user_name_tree.children[0].strip("'\"")
        print(f"alter_user_role user_name: {user_name_str}, role_name: {role_name_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name_str}/role'
        response = self.session.put(
            url,
            json={'role_name': role_name_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to alter user: {user_name_str} to role {role_name_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _show_user_permission(self, command):
        user_name_tree: Tree = command['user_name']
        user_name_str: str = user_name_tree.children[0].strip("'\"")
        print(f"show_user_permission user_name: {user_name_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name_str}/permission'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to show user: {user_name_str} permission, code: {res_json['code']}, message: {res_json['message']}")

    def _show_version(self, command):
        print("show_version")
        url = f'http://{self.host}:{self.port}/api/v1/admin/version'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to show version, code: {res_json['code']}, message: {res_json['message']}")
    def _handle_meta_command(self, command):
        meta_command = command['command']
        args = command.get('args', [])

        if meta_command in ['?', 'h', 'help']:
            self.show_help()
        elif meta_command in ['q', 'quit', 'exit']:
            print("Goodbye!")
        else:
            print(f"Meta command '{meta_command}' with args {args}")

    def show_help(self):
        """Help info"""
        help_text = """
Commands:
  LIST SERVICES
  SHOW SERVICE <service>
  STARTUP SERVICE <service>
  SHUTDOWN SERVICE <service>
  RESTART SERVICE <service>
  LIST USERS
  SHOW USER <user>
  DROP USER <user>
  CREATE USER <user> <password>
  ALTER USER PASSWORD <user> <new_password>
  ALTER USER ACTIVE <user> <on/off>
  LIST DATASETS OF <user>
  LIST AGENTS OF <user>

Meta Commands:
  \\?, \\h, \\help     Show this help
  \\q, \\quit, \\exit  Quit the CLI
"""
        print(help_text)


def main():
    import sys

    cli = AdminCLI()

    args = cli.parse_connection_args(sys.argv)
    if 'error' in args:
        print(f"Error: {args['error']}")
        return

    if 'command' in args:
        if 'password' not in args:
            print("Error: password is missing")
            return
        if cli.verify_admin(args, single_command=True):
            command: str = args['command']
            print(f"Run single command: {command}")
            cli.run_single_command(command)
    else:
        if cli.verify_admin(args, single_command=False):
            print(r"""
    ____  ___   ______________                 ___       __          _
   / __ \/   | / ____/ ____/ /___ _      __   /   | ____/ /___ ___  (_)___
  / /_/ / /| |/ / __/ /_  / / __ \ | /| / /  / /| |/ __  / __ `__ \/ / __ \
 / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ /  / ___ / /_/ / / / / / / / / / /
/_/ |_/_/  |_\____/_/   /_/\____/|__/|__/  /_/  |_\__,_/_/ /_/ /_/_/_/ /_/
""")
            cli.cmdloop()


if __name__ == '__main__':
    main()
admin/client/pyproject.toml (new file, 24 lines)
@@ -0,0 +1,24 @@
[project]
name = "ragflow-cli"
version = "0.22.1"
description = "Admin Service's client of [RAGFlow](https://github.com/infiniflow/ragflow). The Admin Service provides user management and system monitoring."
authors = [{ name = "Lynn", email = "lynn_inf@hotmail.com" }]
license = { text = "Apache License, Version 2.0" }
readme = "README.md"
requires-python = ">=3.10,<3.13"
dependencies = [
    "requests>=2.30.0,<3.0.0",
    "beartype>=0.20.0,<1.0.0",
    "pycryptodomex>=3.10.0",
    "lark>=1.1.0",
]

[dependency-groups]
test = [
    "pytest>=8.3.5",
    "requests>=2.32.3",
    "requests-toolbelt>=1.0.0",
]

[project.scripts]
ragflow-cli = "admin_client:main"
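The `[project.scripts]` table is what makes the package installable as a command: it generates a `ragflow-cli` console script that calls `main()` in `admin_client`. A quick check after installation (a sketch):

```bash
pip install ragflow-cli==0.22.1
command -v ragflow-cli           # the generated console script should be on PATH
ragflow-cli -h 127.0.0.1 -p 9381
```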
admin/client/uv.lock (generated file, 298 lines)
@@ -0,0 +1,298 @@
version = 1
revision = 3
requires-python = ">=3.10, <3.13"

[[package]]
name = "beartype"
version = "0.22.6"
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/88/e2/105ceb1704cb80fe4ab3872529ab7b6f365cf7c74f725e6132d0efcf1560/beartype-0.22.6.tar.gz", hash = "sha256:97fbda69c20b48c5780ac2ca60ce3c1bb9af29b3a1a0216898ffabdd523e48f4", size = 1588975, upload-time = "2025-11-20T04:47:14.736Z" }
wheels = [
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/98/c9/ceecc71fe2c9495a1d8e08d44f5f31f5bca1350d5b2e27a4b6265424f59e/beartype-0.22.6-py3-none-any.whl", hash = "sha256:0584bc46a2ea2a871509679278cda992eadde676c01356ab0ac77421f3c9a093", size = 1324807, upload-time = "2025-11-20T04:47:11.837Z" },
]

[[package]]
name = "certifi"
version = "2025.11.12"
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" }
wheels = [
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" },
]

[[package]]
name = "charset-normalizer"
version = "3.4.4"
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" }
wheels = [
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = "2025-10-14T04:40:20.607Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "colorama"
|
||||||
|
version = "0.4.6"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "exceptiongroup"
|
||||||
|
version = "1.3.1"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "typing-extensions" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "idna"
|
||||||
|
version = "3.11"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "iniconfig"
|
||||||
|
version = "2.3.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "lark"
|
||||||
|
version = "1.3.1"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/da/34/28fff3ab31ccff1fd4f6c7c7b0ceb2b6968d8ea4950663eadcb5720591a0/lark-1.3.1.tar.gz", hash = "sha256:b426a7a6d6d53189d318f2b6236ab5d6429eaf09259f1ca33eb716eed10d2905", size = 382732, upload-time = "2025-10-27T18:25:56.653Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "packaging"
|
||||||
|
version = "25.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pluggy"
|
||||||
|
version = "1.6.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pycryptodomex"
|
||||||
|
version = "3.23.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/c9/85/e24bf90972a30b0fcd16c73009add1d7d7cd9140c2498a68252028899e41/pycryptodomex-3.23.0.tar.gz", hash = "sha256:71909758f010c82bc99b0abf4ea12012c98962fbf0583c2164f8b84533c2e4da", size = 4922157, upload-time = "2025-05-17T17:23:41.434Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/dd/9c/1a8f35daa39784ed8adf93a694e7e5dc15c23c741bbda06e1d45f8979e9e/pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:06698f957fe1ab229a99ba2defeeae1c09af185baa909a31a5d1f9d42b1aaed6", size = 2499240, upload-time = "2025-05-17T17:22:46.953Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/7a/62/f5221a191a97157d240cf6643747558759126c76ee92f29a3f4aee3197a5/pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2c2537863eccef2d41061e82a881dcabb04944c5c06c5aa7110b577cc487545", size = 1644042, upload-time = "2025-05-17T17:22:49.098Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/8c/fd/5a054543c8988d4ed7b612721d7e78a4b9bf36bc3c5ad45ef45c22d0060e/pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43c446e2ba8df8889e0e16f02211c25b4934898384c1ec1ec04d7889c0333587", size = 2186227, upload-time = "2025-05-17T17:22:51.139Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/c8/a9/8862616a85cf450d2822dbd4fff1fcaba90877907a6ff5bc2672cafe42f8/pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f489c4765093fb60e2edafdf223397bc716491b2b69fe74367b70d6999257a5c", size = 2272578, upload-time = "2025-05-17T17:22:53.676Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/46/9f/bda9c49a7c1842820de674ab36c79f4fbeeee03f8ff0e4f3546c3889076b/pycryptodomex-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdc69d0d3d989a1029df0eed67cc5e8e5d968f3724f4519bd03e0ec68df7543c", size = 2312166, upload-time = "2025-05-17T17:22:56.585Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/03/cc/870b9bf8ca92866ca0186534801cf8d20554ad2a76ca959538041b7a7cf4/pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6bbcb1dd0f646484939e142462d9e532482bc74475cecf9c4903d4e1cd21f003", size = 2185467, upload-time = "2025-05-17T17:22:59.237Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/96/e3/ce9348236d8e669fea5dd82a90e86be48b9c341210f44e25443162aba187/pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:8a4fcd42ccb04c31268d1efeecfccfd1249612b4de6374205376b8f280321744", size = 2346104, upload-time = "2025-05-17T17:23:02.112Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/a5/e9/e869bcee87beb89040263c416a8a50204f7f7a83ac11897646c9e71e0daf/pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:55ccbe27f049743a4caf4f4221b166560d3438d0b1e5ab929e07ae1702a4d6fd", size = 2271038, upload-time = "2025-05-17T17:23:04.872Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/8d/67/09ee8500dd22614af5fbaa51a4aee6e342b5fa8aecf0a6cb9cbf52fa6d45/pycryptodomex-3.23.0-cp37-abi3-win32.whl", hash = "sha256:189afbc87f0b9f158386bf051f720e20fa6145975f1e76369303d0f31d1a8d7c", size = 1771969, upload-time = "2025-05-17T17:23:07.115Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/69/96/11f36f71a865dd6df03716d33bd07a67e9d20f6b8d39820470b766af323c/pycryptodomex-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:52e5ca58c3a0b0bd5e100a9fbc8015059b05cffc6c66ce9d98b4b45e023443b9", size = 1803124, upload-time = "2025-05-17T17:23:09.267Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/f9/93/45c1cdcbeb182ccd2e144c693eaa097763b08b38cded279f0053ed53c553/pycryptodomex-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:02d87b80778c171445d67e23d1caef279bf4b25c3597050ccd2e13970b57fd51", size = 1707161, upload-time = "2025-05-17T17:23:11.414Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/f3/b8/3e76d948c3c4ac71335bbe75dac53e154b40b0f8f1f022dfa295257a0c96/pycryptodomex-3.23.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ebfff755c360d674306e5891c564a274a47953562b42fb74a5c25b8fc1fb1cb5", size = 1627695, upload-time = "2025-05-17T17:23:17.38Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/6a/cf/80f4297a4820dfdfd1c88cf6c4666a200f204b3488103d027b5edd9176ec/pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eca54f4bb349d45afc17e3011ed4264ef1cc9e266699874cdd1349c504e64798", size = 1675772, upload-time = "2025-05-17T17:23:19.202Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/d1/42/1e969ee0ad19fe3134b0e1b856c39bd0b70d47a4d0e81c2a8b05727394c9/pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2596e643d4365e14d0879dc5aafe6355616c61c2176009270f3048f6d9a61f", size = 1668083, upload-time = "2025-05-17T17:23:21.867Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/6e/c3/1de4f7631fea8a992a44ba632aa40e0008764c0fb9bf2854b0acf78c2cf2/pycryptodomex-3.23.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdfac7cda115bca3a5abb2f9e43bc2fb66c2b65ab074913643803ca7083a79ea", size = 1706056, upload-time = "2025-05-17T17:23:24.031Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/f2/5f/af7da8e6f1e42b52f44a24d08b8e4c726207434e2593732d39e7af5e7256/pycryptodomex-3.23.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:14c37aaece158d0ace436f76a7bb19093db3b4deade9797abfc39ec6cd6cc2fe", size = 1806478, upload-time = "2025-05-17T17:23:26.066Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pygments"
|
||||||
|
version = "2.19.2"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pytest"
|
||||||
|
version = "9.0.1"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||||
|
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
|
||||||
|
{ name = "iniconfig" },
|
||||||
|
{ name = "packaging" },
|
||||||
|
{ name = "pluggy" },
|
||||||
|
{ name = "pygments" },
|
||||||
|
{ name = "tomli", marker = "python_full_version < '3.11'" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz", hash = "sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size = 1564125, upload-time = "2025-11-12T13:05:09.333Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl", hash = "sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size = 373668, upload-time = "2025-11-12T13:05:07.379Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ragflow-cli"
|
||||||
|
version = "0.22.1"
|
||||||
|
source = { virtual = "." }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "beartype" },
|
||||||
|
{ name = "lark" },
|
||||||
|
{ name = "pycryptodomex" },
|
||||||
|
{ name = "requests" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.dev-dependencies]
|
||||||
|
test = [
|
||||||
|
{ name = "pytest" },
|
||||||
|
{ name = "requests" },
|
||||||
|
{ name = "requests-toolbelt" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.metadata]
|
||||||
|
requires-dist = [
|
||||||
|
{ name = "beartype", specifier = ">=0.20.0,<1.0.0" },
|
||||||
|
{ name = "lark", specifier = ">=1.1.0" },
|
||||||
|
{ name = "pycryptodomex", specifier = ">=3.10.0" },
|
||||||
|
{ name = "requests", specifier = ">=2.30.0,<3.0.0" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[package.metadata.requires-dev]
|
||||||
|
test = [
|
||||||
|
{ name = "pytest", specifier = ">=8.3.5" },
|
||||||
|
{ name = "requests", specifier = ">=2.32.3" },
|
||||||
|
{ name = "requests-toolbelt", specifier = ">=1.0.0" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "requests"
|
||||||
|
version = "2.32.5"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "certifi" },
|
||||||
|
{ name = "charset-normalizer" },
|
||||||
|
{ name = "idna" },
|
||||||
|
{ name = "urllib3" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "requests-toolbelt"
|
||||||
|
version = "1.0.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
dependencies = [
|
||||||
|
{ name = "requests" },
|
||||||
|
]
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tomli"
|
||||||
|
version = "2.3.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" },
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "typing-extensions"
|
||||||
|
version = "4.15.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "urllib3"
|
||||||
|
version = "2.5.0"
|
||||||
|
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
|
||||||
|
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
|
||||||
|
]
|
||||||
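For reference, the pinned set above can be inspected programmatically. A minimal sketch, assuming the file is saved as "uv.lock" (the [[package]]/wheels layout matches uv's lockfile format; the filename itself is an assumption), using the stdlib tomllib, or the tomli backport pinned above on Python < 3.11:

import sys

if sys.version_info >= (3, 11):
    import tomllib
else:
    import tomli as tomllib  # backport pinned in the lockfile for Python < 3.11

with open("uv.lock", "rb") as f:  # assumed filename
    lock = tomllib.load(f)

# List every pinned package with its version and wheel count.
for pkg in lock.get("package", []):
    print(pkg["name"], pkg["version"], len(pkg.get("wheels", [])))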
admin/server/admin_server.py (new file, 82 lines)
@@ -0,0 +1,82 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import signal
import logging
import time
import threading
import traceback
import faulthandler

from flask import Flask
from flask_login import LoginManager
from werkzeug.serving import run_simple
from routes import admin_bp
from common.log_utils import init_root_logger
from common.constants import SERVICE_CONF
from common.config_utils import show_configs
from common import settings
from config import load_configurations, SERVICE_CONFIGS
from auth import init_default_admin, setup_auth
from flask_session import Session
from common.versions import get_ragflow_version

stop_event = threading.Event()

if __name__ == '__main__':
    faulthandler.enable()
    init_root_logger("admin_service")
    logging.info(r"""
    ____  ___   ______________                 ___       __          _
   / __ \/   | / ____/ ____/ /___ _      __   /   | ____/ /___ ___  (_)___
  / /_/ / /| |/ / __/ /_  / / __ \ | /| / /  / /| |/ __  / __ `__ \/ / __ \
 / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ /  / ___ / /_/ / / / / / / / / / /
/_/ |_/_/  |_\____/_/   /_/\____/|__/|__/  /_/  |_\__,_/_/ /_/ /_/_/_/ /_/
    """)

    app = Flask(__name__)
    app.register_blueprint(admin_bp)
    app.config["SESSION_PERMANENT"] = False
    app.config["SESSION_TYPE"] = "filesystem"
    app.config["MAX_CONTENT_LENGTH"] = int(
        os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
    )
    Session(app)
    logging.info(f'RAGFlow version: {get_ragflow_version()}')
    show_configs()
    login_manager = LoginManager()
    login_manager.init_app(app)
    settings.init_settings()
    setup_auth(login_manager)
    init_default_admin()
    SERVICE_CONFIGS.configs = load_configurations(SERVICE_CONF)

    try:
        logging.info("RAGFlow Admin service start...")
        run_simple(
            hostname="0.0.0.0",
            port=9381,
            application=app,
            threaded=True,
            use_reloader=False,
            use_debugger=True,
        )
    except Exception:
        traceback.print_exc()
        stop_event.set()
        time.sleep(1)
        os.kill(os.getpid(), signal.SIGKILL)
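Once the service above is running, a client can reach it over HTTP. A minimal sketch (assumptions: default host/port from the run_simple call; the /admin/auth path is hypothetical — the real routes live in the admin_bp blueprint, which is not part of this diff):

import requests

resp = requests.post(
    "http://127.0.0.1:9381/admin/auth",   # hypothetical route guarded by login_verify
    auth=("admin@ragflow.io", "admin"),   # HTTP Basic credentials, which login_verify reads
    timeout=10,
)
print(resp.status_code, resp.text)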
admin/server/auth.py (new file, 188 lines)
@@ -0,0 +1,188 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import logging
import uuid
from functools import wraps
from datetime import datetime

from flask import jsonify, request
from flask_login import current_user, login_user
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

from api.common.exceptions import AdminException, UserNotFoundError
from api.common.base64 import encode_to_base64
from api.db.services import UserService
from common.constants import ActiveEnum, StatusEnum
from api.utils.crypt import decrypt
from common.misc_utils import get_uuid
from common.time_utils import current_timestamp, datetime_format, get_format_time
from common.connection_utils import sync_construct_response
from common import settings


def setup_auth(login_manager):
    @login_manager.request_loader
    def load_user(web_request):
        jwt = Serializer(secret_key=settings.SECRET_KEY)
        authorization = web_request.headers.get("Authorization")
        if authorization:
            try:
                access_token = str(jwt.loads(authorization))

                if not access_token or not access_token.strip():
                    logging.warning("Authentication attempt with empty access token")
                    return None

                # Access tokens should be UUIDs (32 hex characters)
                if len(access_token.strip()) < 32:
                    logging.warning(f"Authentication attempt with invalid token format: {len(access_token)} chars")
                    return None

                user = UserService.query(
                    access_token=access_token, status=StatusEnum.VALID.value
                )
                if user:
                    if not user[0].access_token or not user[0].access_token.strip():
                        logging.warning(f"User {user[0].email} has empty access_token in database")
                        return None
                    return user[0]
                else:
                    return None
            except Exception as e:
                logging.warning(f"load_user got exception {e}")
                return None
        else:
            return None


def init_default_admin():
    # Verify that at least one active admin user exists. If not, create a default one.
    users = UserService.query(is_superuser=True)
    if not users:
        default_admin = {
            "id": uuid.uuid1().hex,
            "password": encode_to_base64("admin"),
            "nickname": "admin",
            "is_superuser": True,
            "email": "admin@ragflow.io",
            "creator": "system",
            "status": "1",
        }
        if not UserService.save(**default_admin):
            raise AdminException("Can't init admin.", 500)
    elif not any([u.is_active == ActiveEnum.ACTIVE.value for u in users]):
        raise AdminException("No active admin. Please update 'is_active' in db manually.", 500)


def check_admin_auth(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        user = UserService.filter_by_id(current_user.id)
        if not user:
            raise UserNotFoundError(current_user.email)
        if not user.is_superuser:
            raise AdminException("Not admin", 403)
        if user.is_active == ActiveEnum.INACTIVE.value:
            raise AdminException(f"User {current_user.email} inactive", 403)

        return func(*args, **kwargs)

    return wrapper


def login_admin(email: str, password: str):
    """
    :param email: admin email
    :param password: the string before decryption
    """
    users = UserService.query(email=email)
    if not users:
        raise UserNotFoundError(email)
    psw = decrypt(password)
    user = UserService.query_user(email, psw)
    if not user:
        raise AdminException("Email and password do not match!")
    if not user.is_superuser:
        raise AdminException("Not admin", 403)
    if user.is_active == ActiveEnum.INACTIVE.value:
        raise AdminException(f"User {email} inactive", 403)

    resp = user.to_json()
    user.access_token = get_uuid()
    login_user(user)
    user.update_time = current_timestamp()
    user.update_date = datetime_format(datetime.now())
    user.last_login_time = get_format_time()
    user.save()
    msg = "Welcome back!"
    return sync_construct_response(data=resp, auth=user.get_id(), message=msg)


def check_admin(username: str, password: str):
    users = UserService.query(email=username)
    if not users:
        logging.info(f"Username: {username} is not registered!")
        user_info = {
            "id": uuid.uuid1().hex,
            "password": encode_to_base64("admin"),
            "nickname": "admin",
            "is_superuser": True,
            "email": "admin@ragflow.io",
            "creator": "system",
            "status": "1",
        }
        if not UserService.save(**user_info):
            raise AdminException("Can't init admin.", 500)

    user = UserService.query_user(username, password)
    if user:
        return True
    else:
        return False


def login_verify(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or 'username' not in auth.parameters or 'password' not in auth.parameters:
            return jsonify({
                "code": 401,
                "message": "Authentication required",
                "data": None
            }), 200

        username = auth.parameters['username']
        password = auth.parameters['password']
        try:
            if not check_admin(username, password):
                return jsonify({
                    "code": 500,
                    "message": "Access denied",
                    "data": None
                }), 200
        except Exception as e:
            error_msg = str(e)
            return jsonify({
                "code": 500,
                "message": error_msg
            }), 200

        return f(*args, **kwargs)

    return decorated
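A minimal sketch of how these helpers are meant to compose on a route (assumptions: the blueprint name and the /whoami endpoint are illustrative and not taken from routes.py, which is not shown in this diff):

from flask import Blueprint, jsonify
from flask_login import current_user, login_required

demo_bp = Blueprint("admin_demo", __name__, url_prefix="/admin")

@demo_bp.route("/whoami", methods=["GET"])  # hypothetical endpoint
@login_required       # resolves the user via the request_loader registered in setup_auth()
@check_admin_auth     # then enforces the superuser and active-status checks above
def whoami():
    return jsonify({"email": current_user.email})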
admin/server/config.py (new file, 317 lines)
@@ -0,0 +1,317 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import logging
import threading
from enum import Enum

from pydantic import BaseModel
from typing import Any
from common.config_utils import read_config
from urllib.parse import urlparse


class BaseConfig(BaseModel):
    id: int
    name: str
    host: str
    port: int
    service_type: str
    detail_func_name: str

    def to_dict(self) -> dict[str, Any]:
        return {'id': self.id, 'name': self.name, 'host': self.host, 'port': self.port,
                'service_type': self.service_type}


class ServiceConfigs:
    configs: list[BaseConfig]

    def __init__(self):
        self.configs = []
        self.lock = threading.Lock()


SERVICE_CONFIGS = ServiceConfigs()


class ServiceType(Enum):
    METADATA = "metadata"
    RETRIEVAL = "retrieval"
    MESSAGE_QUEUE = "message_queue"
    RAGFLOW_SERVER = "ragflow_server"
    TASK_EXECUTOR = "task_executor"
    FILE_STORE = "file_store"


class MetaConfig(BaseConfig):
    meta_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['meta_type'] = self.meta_type
        result['extra'] = extra_dict
        return result


class MySQLConfig(MetaConfig):
    username: str
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['username'] = self.username
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


class PostgresConfig(MetaConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class RetrievalConfig(BaseConfig):
    retrieval_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['retrieval_type'] = self.retrieval_type
        result['extra'] = extra_dict
        return result


class InfinityConfig(RetrievalConfig):
    db_name: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['db_name'] = self.db_name
        result['extra'] = extra_dict
        return result


class ElasticsearchConfig(RetrievalConfig):
    username: str
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['username'] = self.username
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


class MessageQueueConfig(BaseConfig):
    mq_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['mq_type'] = self.mq_type
        result['extra'] = extra_dict
        return result


class RedisConfig(MessageQueueConfig):
    database: int
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['database'] = self.database
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


class RabbitMQConfig(MessageQueueConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class RAGFlowServerConfig(BaseConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class TaskExecutorConfig(BaseConfig):
    message_queue_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        result['extra']['message_queue_type'] = self.message_queue_type
        return result


class FileStoreConfig(BaseConfig):
    store_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['store_type'] = self.store_type
        result['extra'] = extra_dict
        return result


class MinioConfig(FileStoreConfig):
    user: str
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['user'] = self.user
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result

def load_configurations(config_path: str) -> list[BaseConfig]:
|
||||||
|
raw_configs = read_config(config_path)
|
||||||
|
configurations = []
|
||||||
|
ragflow_count = 0
|
||||||
|
id_count = 0
|
||||||
|
for k, v in raw_configs.items():
|
||||||
|
match k:
|
||||||
|
case "ragflow":
|
||||||
|
name: str = f'ragflow_{ragflow_count}'
|
||||||
|
host: str = v['host']
|
||||||
|
http_port: int = v['http_port']
|
||||||
|
config = RAGFlowServerConfig(id=id_count, name=name, host=host, port=http_port,
|
||||||
|
service_type="ragflow_server",
|
||||||
|
detail_func_name="check_ragflow_server_alive")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
case "es":
|
||||||
|
name: str = 'elasticsearch'
|
||||||
|
url = v['hosts']
|
||||||
|
parsed = urlparse(url)
|
||||||
|
host: str = parsed.hostname
|
||||||
|
port: int = parsed.port
|
||||||
|
username: str = v.get('username')
|
||||||
|
password: str = v.get('password')
|
||||||
|
config = ElasticsearchConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval",
|
||||||
|
retrieval_type="elasticsearch",
|
||||||
|
username=username, password=password,
|
||||||
|
detail_func_name="get_es_cluster_stats")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
|
||||||
|
case "infinity":
|
||||||
|
name: str = 'infinity'
|
||||||
|
url = v['uri']
|
||||||
|
parts = url.split(':', 1)
|
||||||
|
host = parts[0]
|
||||||
|
port = int(parts[1])
|
||||||
|
database: str = v.get('db_name', 'default_db')
|
||||||
|
config = InfinityConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval",
|
||||||
|
retrieval_type="infinity",
|
||||||
|
db_name=database, detail_func_name="get_infinity_status")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
case "minio":
|
||||||
|
name: str = 'minio'
|
||||||
|
url = v['host']
|
||||||
|
parts = url.split(':', 1)
|
||||||
|
host = parts[0]
|
||||||
|
port = int(parts[1])
|
||||||
|
user = v.get('user')
|
||||||
|
password = v.get('password')
|
||||||
|
config = MinioConfig(id=id_count, name=name, host=host, port=port, user=user, password=password,
|
||||||
|
service_type="file_store",
|
||||||
|
store_type="minio", detail_func_name="check_minio_alive")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
case "redis":
|
||||||
|
name: str = 'redis'
|
||||||
|
url = v['host']
|
||||||
|
parts = url.split(':', 1)
|
||||||
|
host = parts[0]
|
||||||
|
port = int(parts[1])
|
||||||
|
password = v.get('password')
|
||||||
|
db: int = v.get('db')
|
||||||
|
config = RedisConfig(id=id_count, name=name, host=host, port=port, password=password, database=db,
|
||||||
|
service_type="message_queue", mq_type="redis", detail_func_name="get_redis_info")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
case "mysql":
|
||||||
|
name: str = 'mysql'
|
||||||
|
host: str = v.get('host')
|
||||||
|
port: int = v.get('port')
|
||||||
|
username = v.get('user')
|
||||||
|
password = v.get('password')
|
||||||
|
config = MySQLConfig(id=id_count, name=name, host=host, port=port, username=username, password=password,
|
||||||
|
service_type="meta_data", meta_type="mysql", detail_func_name="get_mysql_status")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
case "admin":
|
||||||
|
pass
|
||||||
|
case "task_executor":
|
||||||
|
name: str = 'task_executor'
|
||||||
|
host: str = v.get('host', '')
|
||||||
|
port: int = v.get('port', 0)
|
||||||
|
message_queue_type: str = v.get('message_queue_type')
|
||||||
|
config = TaskExecutorConfig(id=id_count, name=name, host=host, port=port, message_queue_type=message_queue_type,
|
||||||
|
service_type="task_executor", detail_func_name="check_task_executor_alive")
|
||||||
|
configurations.append(config)
|
||||||
|
id_count += 1
|
||||||
|
case _:
|
||||||
|
logging.warning(f"Unknown configuration key: {k}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
return configurations
|
||||||
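
A minimal sketch of the mapping load_configurations() expects back from read_config(). The keys and values below are hypothetical placeholders, not confirmed defaults; they only illustrate that "ragflow" carries a separate http_port while "minio"/"redis" pack host and port into one "host:port" string that the loader splits apart.

raw_configs = {
    "ragflow": {"host": "0.0.0.0", "http_port": 9380},
    "es": {"hosts": "http://localhost:1200", "username": "elastic", "password": "secret"},
    "minio": {"host": "localhost:9000", "user": "minioadmin", "password": "secret"},
    "redis": {"host": "localhost:6379", "password": "secret", "db": 1},
}
# load_configurations() would turn this into one config object per key,
# assigning ids in iteration order and skipping the "admin" section.
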
17  admin/server/exceptions.py  Normal file
@@ -0,0 +1,17 @@
class AdminException(Exception):
    def __init__(self, message, code=400):
        super().__init__(message)
        self.code = code
        self.message = message


class UserNotFoundError(AdminException):
    def __init__(self, username):
        super().__init__(f"User '{username}' not found", 404)


class UserAlreadyExistsError(AdminException):
    def __init__(self, username):
        super().__init__(f"User '{username}' already exists", 409)


class CannotDeleteAdminError(AdminException):
    def __init__(self):
        super().__init__("Cannot delete admin account", 403)
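
A small usage sketch: AdminException carries an HTTP-style status code alongside the message, so handlers can forward both without re-mapping, and each subclass pins the code for its error kind. The user email below is a hypothetical placeholder.

try:
    raise UserNotFoundError("alice@example.com")  # hypothetical user
except AdminException as e:
    print(e.code, e.message)  # -> 404 User 'alice@example.com' not found
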
32  admin/server/responses.py  Normal file
@@ -0,0 +1,32 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import jsonify


def success_response(data=None, message="Success", code=0):
    return jsonify({
        "code": code,
        "message": message,
        "data": data
    }), 200


def error_response(message="Error", code=-1, data=None):
    return jsonify({
        "code": code,
        "message": message,
        "data": data
    }), 400
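
Both helpers return a (Response, status) tuple that Flask unpacks: the JSON body always uses the same code/message/data envelope, with the HTTP status fixed at 200 for success and 400 for errors. A sketch (requires an active Flask application context, since jsonify needs one):

resp, status = success_response({"id": 1})
# resp.get_json() -> {"code": 0, "message": "Success", "data": {"id": 1}}
# status -> 200
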
76  admin/server/roles.py  Normal file
@@ -0,0 +1,76 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging

from typing import Dict, Any

from api.common.exceptions import AdminException


class RoleMgr:
    @staticmethod
    def create_role(role_name: str, description: str):
        error_msg = f"not implemented: create role: {role_name}, description: {description}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def update_role_description(role_name: str, description: str) -> Dict[str, Any]:
        error_msg = f"not implemented: update role: {role_name} with description: {description}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def delete_role(role_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: drop role: {role_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def list_roles() -> Dict[str, Any]:
        error_msg = "not implemented: list roles"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def get_role_permission(role_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: show role {role_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def grant_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        error_msg = f"not implemented: grant role {role_name} actions: {actions} on {resource}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def revoke_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        error_msg = f"not implemented: revoke role {role_name} actions: {actions} on {resource}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def update_user_role(user_name: str, role_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: update user role: {user_name} to role {role_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def get_user_permission(user_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: get user permission: {user_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)
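
Every RoleMgr method is a stub that logs and raises, so the role endpoints currently always fail. A sketch of what a caller observes (the default AdminException code is 400):

try:
    RoleMgr.list_roles()
except AdminException as e:
    print(e.code, e.message)  # -> 400 not implemented: list roles
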
382  admin/server/routes.py  Normal file
@@ -0,0 +1,382 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import secrets

from flask import Blueprint, request
from flask_login import current_user, login_required, logout_user

from auth import login_verify, login_admin, check_admin_auth
from responses import success_response, error_response
from services import UserMgr, ServiceMgr, UserServiceMgr
from roles import RoleMgr
from api.common.exceptions import AdminException
from common.versions import get_ragflow_version

admin_bp = Blueprint('admin', __name__, url_prefix='/api/v1/admin')


@admin_bp.route('/login', methods=['POST'])
def login():
    if not request.json:
        return error_response('Authorize admin failed.', 400)
    try:
        email = request.json.get("email", "")
        password = request.json.get("password", "")
        return login_admin(email, password)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/logout', methods=['GET'])
@login_required
def logout():
    try:
        current_user.access_token = f"INVALID_{secrets.token_hex(16)}"
        current_user.save()
        logout_user()
        return success_response(True)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/auth', methods=['GET'])
@login_verify
def auth_admin():
    try:
        return success_response(None, "Admin is authorized", 0)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users', methods=['GET'])
@login_required
@check_admin_auth
def list_users():
    try:
        users = UserMgr.get_all_users()
        return success_response(users, "Get all users", 0)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users', methods=['POST'])
@login_required
@check_admin_auth
def create_user():
    try:
        data = request.get_json()
        if not data or 'username' not in data or 'password' not in data:
            return error_response("Username and password are required", 400)

        username = data['username']
        password = data['password']
        role = data.get('role', 'user')

        res = UserMgr.create_user(username, password, role)
        if res["success"]:
            user_info = res["user_info"]
            user_info.pop("password")  # do not return the password
            return success_response(user_info, "User created successfully")
        else:
            return error_response("create user failed")

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e))


@admin_bp.route('/users/<username>', methods=['DELETE'])
@login_required
@check_admin_auth
def delete_user(username):
    try:
        res = UserMgr.delete_user(username)
        if res["success"]:
            return success_response(None, res["message"])
        else:
            return error_response(res["message"])

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/password', methods=['PUT'])
@login_required
@check_admin_auth
def change_password(username):
    try:
        data = request.get_json()
        if not data or 'new_password' not in data:
            return error_response("New password is required", 400)

        new_password = data['new_password']
        msg = UserMgr.update_user_password(username, new_password)
        return success_response(None, msg)

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/activate', methods=['PUT'])
@login_required
@check_admin_auth
def alter_user_activate_status(username):
    try:
        data = request.get_json()
        if not data or 'activate_status' not in data:
            return error_response("Activation status is required", 400)
        activate_status = data['activate_status']
        msg = UserMgr.update_user_activate_status(username, activate_status)
        return success_response(None, msg)
    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>', methods=['GET'])
@login_required
@check_admin_auth
def get_user_details(username):
    try:
        user_details = UserMgr.get_user_details(username)
        return success_response(user_details)

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/datasets', methods=['GET'])
@login_required
@check_admin_auth
def get_user_datasets(username):
    try:
        datasets_list = UserServiceMgr.get_user_datasets(username)
        return success_response(datasets_list)

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/agents', methods=['GET'])
@login_required
@check_admin_auth
def get_user_agents(username):
    try:
        agents_list = UserServiceMgr.get_user_agents(username)
        return success_response(agents_list)

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/services', methods=['GET'])
@login_required
@check_admin_auth
def get_services():
    try:
        services = ServiceMgr.get_all_services()
        return success_response(services, "Get all services", 0)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/service_types/<service_type>', methods=['GET'])
@login_required
@check_admin_auth
def get_services_by_type(service_type):
    # NOTE: the parameter name must match the <service_type> placeholder in the
    # route, otherwise Flask raises a TypeError when dispatching the view.
    try:
        services = ServiceMgr.get_services_by_type(service_type)
        return success_response(services)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/services/<service_id>', methods=['GET'])
@login_required
@check_admin_auth
def get_service(service_id):
    try:
        services = ServiceMgr.get_service_details(service_id)
        return success_response(services)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/services/<service_id>', methods=['DELETE'])
@login_required
@check_admin_auth
def shutdown_service(service_id):
    try:
        services = ServiceMgr.shutdown_service(service_id)
        return success_response(services)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/services/<service_id>', methods=['PUT'])
@login_required
@check_admin_auth
def restart_service(service_id):
    try:
        services = ServiceMgr.restart_service(service_id)
        return success_response(services)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles', methods=['POST'])
@login_required
@check_admin_auth
def create_role():
    try:
        data = request.get_json()
        if not data or 'role_name' not in data:
            return error_response("Role name is required", 400)
        role_name: str = data['role_name']
        description: str = data['description']
        res = RoleMgr.create_role(role_name, description)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles/<role_name>', methods=['PUT'])
@login_required
@check_admin_auth
def update_role(role_name: str):
    try:
        data = request.get_json()
        if not data or 'description' not in data:
            return error_response("Role description is required", 400)
        description: str = data['description']
        res = RoleMgr.update_role_description(role_name, description)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles/<role_name>', methods=['DELETE'])
@login_required
@check_admin_auth
def delete_role(role_name: str):
    try:
        res = RoleMgr.delete_role(role_name)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles', methods=['GET'])
@login_required
@check_admin_auth
def list_roles():
    try:
        res = RoleMgr.list_roles()
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles/<role_name>/permission', methods=['GET'])
@login_required
@check_admin_auth
def get_role_permission(role_name: str):
    try:
        res = RoleMgr.get_role_permission(role_name)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles/<role_name>/permission', methods=['POST'])
@login_required
@check_admin_auth
def grant_role_permission(role_name: str):
    try:
        data = request.get_json()
        if not data or 'actions' not in data or 'resource' not in data:
            return error_response("Permission is required", 400)
        actions: list = data['actions']
        resource: str = data['resource']
        res = RoleMgr.grant_role_permission(role_name, actions, resource)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/roles/<role_name>/permission', methods=['DELETE'])
@login_required
@check_admin_auth
def revoke_role_permission(role_name: str):
    try:
        data = request.get_json()
        if not data or 'actions' not in data or 'resource' not in data:
            return error_response("Permission is required", 400)
        actions: list = data['actions']
        resource: str = data['resource']
        res = RoleMgr.revoke_role_permission(role_name, actions, resource)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<user_name>/role', methods=['PUT'])
@login_required
@check_admin_auth
def update_user_role(user_name: str):
    try:
        data = request.get_json()
        if not data or 'role_name' not in data:
            return error_response("Role name is required", 400)
        role_name: str = data['role_name']
        res = RoleMgr.update_user_role(user_name, role_name)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<user_name>/permission', methods=['GET'])
@login_required
@check_admin_auth
def get_user_permission(user_name: str):
    try:
        res = RoleMgr.get_user_permission(user_name)
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/version', methods=['GET'])
@login_required
@check_admin_auth
def show_version():
    try:
        res = {"version": get_ragflow_version()}
        return success_response(res)
    except Exception as e:
        return error_response(str(e), 500)
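
A hypothetical client-side sketch of the login flow against this blueprint. The host, port, and credentials are placeholders; the URL prefix comes from the Blueprint declaration above, and the response body follows the success_response/error_response envelope.

import requests  # assumed available in the client environment

resp = requests.post(
    "http://localhost:9380/api/v1/admin/login",  # hypothetical host/port
    json={"email": "admin@example.com", "password": "<encrypted password>"},
)
print(resp.json())  # {"code": ..., "message": ..., "data": ...}
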
227  admin/server/services.py  Normal file
@@ -0,0 +1,227 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from werkzeug.security import check_password_hash
from common.constants import ActiveEnum
from api.db.services import UserService
from api.db.joint_services.user_account_service import create_new_user, delete_user_data
from api.db.services.canvas_service import UserCanvasService
from api.db.services.user_service import TenantService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.crypt import decrypt
from api.utils import health_utils

from api.common.exceptions import AdminException, UserAlreadyExistsError, UserNotFoundError
from config import SERVICE_CONFIGS


class UserMgr:
    @staticmethod
    def get_all_users():
        users = UserService.get_all_users()
        result = []
        for user in users:
            result.append({
                'email': user.email,
                'nickname': user.nickname,
                'create_date': user.create_date,
                'is_active': user.is_active,
                'is_superuser': user.is_superuser,
            })
        return result

    @staticmethod
    def get_user_details(username):
        # use email to query
        users = UserService.query_user_by_email(username)
        result = []
        for user in users:
            result.append({
                'avatar': user.avatar,
                'email': user.email,
                'language': user.language,
                'last_login_time': user.last_login_time,
                'is_active': user.is_active,
                'is_anonymous': user.is_anonymous,
                'login_channel': user.login_channel,
                'status': user.status,
                'is_superuser': user.is_superuser,
                'create_date': user.create_date,
                'update_date': user.update_date
            })
        return result

    @staticmethod
    def create_user(username, password, role="user") -> dict:
        # Validate the email address
        if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", username):
            raise AdminException(f"Invalid email address: {username}!")
        # Check if the email address is already used
        if UserService.query(email=username):
            raise UserAlreadyExistsError(username)
        # Construct user info data
        user_info_dict = {
            "email": username,
            "nickname": "",  # ask the user to edit it manually in settings.
            "password": decrypt(password),
            "login_channel": "password",
            "is_superuser": role == "admin",
        }
        return create_new_user(user_info_dict)

    @staticmethod
    def delete_user(username):
        # use email to delete
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        if len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        usr = user_list[0]
        return delete_user_data(usr.id)

    @staticmethod
    def update_user_password(username, new_password) -> str:
        # use email to find the user; check it exists and is unique.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # check new_password differs from the old one.
        usr = user_list[0]
        psw = decrypt(new_password)
        if check_password_hash(usr.password, psw):
            return "Same password, no need to update!"
        # update password
        UserService.update_user_password(usr.id, psw)
        return "Password updated successfully!"

    @staticmethod
    def update_user_activate_status(username, activate_status: str):
        # use email to find the user; check it exists and is unique.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # check the new activate status differs from the current one
        usr = user_list[0]
        # normalize activate_status before handling
        _activate_status = activate_status.lower()
        target_status = {
            'on': ActiveEnum.ACTIVE.value,
            'off': ActiveEnum.INACTIVE.value,
        }.get(_activate_status)
        if not target_status:
            raise AdminException(f"Invalid activate_status: {activate_status}")
        if target_status == usr.is_active:
            return f"User activate status is already {_activate_status}!"
        # update is_active
        UserService.update_user(usr.id, {"is_active": target_status})
        return f"Turned {_activate_status} user activate status successfully!"


class UserServiceMgr:

    @staticmethod
    def get_user_datasets(username):
        # use email to find the user.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # find tenants
        usr = user_list[0]
        tenants = TenantService.get_joined_tenants_by_user_id(usr.id)
        tenant_ids = [m["tenant_id"] for m in tenants]
        # filter permitted kb and owned kb
        return KnowledgebaseService.get_all_kb_by_tenant_ids(tenant_ids, usr.id)

    @staticmethod
    def get_user_agents(username):
        # use email to find the user.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # find tenants
        usr = user_list[0]
        tenants = TenantService.get_joined_tenants_by_user_id(usr.id)
        tenant_ids = [m["tenant_id"] for m in tenants]
        # filter permitted agents and owned agents
        res = UserCanvasService.get_all_agents_by_tenant_ids(tenant_ids, usr.id)
        return [{
            'title': r['title'],
            'permission': r['permission'],
            'canvas_category': r['canvas_category'].split('_')[0],
            'avatar': r['avatar']
        } for r in res]


class ServiceMgr:

    @staticmethod
    def get_all_services():
        result = []
        configs = SERVICE_CONFIGS.configs
        for service_id, config in enumerate(configs):
            config_dict = config.to_dict()
            try:
                service_detail = ServiceMgr.get_service_details(service_id)
                if "status" in service_detail:
                    config_dict['status'] = service_detail['status']
                else:
                    config_dict['status'] = 'timeout'
            except Exception as e:
                logging.warning(f"Can't get service details, error: {e}")
                config_dict['status'] = 'timeout'
            if not config_dict['host']:
                config_dict['host'] = '-'
            if not config_dict['port']:
                config_dict['port'] = '-'
            result.append(config_dict)
        return result

    @staticmethod
    def get_services_by_type(service_type_str: str):
        raise AdminException("get_services_by_type: not implemented")

    @staticmethod
    def get_service_details(service_id: int):
        service_idx = int(service_id)
        configs = SERVICE_CONFIGS.configs
        if service_idx < 0 or service_idx >= len(configs):
            raise AdminException(f"invalid service_index: {service_idx}")

        service_config = configs[service_idx]
        service_info = {'name': service_config.name, 'detail_func_name': service_config.detail_func_name}

        detail_func = getattr(health_utils, service_info.get('detail_func_name'))
        res = detail_func()
        res.update({'service_name': service_info.get('name')})
        return res

    @staticmethod
    def shutdown_service(service_id: int):
        raise AdminException("shutdown_service: not implemented")

    @staticmethod
    def restart_service(service_id: int):
        raise AdminException("restart_service: not implemented")
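
A minimal sketch of the 'on'/'off' normalization inside UserMgr.update_user_activate_status. The ActiveEnum below is a stand-in with assumed values; the real enum lives in common.constants. Any string other than 'on'/'off' (case-insensitive) maps to None and is rejected.

from enum import Enum

class ActiveEnum(Enum):  # stand-in; actual values come from common.constants
    ACTIVE = "1"
    INACTIVE = "0"

def to_target(activate_status: str):
    return {
        'on': ActiveEnum.ACTIVE.value,
        'off': ActiveEnum.INACTIVE.value,
    }.get(activate_status.lower())

print(to_target("ON"))     # "1"
print(to_target("off"))    # "0"
print(to_target("maybe"))  # None -> AdminException in the real method
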
@@ -14,5 +14,5 @@
 # limitations under the License.
 #

-from beartype.claw import beartype_this_package
-beartype_this_package()
+# from beartype.claw import beartype_this_package
+# beartype_this_package()
646  agent/canvas.py
@@ -13,9 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import asyncio
 import base64
+import inspect
+import binascii
 import json
 import logging
+import re
 import time
 from concurrent.futures import ThreadPoolExecutor
 from copy import deepcopy
@@ -25,99 +29,68 @@ from typing import Any, Union, Tuple
 from agent.component import component_class
 from agent.component.base import ComponentBase
 from api.db.services.file_service import FileService
-from api.utils import get_uuid, hash_str2int
-from rag.prompts.prompts import chunks_format
+from api.db.services.llm_service import LLMBundle
+from api.db.services.task_service import has_canceled
+from common.constants import LLMType
+from common.misc_utils import get_uuid, hash_str2int
+from common.exceptions import TaskCanceledException
+from rag.prompts.generator import chunks_format
 from rag.utils.redis_conn import REDIS_CONN


-class Canvas:
+class Graph:
     """
     dsl = {
-        "components": {
-            "begin": {
-                "obj":{
-                    "component_name": "Begin",
-                    "params": {},
-                },
-                "downstream": ["answer_0"],
-                "upstream": [],
-            },
-            "retrieval_0": {
-                "obj": {
-                    "component_name": "Retrieval",
-                    "params": {}
-                },
-                "downstream": ["generate_0"],
-                "upstream": ["answer_0"],
-            },
-            "generate_0": {
-                "obj": {
-                    "component_name": "Generate",
-                    "params": {}
-                },
-                "downstream": ["answer_0"],
-                "upstream": ["retrieval_0"],
-            }
-        },
-        "history": [],
-        "path": ["begin"],
-        "retrieval": {"chunks": [], "doc_aggs": []},
-        "globals": {
-            "sys.query": "",
-            "sys.user_id": tenant_id,
-            "sys.conversation_turns": 0,
-            "sys.files": []
-        }
-    }
-    """
-
-    def __init__(self, dsl: str, tenant_id=None, task_id=None):
-        self.path = []
-        self.history = []
-        self.components = {}
-        self.error = ""
-        self.globals = {
-            "sys.query": "",
-            "sys.user_id": tenant_id,
-            "sys.conversation_turns": 0,
-            "sys.files": []
-        }
-        self.dsl = json.loads(dsl) if dsl else {
         "components": {
             "begin": {
-                "obj": {
+                "obj":{
                     "component_name": "Begin",
-                    "params": {
-                        "prologue": "Hi there!"
-                    }
+                    "params": {},
                 },
-                "downstream": [],
+                "downstream": ["answer_0"],
                 "upstream": [],
-                "parent_id": ""
+            },
+            "retrieval_0": {
+                "obj": {
+                    "component_name": "Retrieval",
+                    "params": {}
+                },
+                "downstream": ["generate_0"],
+                "upstream": ["answer_0"],
+            },
+            "generate_0": {
+                "obj": {
+                    "component_name": "Generate",
+                    "params": {}
+                },
+                "downstream": ["answer_0"],
+                "upstream": ["retrieval_0"],
             }
         },
         "history": [],
-        "path": [],
-        "retrieval": [],
+        "path": ["begin"],
+        "retrieval": {"chunks": [], "doc_aggs": []},
         "globals": {
             "sys.query": "",
-            "sys.user_id": "",
+            "sys.user_id": tenant_id,
             "sys.conversation_turns": 0,
             "sys.files": []
         }
     }
+    """
+
+    def __init__(self, dsl: str, tenant_id=None, task_id=None):
+        self.path = []
+        self.components = {}
+        self.error = ""
+        self.dsl = json.loads(dsl)
         self._tenant_id = tenant_id
         self.task_id = task_id if task_id else get_uuid()
+        self._thread_pool = ThreadPoolExecutor(max_workers=5)
         self.load()

     def load(self):
         self.components = self.dsl["components"]
         cpn_nms = set([])
-        for k, cpn in self.components.items():
-            cpn_nms.add(cpn["obj"]["component_name"])
-
-        assert "Begin" in cpn_nms, "There have to be an 'Begin' component."
-
         for k, cpn in self.components.items():
             cpn_nms.add(cpn["obj"]["component_name"])
             param = component_class(cpn["obj"]["component_name"] + "Param")()
@@ -130,18 +103,10 @@ class Canvas:
             cpn["obj"] = component_class(cpn["obj"]["component_name"])(self, k, param)

         self.path = self.dsl["path"]
-        self.history = self.dsl["history"]
-        self.globals = self.dsl["globals"]
-        self.retrieval = self.dsl["retrieval"]
-        self.memory = self.dsl.get("memory", [])

     def __str__(self):
         self.dsl["path"] = self.path
-        self.dsl["history"] = self.history
-        self.dsl["globals"] = self.globals
         self.dsl["task_id"] = self.task_id
-        self.dsl["retrieval"] = self.retrieval
-        self.dsl["memory"] = self.memory
         dsl = {
             "components": {}
         }
@@ -160,31 +125,13 @@ class Canvas:
                 dsl["components"][k][c] = deepcopy(cpn[c])
         return json.dumps(dsl, ensure_ascii=False)

-    def reset(self, mem=False):
+    def reset(self):
         self.path = []
-        if not mem:
-            self.history = []
-            self.retrieval = []
-            self.memory = []
         for k, cpn in self.components.items():
             self.components[k]["obj"].reset()

-        for k in self.globals.keys():
-            if isinstance(self.globals[k], str):
-                self.globals[k] = ""
-            elif isinstance(self.globals[k], int):
-                self.globals[k] = 0
-            elif isinstance(self.globals[k], float):
-                self.globals[k] = 0
-            elif isinstance(self.globals[k], list):
-                self.globals[k] = []
-            elif isinstance(self.globals[k], dict):
-                self.globals[k] = {}
-            else:
-                self.globals[k] = None
-
         try:
             REDIS_CONN.delete(f"{self.task_id}-logs")
+            REDIS_CONN.delete(f"{self.task_id}-cancel")
         except Exception as e:
             logging.exception(e)

@@ -195,15 +142,240 @@ class Canvas:
         return ""

     def run(self, **kwargs):
+        raise NotImplementedError()
+
+    def get_component(self, cpn_id) -> Union[None, dict[str, Any]]:
+        return self.components.get(cpn_id)
+
+    def get_component_obj(self, cpn_id) -> ComponentBase:
+        return self.components.get(cpn_id)["obj"]
+
+    def get_component_type(self, cpn_id) -> str:
+        return self.components.get(cpn_id)["obj"].component_name
+
+    def get_component_input_form(self, cpn_id) -> dict:
+        return self.components.get(cpn_id)["obj"].get_input_form()
+
+    def get_tenant_id(self):
+        return self._tenant_id
+
+    def get_value_with_variable(self, value: str) -> Any:
+        pat = re.compile(r"\{* *\{([a-zA-Z:0-9]+@[A-Za-z0-9_.]+|sys\.[A-Za-z0-9_.]+|env\.[A-Za-z0-9_.]+)\} *\}*")
+        out_parts = []
+        last = 0
+
+        for m in pat.finditer(value):
+            out_parts.append(value[last:m.start()])
+            key = m.group(1)
+            v = self.get_variable_value(key)
+            if v is None:
+                rep = ""
+            elif isinstance(v, partial):
+                buf = []
+                for chunk in v():
+                    buf.append(chunk)
+                rep = "".join(buf)
+            elif isinstance(v, str):
+                rep = v
+            else:
+                rep = json.dumps(v, ensure_ascii=False)
+
+            out_parts.append(rep)
+            last = m.end()
+
+        out_parts.append(value[last:])
+        return "".join(out_parts)
+
+    def get_variable_value(self, exp: str) -> Any:
+        exp = exp.strip("{").strip("}").strip(" ").strip("{").strip("}")
+        if exp.find("@") < 0:
+            return self.globals[exp]
+        cpn_id, var_nm = exp.split("@")
+        cpn = self.get_component(cpn_id)
+        if not cpn:
+            raise Exception(f"Can't find variable: '{cpn_id}@{var_nm}'")
+        parts = var_nm.split(".", 1)
+        root_key = parts[0]
+        rest = parts[1] if len(parts) > 1 else ""
+        root_val = cpn["obj"].output(root_key)
+
+        if not rest:
+            return root_val
+        return self.get_variable_param_value(root_val, rest)
+
+    def get_variable_param_value(self, obj: Any, path: str) -> Any:
+        cur = obj
+        if not path:
+            return cur
+        for key in path.split('.'):
+            if cur is None:
+                return None
+
+            if isinstance(cur, str):
+                try:
+                    cur = json.loads(cur)
+                except Exception:
+                    return None
+
+            if isinstance(cur, dict):
+                cur = cur.get(key)
+                continue
+
+            if isinstance(cur, (list, tuple)):
+                try:
+                    idx = int(key)
+                    cur = cur[idx]
+                except Exception:
+                    return None
+                continue
+
+            cur = getattr(cur, key, None)
+        return cur
+
+    def set_variable_value(self, exp: str, value):
+        exp = exp.strip("{").strip("}").strip(" ").strip("{").strip("}")
+        if exp.find("@") < 0:
+            self.globals[exp] = value
+            return
+        cpn_id, var_nm = exp.split("@")
+        cpn = self.get_component(cpn_id)
+        if not cpn:
+            raise Exception(f"Can't find variable: '{cpn_id}@{var_nm}'")
+        parts = var_nm.split(".", 1)
+        root_key = parts[0]
+        rest = parts[1] if len(parts) > 1 else ""
+        if not rest:
+            cpn["obj"].set_output(root_key, value)
+            return
+        root_val = cpn["obj"].output(root_key)
+        if not root_val:
+            root_val = {}
+        cpn["obj"].set_output(root_key, self.set_variable_param_value(root_val, rest, value))
+
+    def set_variable_param_value(self, obj: Any, path: str, value) -> Any:
+        cur = obj
+        keys = path.split('.')
+        if not path:
+            return value
+        # walk/create intermediate dicts only; the final key receives the value
+        # (iterating over every key here would nest one level too deep)
+        for key in keys[:-1]:
+            if key not in cur or not isinstance(cur[key], dict):
+                cur[key] = {}
+            cur = cur[key]
+        cur[keys[-1]] = value
+        return obj
+
+    def is_canceled(self) -> bool:
+        return has_canceled(self.task_id)
+
+    def cancel_task(self) -> bool:
+        try:
+            REDIS_CONN.set(f"{self.task_id}-cancel", "x")
+        except Exception as e:
+            logging.exception(e)
+            return False
+        return True
+
+
+class Canvas(Graph):
+
+    def __init__(self, dsl: str, tenant_id=None, task_id=None):
+        self.globals = {
+            "sys.query": "",
+            "sys.user_id": tenant_id,
+            "sys.conversation_turns": 0,
+            "sys.files": []
+        }
+        self.variables = {}
+        super().__init__(dsl, tenant_id, task_id)
+
+    def load(self):
+        super().load()
+        self.history = self.dsl["history"]
+        if "globals" in self.dsl:
+            self.globals = self.dsl["globals"]
+        else:
+            self.globals = {
+                "sys.query": "",
+                "sys.user_id": "",
+                "sys.conversation_turns": 0,
+                "sys.files": []
+            }
+        if "variables" in self.dsl:
+            self.variables = self.dsl["variables"]
+        else:
+            self.variables = {}
+
+        self.retrieval = self.dsl["retrieval"]
+        self.memory = self.dsl.get("memory", [])
+
+    def __str__(self):
+        self.dsl["history"] = self.history
+        self.dsl["retrieval"] = self.retrieval
+        self.dsl["memory"] = self.memory
+        return super().__str__()
+
+    def reset(self, mem=False):
+        super().reset()
+        if not mem:
+            self.history = []
+            self.retrieval = []
+            self.memory = []
+        print(self.variables)
+        for k in self.globals.keys():
+            if k.startswith("sys."):
+                if isinstance(self.globals[k], str):
+                    self.globals[k] = ""
+                elif isinstance(self.globals[k], int):
+                    self.globals[k] = 0
+                elif isinstance(self.globals[k], float):
+                    self.globals[k] = 0
+                elif isinstance(self.globals[k], list):
+                    self.globals[k] = []
+                elif isinstance(self.globals[k], dict):
+                    self.globals[k] = {}
+                else:
+                    self.globals[k] = None
+            if k.startswith("env."):
+                key = k[4:]
+                if key in self.variables:
+                    variable = self.variables[key]
+                    if variable["value"]:
+                        self.globals[k] = variable["value"]
+                    else:
+                        if variable["type"] == "string":
+                            self.globals[k] = ""
+                        elif variable["type"] == "number":
+                            self.globals[k] = 0
+                        elif variable["type"] == "boolean":
+                            self.globals[k] = False
+                        elif variable["type"] == "object":
+                            self.globals[k] = {}
+                        elif variable["type"].startswith("array"):
+                            self.globals[k] = []
+                        else:
+                            self.globals[k] = ""
+                else:
+                    self.globals[k] = ""
+
+    async def run(self, **kwargs):
         st = time.perf_counter()
+        self._loop = asyncio.get_running_loop()
         self.message_id = get_uuid()
         created_at = int(time.time())
         self.add_user_input(kwargs.get("query"))
+        for k, cpn in self.components.items():
+            self.components[k]["obj"].reset(True)
+
+        if kwargs.get("webhook_payload"):
+            for k, cpn in self.components.items():
+                if self.components[k]["obj"].component_name.lower() == "webhook":
+                    for kk, vv in kwargs["webhook_payload"].items():
+                        self.components[k]["obj"].set_output(kk, vv)
+
         for k in kwargs.keys():
             if k in ["query", "user_id", "files"] and kwargs[k]:
                 if k == "files":
-                    self.globals[f"sys.{k}"] = self.get_files(kwargs[k])
+                    self.globals[f"sys.{k}"] = await self.get_files_async(kwargs[k])
                 else:
                     self.globals[f"sys.{k}"] = kwargs[k]
         if not self.globals["sys.conversation_turns"] :
@@ -225,20 +397,58 @@ class Canvas:
         self.path.append("begin")
         self.retrieval.append({"chunks": [], "doc_aggs": []})

+        if self.is_canceled():
+            msg = f"Task {self.task_id} has been canceled before starting."
+            logging.info(msg)
+            raise TaskCanceledException(msg)
+
         yield decorate("workflow_started", {"inputs": kwargs.get("inputs")})
         self.retrieval.append({"chunks": {}, "doc_aggs": {}})

-        def _run_batch(f, t):
-            with ThreadPoolExecutor(max_workers=5) as executor:
-                thr = []
-                for i in range(f, t):
-                    cpn = self.get_component_obj(self.path[i])
-                    if cpn.component_name.lower() in ["begin", "userfillup"]:
-                        thr.append(executor.submit(cpn.invoke, inputs=kwargs.get("inputs", {})))
-                    else:
-                        thr.append(executor.submit(cpn.invoke, **cpn.get_input()))
-                for t in thr:
-                    t.result()
+        async def _run_batch(f, t):
+            if self.is_canceled():
+                msg = f"Task {self.task_id} has been canceled during batch execution."
+                logging.info(msg)
+                raise TaskCanceledException(msg)
+
+            loop = asyncio.get_running_loop()
+            tasks = []
+
+            def _run_async_in_thread(coro_func, **call_kwargs):
+                return asyncio.run(coro_func(**call_kwargs))
+
+            i = f
+            while i < t:
+                cpn = self.get_component_obj(self.path[i])
+                task_fn = None
+                call_kwargs = None
+
+                if cpn.component_name.lower() in ["begin", "userfillup"]:
+                    call_kwargs = {"inputs": kwargs.get("inputs", {})}
+                    task_fn = cpn.invoke
+                    i += 1
+                else:
+                    for _, ele in cpn.get_input_elements().items():
+                        if isinstance(ele, dict) and ele.get("_cpn_id") and ele.get("_cpn_id") not in self.path[:i] and self.path[0].lower().find("userfillup") < 0:
+                            self.path.pop(i)
+                            t -= 1
+                            break
+                    else:
+                        call_kwargs = cpn.get_input()
+                        task_fn = cpn.invoke
+                        i += 1
+
+                if task_fn is None:
+                    continue
+
+                invoke_async = getattr(cpn, "invoke_async", None)
+                if invoke_async and asyncio.iscoroutinefunction(invoke_async):
+                    tasks.append(loop.run_in_executor(self._thread_pool, partial(_run_async_in_thread, invoke_async, **(call_kwargs or {}))))
+                else:
+                    tasks.append(loop.run_in_executor(self._thread_pool, partial(task_fn, **(call_kwargs or {}))))
+
+            if tasks:
+                await asyncio.gather(*tasks)

         def _node_finished(cpn_obj):
             return decorate("node_finished",{
@@ -255,6 +465,7 @@ class Canvas:
         self.error = ""
         idx = len(self.path) - 1
         partials = []
+        tts_mdl = None
         while idx < len(self.path):
             to = len(self.path)
             for i in range(idx, to):
@@ -265,29 +476,70 @@ class Canvas:
                     "component_type": self.get_component_type(self.path[i]),
                     "thoughts": self.get_component_thoughts(self.path[i])
                 })
-            _run_batch(idx, to)
+            await _run_batch(idx, to)
+            to = len(self.path)

-            # post processing of components invocation
+            # post-processing of components invocation
            for i in range(idx, to):
                 cpn = self.get_component(self.path[i])
                 cpn_obj = self.get_component_obj(self.path[i])
                 if cpn_obj.component_name.lower() == "message":
+                    if cpn_obj.get_param("auto_play"):
+                        tts_mdl = LLMBundle(self._tenant_id, LLMType.TTS)
                     if isinstance(cpn_obj.output("content"), partial):
                         _m = ""
-                        for m in cpn_obj.output("content")():
+                        buff_m = ""
+                        stream = cpn_obj.output("content")()
+
+                        async def _process_stream(m):
+                            nonlocal buff_m, _m, tts_mdl
                             if not m:
-                                continue
+                                return
                             if m == "<think>":
-                                yield decorate("message", {"content": "", "start_to_think": True})
+                                return decorate("message", {"content": "", "start_to_think": True})
                             elif m == "</think>":
-                                yield decorate("message", {"content": "", "end_to_think": True})
-                            else:
-                                yield decorate("message", {"content": m})
+                                return decorate("message", {"content": "", "end_to_think": True})
+
+                            buff_m += m
                             _m += m
+
+                            if len(buff_m) > 16:
+                                ev = decorate(
+                                    "message",
+                                    {
+                                        "content": m,
+                                        "audio_binary": self.tts(tts_mdl, buff_m)
+                                    }
+                                )
+                                buff_m = ""
+                                return ev
+
+                            return decorate("message", {"content": m})
+
+                        if inspect.isasyncgen(stream):
+                            async for m in stream:
+                                ev = await _process_stream(m)
+                                if ev:
+                                    yield ev
+                        else:
+                            for m in stream:
+                                ev = await _process_stream(m)
+                                if ev:
+                                    yield ev
+                        if buff_m:
+                            yield decorate("message", {"content": "", "audio_binary": self.tts(tts_mdl, buff_m)})
+                            buff_m = ""
                         cpn_obj.set_output("content", _m)
+                        cite = re.search(r"\[ID:[ 0-9]+\]", _m)
                     else:
                         yield decorate("message", {"content": cpn_obj.output("content")})
-                    yield decorate("message_end", {"reference": self.get_reference()})
+                        cite = re.search(r"\[ID:[ 0-9]+\]", cpn_obj.output("content"))
+
+                    message_end = {}
+                    if isinstance(cpn_obj.output("attachment"), dict):
+                        message_end["attachment"] = cpn_obj.output("attachment")
+                    if cite:
+                        message_end["reference"] = self.get_reference()
+                    yield decorate("message_end", message_end)
+
                 while partials:
                     _cpn_obj = self.get_component_obj(partials[0])
@@ -308,7 +560,7 @@ class Canvas:
             else:
                 self.error = cpn_obj.error()

-            if cpn_obj.component_name.lower() != "iteration":
+            if cpn_obj.component_name.lower() not in ("iteration", "loop"):
                 if isinstance(cpn_obj.output("content"), partial):
                     if self.error:
                         cpn_obj.set_output("content", None)
@@ -333,14 +585,16 @@ class Canvas:
             for cpn_id in cpn_ids:
                 _append_path(cpn_id)

-            if cpn_obj.component_name.lower() == "iterationitem" and cpn_obj.end():
+            if cpn_obj.component_name.lower() in ("iterationitem", "loopitem") and cpn_obj.end():
                 iter = cpn_obj.get_parent()
                 yield _node_finished(iter)
                 _extend_path(self.get_component(cpn["parent_id"])["downstream"])
             elif cpn_obj.component_name.lower() in ["categorize", "switch"]:
                 _extend_path(cpn_obj.output("_next"))
-            elif cpn_obj.component_name.lower() == "iteration":
+            elif cpn_obj.component_name.lower() in ("iteration", "loop"):
                 _append_path(cpn_obj.get_start())
+            elif cpn_obj.component_name.lower() == "exitloop" and cpn_obj.get_parent().component_name.lower() == "loop":
+                _extend_path(self.get_component(cpn["parent_id"])["downstream"])
             elif not cpn["downstream"] and cpn_obj.get_parent():
                 _append_path(cpn_obj.get_parent().get_start())
             else:
@@ -359,13 +613,13 @@ class Canvas:
         for c in path:
             o = self.get_component_obj(c)
             if o.component_name.lower() == "userfillup":
|
||||||
|
o.invoke()
|
||||||
another_inputs.update(o.get_input_elements())
|
another_inputs.update(o.get_input_elements())
|
||||||
if o.get_param("enable_tips"):
|
if o.get_param("enable_tips"):
|
||||||
tips = o.get_param("tips")
|
tips = o.output("tips")
|
||||||
self.path = path
|
self.path = path
|
||||||
yield decorate("user_inputs", {"inputs": another_inputs, "tips": tips})
|
yield decorate("user_inputs", {"inputs": another_inputs, "tips": tips})
|
||||||
return
|
return
|
||||||
|
|
||||||
self.path = self.path[:idx]
|
self.path = self.path[:idx]
|
||||||
if not self.error:
|
if not self.error:
|
||||||
yield decorate("workflow_finished",
|
yield decorate("workflow_finished",
|
||||||
@ -376,18 +630,14 @@ class Canvas:
|
|||||||
"created_at": st,
|
"created_at": st,
|
||||||
})
|
})
|
||||||
self.history.append(("assistant", self.get_component_obj(self.path[-1]).output()))
|
self.history.append(("assistant", self.get_component_obj(self.path[-1]).output()))
|
||||||
|
elif "Task has been canceled" in self.error:
|
||||||
def get_component(self, cpn_id) -> Union[None, dict[str, Any]]:
|
yield decorate("workflow_finished",
|
||||||
return self.components.get(cpn_id)
|
{
|
||||||
|
"inputs": kwargs.get("inputs"),
|
||||||
def get_component_obj(self, cpn_id) -> ComponentBase:
|
"outputs": "Task has been canceled",
|
||||||
return self.components.get(cpn_id)["obj"]
|
"elapsed_time": time.perf_counter() - st,
|
||||||
|
"created_at": st,
|
||||||
def get_component_type(self, cpn_id) -> str:
|
})
|
||||||
return self.components.get(cpn_id)["obj"].component_name
|
|
||||||
|
|
||||||
def get_component_input_form(self, cpn_id) -> dict:
|
|
||||||
return self.components.get(cpn_id)["obj"].get_input_form()
|
|
||||||
|
|
||||||
def is_reff(self, exp: str) -> bool:
|
def is_reff(self, exp: str) -> bool:
|
||||||
exp = exp.strip("{").strip("}")
|
exp = exp.strip("{").strip("}")
|
||||||
@ -400,24 +650,55 @@ class Canvas:
|
|||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def get_variable_value(self, exp: str) -> Any:
|
|
||||||
exp = exp.strip("{").strip("}").strip(" ").strip("{").strip("}")
|
|
||||||
if exp.find("@") < 0:
|
|
||||||
return self.globals[exp]
|
|
||||||
cpn_id, var_nm = exp.split("@")
|
|
||||||
cpn = self.get_component(cpn_id)
|
|
||||||
if not cpn:
|
|
||||||
raise Exception(f"Can't find variable: '{cpn_id}@{var_nm}'")
|
|
||||||
return cpn["obj"].output(var_nm)
|
|
||||||
|
|
||||||
def get_tenant_id(self):
|
def tts(self,tts_mdl, text):
|
||||||
return self._tenant_id
|
def clean_tts_text(text: str) -> str:
|
||||||
|
if not text:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
text = text.encode("utf-8", "ignore").decode("utf-8", "ignore")
|
||||||
|
|
||||||
|
text = re.sub(r"[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]", "", text)
|
||||||
|
|
||||||
|
emoji_pattern = re.compile(
|
||||||
|
"[\U0001F600-\U0001F64F"
|
||||||
|
"\U0001F300-\U0001F5FF"
|
||||||
|
"\U0001F680-\U0001F6FF"
|
||||||
|
"\U0001F1E0-\U0001F1FF"
|
||||||
|
"\U00002700-\U000027BF"
|
||||||
|
"\U0001F900-\U0001F9FF"
|
||||||
|
"\U0001FA70-\U0001FAFF"
|
||||||
|
"\U0001FAD0-\U0001FAFF]+",
|
||||||
|
flags=re.UNICODE
|
||||||
|
)
|
||||||
|
text = emoji_pattern.sub("", text)
|
||||||
|
|
||||||
|
text = re.sub(r"\s+", " ", text).strip()
|
||||||
|
|
||||||
|
MAX_LEN = 500
|
||||||
|
if len(text) > MAX_LEN:
|
||||||
|
text = text[:MAX_LEN]
|
||||||
|
|
||||||
|
return text
|
||||||
|
if not tts_mdl or not text:
|
||||||
|
return None
|
||||||
|
text = clean_tts_text(text)
|
||||||
|
if not text:
|
||||||
|
return None
|
||||||
|
bin = b""
|
||||||
|
try:
|
||||||
|
for chunk in tts_mdl.tts(text):
|
||||||
|
bin += chunk
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f"TTS failed: {e}, text={text!r}")
|
||||||
|
return None
|
||||||
|
return binascii.hexlify(bin).decode("utf-8")
|
||||||
|
|
||||||
def get_history(self, window_size):
|
def get_history(self, window_size):
|
||||||
convs = []
|
convs = []
|
||||||
if window_size <= 0:
|
if window_size <= 0:
|
||||||
return convs
|
return convs
|
||||||
for role, obj in self.history[window_size * -1:]:
|
for role, obj in self.history[window_size * -2:]:
|
||||||
if isinstance(obj, dict):
|
if isinstance(obj, dict):
|
||||||
convs.append({"role": role, "content": obj.get("content", "")})
|
convs.append({"role": role, "content": obj.get("content", "")})
|
||||||
else:
|
else:
|
||||||
@ -427,39 +708,12 @@ class Canvas:
|
|||||||
def add_user_input(self, question):
|
def add_user_input(self, question):
|
||||||
self.history.append(("user", question))
|
self.history.append(("user", question))
|
||||||
|
|
||||||
def _find_loop(self, max_loops=6):
|
|
||||||
path = self.path[-1][::-1]
|
|
||||||
if len(path) < 2:
|
|
||||||
return False
|
|
||||||
|
|
||||||
for i in range(len(path)):
|
|
||||||
if path[i].lower().find("answer") == 0 or path[i].lower().find("iterationitem") == 0:
|
|
||||||
path = path[:i]
|
|
||||||
break
|
|
||||||
|
|
||||||
if len(path) < 2:
|
|
||||||
return False
|
|
||||||
|
|
||||||
for loc in range(2, len(path) // 2):
|
|
||||||
pat = ",".join(path[0:loc])
|
|
||||||
path_str = ",".join(path)
|
|
||||||
if len(pat) >= len(path_str):
|
|
||||||
return False
|
|
||||||
loop = max_loops
|
|
||||||
while path_str.find(pat) == 0 and loop >= 0:
|
|
||||||
loop -= 1
|
|
||||||
if len(pat)+1 >= len(path_str):
|
|
||||||
return False
|
|
||||||
path_str = path_str[len(pat)+1:]
|
|
||||||
if loop < 0:
|
|
||||||
pat = " => ".join([p.split(":")[0] for p in path[0:loc]])
|
|
||||||
return pat + " => " + pat
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
def get_prologue(self):
|
def get_prologue(self):
|
||||||
return self.components["begin"]["obj"]._param.prologue
|
return self.components["begin"]["obj"]._param.prologue
|
||||||
|
|
||||||
|
def get_mode(self):
|
||||||
|
return self.components["begin"]["obj"]._param.mode
|
||||||
|
|
||||||
def set_global_param(self, **kwargs):
|
def set_global_param(self, **kwargs):
|
||||||
self.globals.update(kwargs)
|
self.globals.update(kwargs)
|
||||||
|
|
||||||
@ -469,22 +723,32 @@ class Canvas:
|
|||||||
def get_component_input_elements(self, cpnnm):
|
def get_component_input_elements(self, cpnnm):
|
||||||
return self.components[cpnnm]["obj"].get_input_elements()
|
return self.components[cpnnm]["obj"].get_input_elements()
|
||||||
|
|
||||||
def get_files(self, files: Union[None, list[dict]]) -> list[str]:
|
async def get_files_async(self, files: Union[None, list[dict]]) -> list[str]:
|
||||||
if not files:
|
if not files:
|
||||||
return []
|
return []
|
||||||
def image_to_base64(file):
|
def image_to_base64(file):
|
||||||
return "data:{};base64,{}".format(file["mime_type"],
|
return "data:{};base64,{}".format(file["mime_type"],
|
||||||
base64.b64encode(FileService.get_blob(file["created_by"], file["id"])).decode("utf-8"))
|
base64.b64encode(FileService.get_blob(file["created_by"], file["id"])).decode("utf-8"))
|
||||||
exe = ThreadPoolExecutor(max_workers=5)
|
loop = asyncio.get_running_loop()
|
||||||
threads = []
|
tasks = []
|
||||||
for file in files:
|
for file in files:
|
||||||
if file["mime_type"].find("image") >=0:
|
if file["mime_type"].find("image") >=0:
|
||||||
threads.append(exe.submit(image_to_base64, file))
|
tasks.append(loop.run_in_executor(self._thread_pool, image_to_base64, file))
|
||||||
continue
|
continue
|
||||||
threads.append(exe.submit(FileService.parse, file["name"], FileService.get_blob(file["created_by"], file["id"]), True, file["created_by"]))
|
tasks.append(loop.run_in_executor(self._thread_pool, FileService.parse, file["name"], FileService.get_blob(file["created_by"], file["id"]), True, file["created_by"]))
|
||||||
return [th.result() for th in threads]
|
return await asyncio.gather(*tasks)
|
||||||
|
|
||||||
def tool_use_callback(self, agent_id: str, func_name: str, params: dict, result: Any):
|
def get_files(self, files: Union[None, list[dict]]) -> list[str]:
|
||||||
|
"""
|
||||||
|
Synchronous wrapper for get_files_async, used by sync component invoke paths.
|
||||||
|
"""
|
||||||
|
loop = getattr(self, "_loop", None)
|
||||||
|
if loop and loop.is_running():
|
||||||
|
return asyncio.run_coroutine_threadsafe(self.get_files_async(files), loop).result()
|
||||||
|
|
||||||
|
return asyncio.run(self.get_files_async(files))
|
||||||
|
|
||||||
|
def tool_use_callback(self, agent_id: str, func_name: str, params: dict, result: Any, elapsed_time=None):
|
||||||
agent_ids = agent_id.split("-->")
|
agent_ids = agent_id.split("-->")
|
||||||
agent_name = self.get_component_name(agent_ids[0])
|
agent_name = self.get_component_name(agent_ids[0])
|
||||||
path = agent_name if len(agent_ids) < 2 else agent_name+"-->"+"-->".join(agent_ids[1:])
|
path = agent_name if len(agent_ids) < 2 else agent_name+"-->"+"-->".join(agent_ids[1:])
|
||||||
@ -493,28 +757,29 @@ class Canvas:
|
|||||||
if bin:
|
if bin:
|
||||||
obj = json.loads(bin.encode("utf-8"))
|
obj = json.loads(bin.encode("utf-8"))
|
||||||
if obj[-1]["component_id"] == agent_ids[0]:
|
if obj[-1]["component_id"] == agent_ids[0]:
|
||||||
obj[-1]["trace"].append({"path": path, "tool_name": func_name, "arguments": params, "result": result})
|
obj[-1]["trace"].append({"path": path, "tool_name": func_name, "arguments": params, "result": result, "elapsed_time": elapsed_time})
|
||||||
else:
|
else:
|
||||||
obj.append({
|
obj.append({
|
||||||
"component_id": agent_ids[0],
|
"component_id": agent_ids[0],
|
||||||
"trace": [{"path": path, "tool_name": func_name, "arguments": params, "result": result}]
|
"trace": [{"path": path, "tool_name": func_name, "arguments": params, "result": result, "elapsed_time": elapsed_time}]
|
||||||
})
|
})
|
||||||
else:
|
else:
|
||||||
obj = [{
|
obj = [{
|
||||||
"component_id": agent_ids[0],
|
"component_id": agent_ids[0],
|
||||||
"trace": [{"path": path, "tool_name": func_name, "arguments": params, "result": result}]
|
"trace": [{"path": path, "tool_name": func_name, "arguments": params, "result": result, "elapsed_time": elapsed_time}]
|
||||||
}]
|
}]
|
||||||
REDIS_CONN.set_obj(f"{self.task_id}-{self.message_id}-logs", obj, 60*10)
|
REDIS_CONN.set_obj(f"{self.task_id}-{self.message_id}-logs", obj, 60*10)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logging.exception(e)
|
logging.exception(e)
|
||||||
|
|
||||||
def add_refernce(self, chunks: list[object], doc_infos: list[object]):
|
def add_reference(self, chunks: list[object], doc_infos: list[object]):
|
||||||
if not self.retrieval:
|
if not self.retrieval:
|
||||||
self.retrieval = [{"chunks": {}, "doc_aggs": {}}]
|
self.retrieval = [{"chunks": {}, "doc_aggs": {}}]
|
||||||
|
|
||||||
r = self.retrieval[-1]
|
r = self.retrieval[-1]
|
||||||
for ck in chunks_format({"chunks": chunks}):
|
for ck in chunks_format({"chunks": chunks}):
|
||||||
cid = hash_str2int(ck["id"], 100)
|
cid = hash_str2int(ck["id"], 500)
|
||||||
|
# cid = uuid.uuid5(uuid.NAMESPACE_DNS, ck["id"])
|
||||||
if cid not in r:
|
if cid not in r:
|
||||||
r["chunks"][cid] = ck
|
r["chunks"][cid] = ck
|
||||||
|
|
||||||
@ -535,4 +800,3 @@ class Canvas:
|
|||||||
|
|
||||||
def get_component_thoughts(self, cpn_id) -> str:
|
def get_component_thoughts(self, cpn_id) -> str:
|
||||||
return self.components.get(cpn_id)["obj"].thoughts()
|
return self.components.get(cpn_id)["obj"].thoughts()
|
||||||
|
|
||||||
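The `_run_batch` rewrite above replaces `ThreadPoolExecutor.submit` plus blocking `result()` calls with `loop.run_in_executor` and a single `asyncio.gather`, dispatching both sync `_invoke` callables and coroutine `invoke_async` methods through one pool. Below is a minimal, self-contained sketch of that dispatch pattern; `run_async_in_thread` stands in for the `_run_async_in_thread` helper the diff references but does not show, and the task names are made up for the demo.

```python
import asyncio
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial


def run_async_in_thread(coro_fn, **kwargs):
    # Each pool thread starts its own event loop, so a coroutine can be
    # driven from inside run_in_executor without touching the caller's loop.
    return asyncio.run(coro_fn(**kwargs))


def sync_task(name: str):
    time.sleep(0.1)  # stand-in for a blocking component invoke
    return f"sync:{name}"


async def async_task(name: str):
    await asyncio.sleep(0.1)  # stand-in for an async component invoke
    return f"async:{name}"


async def run_batch(jobs):
    loop = asyncio.get_running_loop()
    pool = ThreadPoolExecutor(max_workers=5)
    tasks = []
    for fn, kwargs in jobs:
        if asyncio.iscoroutinefunction(fn):
            # Wrap coroutines so the pool thread can run them to completion.
            tasks.append(loop.run_in_executor(pool, partial(run_async_in_thread, fn, **kwargs)))
        else:
            tasks.append(loop.run_in_executor(pool, partial(fn, **kwargs)))
    return await asyncio.gather(*tasks)


if __name__ == "__main__":
    out = asyncio.run(run_batch([(sync_task, {"name": "a"}), (async_task, {"name": "b"})]))
    print(out)  # ['sync:a', 'async:b']
```

The upshot of the design is that components keep failing or finishing independently, but the canvas now awaits the whole batch in one place instead of joining futures one by one.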
```diff
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
 import os
 import importlib
 import inspect
@@ -50,8 +49,10 @@ del _package_path, _import_submodules, _extract_classes_from_module


 def component_class(class_name):
-    m = importlib.import_module("agent.component")
+    for module_name in ["agent.component", "agent.tools", "rag.flow"]:
         try:
-            return getattr(m, class_name)
+            return getattr(importlib.import_module(module_name), class_name)
         except Exception:
-            return getattr(importlib.import_module("agent.tools"), class_name)
+            # logging.warning(f"Can't import module: {module_name}, error: {e}")
+            pass
+    assert False, f"Can't import {class_name}"
```
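The new `component_class` walks an ordered list of modules and returns the first attribute hit, instead of hard-coding a two-step fallback. The same lookup pattern in isolation (the module names in the example are arbitrary stand-ins, and the narrower `except` here is a choice of this sketch, not of the diff):

```python
import importlib


def find_class(class_name: str, module_names: list[str]):
    for module_name in module_names:
        try:
            return getattr(importlib.import_module(module_name), class_name)
        except (ImportError, AttributeError):
            continue  # missing module or missing attribute: try the next one
    raise ImportError(f"Can't import {class_name}")


# Example: OrderedDict is not in `json`, so the second candidate wins.
cls = find_class("OrderedDict", ["json", "collections"])
print(cls)  # <class 'collections.OrderedDict'>
```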
````diff
@@ -13,24 +13,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import asyncio
+import json
 import logging
 import os
 import re
-from concurrent.futures import ThreadPoolExecutor
 from copy import deepcopy
 from functools import partial
 from typing import Any

 import json_repair
+from timeit import default_timer as timer
 from agent.tools.base import LLMToolPluginCallSession, ToolParamBase, ToolBase, ToolMeta
-from api.db.services.llm_service import LLMBundle, TenantLLMService
+from api.db.services.llm_service import LLMBundle
+from api.db.services.tenant_llm_service import TenantLLMService
 from api.db.services.mcp_server_service import MCPServerService
-from api.utils.api_utils import timeout
-from rag.prompts import message_fit_in
-from rag.prompts.prompts import next_step, COMPLETE_TASK, analyze_task, \
-    citation_prompt, reflect, rank_memories, kb_prompt, citation_plus, full_question
-from rag.utils.mcp_tool_call_conn import MCPToolCallSession, mcp_tool_metadata_to_openai_tool
+from common.connection_utils import timeout
+from rag.prompts.generator import next_step_async, COMPLETE_TASK, analyze_task_async, \
+    citation_prompt, reflect_async, kb_prompt, citation_plus, full_question, message_fit_in, structured_output_prompt
+from common.mcp_tool_call_conn import MCPToolCallSession, mcp_tool_metadata_to_openai_tool
 from agent.component.llm import LLMParam, LLM


@@ -137,8 +138,37 @@ class Agent(LLM, ToolBase):
             res.update(cpn.get_input_form())
         return res

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 20*60))
+    def _get_output_schema(self):
+        try:
+            cand = self._param.outputs.get("structured")
+        except Exception:
+            return None
+
+        if isinstance(cand, dict):
+            if isinstance(cand.get("properties"), dict) and len(cand["properties"]) > 0:
+                return cand
+            for k in ("schema", "structured"):
+                if isinstance(cand.get(k), dict) and isinstance(cand[k].get("properties"), dict) and len(cand[k]["properties"]) > 0:
+                    return cand[k]
+
+        return None
+
+    async def _force_format_to_schema_async(self, text: str, schema_prompt: str) -> str:
+        fmt_msgs = [
+            {"role": "system", "content": schema_prompt + "\nIMPORTANT: Output ONLY valid JSON. No markdown, no extra text."},
+            {"role": "user", "content": text},
+        ]
+        _, fmt_msgs = message_fit_in(fmt_msgs, int(self.chat_mdl.max_length * 0.97))
+        return await self._generate_async(fmt_msgs)
+
     def _invoke(self, **kwargs):
+        return asyncio.run(self._invoke_async(**kwargs))
+
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 20*60)))
+    async def _invoke_async(self, **kwargs):
+        if self.check_if_canceled("Agent processing"):
+            return
+
         if kwargs.get("user_prompt"):
             usr_pmt = ""
             if kwargs.get("reasoning"):
@@ -152,20 +182,29 @@ class Agent(LLM, ToolBase):
             self._param.prompts = [{"role": "user", "content": usr_pmt}]

         if not self.tools:
-            return LLM._invoke(self, **kwargs)
+            if self.check_if_canceled("Agent processing"):
+                return
+            return await LLM._invoke_async(self, **kwargs)

-        prompt, msg = self._prepare_prompt_variables()
+        prompt, msg, user_defined_prompt = self._prepare_prompt_variables()
+        output_schema = self._get_output_schema()
+        schema_prompt = ""
+        if output_schema:
+            schema = json.dumps(output_schema, ensure_ascii=False, indent=2)
+            schema_prompt = structured_output_prompt(schema)

         downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
         ex = self.exception_handler()
-        if any([self._canvas.get_component_obj(cid).component_name.lower()=="message" for cid in downstreams]) and not self._param.output_structure and not (ex and ex["goto"]):
-            self.set_output("content", partial(self.stream_output_with_tools, prompt, msg))
+        if any([self._canvas.get_component_obj(cid).component_name.lower()=="message" for cid in downstreams]) and not (ex and ex["goto"]) and not output_schema:
+            self.set_output("content", partial(self.stream_output_with_tools_async, prompt, deepcopy(msg), user_defined_prompt))
             return

         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
         use_tools = []
         ans = ""
-        for delta_ans, tk in self._react_with_tools_streamly(msg, use_tools):
+        async for delta_ans, _tk in self._react_with_tools_streamly_async(prompt, msg, use_tools, user_defined_prompt,schema_prompt=schema_prompt):
+            if self.check_if_canceled("Agent processing"):
+                return
             ans += delta_ans

         if ans.find("**ERROR**") >= 0:
@@ -176,22 +215,48 @@ class Agent(LLM, ToolBase):
                 self.set_output("_ERROR", ans)
             return

+        if output_schema:
+            error = ""
+            for _ in range(self._param.max_retries + 1):
+                try:
+                    def clean_formated_answer(ans: str) -> str:
+                        ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
+                        ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)
+                        return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)
+                    obj = json_repair.loads(clean_formated_answer(ans))
+                    self.set_output("structured", obj)
+                    if use_tools:
+                        self.set_output("use_tools", use_tools)
+                    return obj
+                except Exception:
+                    error = "The answer cannot be parsed as JSON"
+                    ans = await self._force_format_to_schema_async(ans, schema_prompt)
+                    if ans.find("**ERROR**") >= 0:
+                        continue
+
+            self.set_output("_ERROR", error)
+            return
+
         self.set_output("content", ans)
         if use_tools:
             self.set_output("use_tools", use_tools)
         return ans

-    def stream_output_with_tools(self, prompt, msg):
+    async def stream_output_with_tools_async(self, prompt, msg, user_defined_prompt={}):
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
         answer_without_toolcall = ""
         use_tools = []
-        for delta_ans,_ in self._react_with_tools_streamly(msg, use_tools):
+        async for delta_ans, _ in self._react_with_tools_streamly_async(prompt, msg, use_tools, user_defined_prompt):
+            if self.check_if_canceled("Agent streaming"):
+                return
             if delta_ans.find("**ERROR**") >= 0:
                 if self.get_exception_default_value():
                     self.set_output("content", self.get_exception_default_value())
                     yield self.get_exception_default_value()
                 else:
                     self.set_output("_ERROR", delta_ans)
+                return
             answer_without_toolcall += delta_ans
             yield delta_ans

@@ -199,54 +264,43 @@ class Agent(LLM, ToolBase):
         if use_tools:
             self.set_output("use_tools", use_tools)

-    def _gen_citations(self, text):
-        retrievals = self._canvas.get_reference()
-        retrievals = {"chunks": list(retrievals["chunks"].values()), "doc_aggs": list(retrievals["doc_aggs"].values())}
-        formated_refer = kb_prompt(retrievals, self.chat_mdl.max_length, True)
-        for delta_ans in self._generate_streamly([{"role": "system", "content": citation_plus("\n\n".join(formated_refer))},
-                                                  {"role": "user", "content": text}
-                                                  ]):
-            yield delta_ans
-
-    def _react_with_tools_streamly(self, history: list[dict], use_tools):
+    async def _react_with_tools_streamly_async(self, prompt, history: list[dict], use_tools, user_defined_prompt={}, schema_prompt: str = ""):
         token_count = 0
         tool_metas = self.tool_meta
         hist = deepcopy(history)
         last_calling = ""
         if len(hist) > 3:
-            user_request = full_question(messages=history, chat_mdl=self.chat_mdl)
-            self.callback("Multi-turn conversation optimization", {}, user_request)
+            st = timer()
+            user_request = await asyncio.to_thread(full_question, messages=history, chat_mdl=self.chat_mdl)
+            self.callback("Multi-turn conversation optimization", {}, user_request, elapsed_time=timer()-st)
         else:
             user_request = history[-1]["content"]

-        def use_tool(name, args):
-            nonlocal hist, use_tools, token_count,last_calling,user_request
-            print(f"{last_calling=} == {name=}", )
-            # Summarize of function calling
-            #if all([
-            #    isinstance(self.toolcall_session.get_tool_obj(name), Agent),
-            #    last_calling,
-            #    last_calling != name
-            #]):
-            #    self.toolcall_session.get_tool_obj(name).add2system_prompt(f"The chat history with other agents are as following: \n" + self.get_useful_memory(user_request, str(args["user_prompt"])))
+        async def use_tool_async(name, args):
+            nonlocal hist, use_tools, last_calling
+            logging.info(f"{last_calling=} == {name=}")
             last_calling = name
-            tool_response = self.toolcall_session.tool_call(name, args)
+            tool_response = await self.toolcall_session.tool_call_async(name, args)
             use_tools.append({
                 "name": name,
                 "arguments": args,
                 "results": tool_response
             })
             # self.callback("add_memory", {}, "...")
-            #self.add_memory(hist[-2]["content"], hist[-1]["content"], name, args, str(tool_response))
+            #self.add_memory(hist[-2]["content"], hist[-1]["content"], name, args, str(tool_response), user_defined_prompt)

             return name, tool_response

-        def complete():
+        async def complete():
             nonlocal hist
-            need2cite = self._canvas.get_reference()["chunks"] and self._id.find("-->") < 0
+            need2cite = self._param.cite and self._canvas.get_reference()["chunks"] and self._id.find("-->") < 0
+            if schema_prompt:
+                need2cite = False
             cited = False
-            if hist[0]["role"] == "system" and need2cite:
-                if len(hist) < 7:
+            if hist and hist[0]["role"] == "system":
+                if schema_prompt:
+                    hist[0]["content"] += "\n" + schema_prompt
+                if need2cite and len(hist) < 7:
                     hist[0]["content"] += citation_prompt()
                     cited = True
             yield "", token_count
@@ -255,19 +309,22 @@ class Agent(LLM, ToolBase):
             if len(hist) > 12:
                 _hist = [hist[0], hist[1], *hist[-10:]]
             entire_txt = ""
-            for delta_ans in self._generate_streamly(_hist):
+            async for delta_ans in self._generate_streamly_async(_hist):
                 if not need2cite or cited:
                     yield delta_ans, 0
                 entire_txt += delta_ans
             if not need2cite or cited:
                 return

+            st = timer()
             txt = ""
-            for delta_ans in self._gen_citations(entire_txt):
+            async for delta_ans in self._gen_citations_async(entire_txt):
+                if self.check_if_canceled("Agent streaming"):
+                    return
                 yield delta_ans, 0
                 txt += delta_ans

-            self.callback("gen_citations", {}, txt)
+            self.callback("gen_citations", {}, txt, elapsed_time=timer()-st)

         def append_user_content(hist, content):
             if hist[-1]["role"] == "user":
@@ -275,12 +332,15 @@ class Agent(LLM, ToolBase):
             else:
                 hist.append({"role": "user", "content": content})

-        task_desc = analyze_task(self.chat_mdl, user_request, tool_metas)
-        self.callback("analyze_task", {}, task_desc)
+        st = timer()
+        task_desc = await analyze_task_async(self.chat_mdl, prompt, user_request, tool_metas, user_defined_prompt)
+        self.callback("analyze_task", {}, task_desc, elapsed_time=timer()-st)
         for _ in range(self._param.max_rounds + 1):
-            response, tk = next_step(self.chat_mdl, hist, tool_metas, task_desc)
+            if self.check_if_canceled("Agent streaming"):
+                return
+            response, tk = await next_step_async(self.chat_mdl, hist, tool_metas, task_desc, user_defined_prompt)
             # self.callback("next_step", {}, str(response)[:256]+"...")
-            token_count += tk
+            token_count += tk or 0
             hist.append({"role": "assistant", "content": response})
             try:
                 functions = json_repair.loads(re.sub(r"```.*", "", response))
@@ -289,22 +349,24 @@ class Agent(LLM, ToolBase):
                 for f in functions:
                     if not isinstance(f, dict):
                         raise TypeError(f"An object type should be returned, but `{f}`")
-                with ThreadPoolExecutor(max_workers=5) as executor:
-                    thr = []
-                    for func in functions:
-                        name = func["name"]
-                        args = func["arguments"]
-                        if name == COMPLETE_TASK:
-                            append_user_content(hist, f"Respond with a formal answer. FORGET(DO NOT mention) about `{COMPLETE_TASK}`. The language for the response MUST be as the same as the first user request.\n")
-                            for txt, tkcnt in complete():
-                                yield txt, tkcnt
-                            return
-
-                        thr.append(executor.submit(use_tool, name, args))
-
-                    reflection = reflect(self.chat_mdl, hist, [th.result() for th in thr])
-                    append_user_content(hist, reflection)
-                    self.callback("reflection", {}, str(reflection))
+                tool_tasks = []
+                for func in functions:
+                    name = func["name"]
+                    args = func["arguments"]
+                    if name == COMPLETE_TASK:
+                        append_user_content(hist, f"Respond with a formal answer. FORGET(DO NOT mention) about `{COMPLETE_TASK}`. The language for the response MUST be as the same as the first user request.\n")
+                        async for txt, tkcnt in complete():
+                            yield txt, tkcnt
+                        return
+
+                    tool_tasks.append(asyncio.create_task(use_tool_async(name, args)))
+
+                results = await asyncio.gather(*tool_tasks) if tool_tasks else []
+                st = timer()
+                reflection = await reflect_async(self.chat_mdl, hist, results, user_defined_prompt)
+                append_user_content(hist, reflection)
+                self.callback("reflection", {}, str(reflection), elapsed_time=timer()-st)

             except Exception as e:
                 logging.exception(msg=f"Wrong JSON argument format in LLM ReAct response: {e}")
@@ -324,21 +386,34 @@ Instructions:
 6. Focus on delivering VALUE with the information already gathered
 Respond immediately with your final comprehensive answer.
 """
+        if self.check_if_canceled("Agent final instruction"):
+            return
         append_user_content(hist, final_instruction)

-        for txt, tkcnt in complete():
+        async for txt, tkcnt in complete():
             yield txt, tkcnt

-    def get_useful_memory(self, goal: str, sub_goal:str, topn=3) -> str:
-        # self.callback("get_useful_memory", {"topn": 3}, "...")
-        mems = self._canvas.get_memory()
-        rank = rank_memories(self.chat_mdl, goal, sub_goal, [summ for (user, assist, summ) in mems])
-        try:
-            rank = json_repair.loads(re.sub(r"```.*", "", rank))[:topn]
-            mems = [mems[r] for r in rank]
-            return "\n\n".join([f"User: {u}\nAgent: {a}" for u, a,_ in mems])
-        except Exception as e:
-            logging.exception(e)
-
-        return "Error occurred."
+    async def _gen_citations_async(self, text):
+        retrievals = self._canvas.get_reference()
+        retrievals = {"chunks": list(retrievals["chunks"].values()), "doc_aggs": list(retrievals["doc_aggs"].values())}
+        formated_refer = kb_prompt(retrievals, self.chat_mdl.max_length, True)
+        async for delta_ans in self._generate_streamly_async([{"role": "system", "content": citation_plus("\n\n".join(formated_refer))},
+                                                              {"role": "user", "content": text}
+                                                              ]):
+            yield delta_ans
+
+    def reset(self, only_output=False):
+        """
+        Reset all tools if they have a reset method. This avoids errors for tools like MCPToolCallSession.
+        """
+        for k in self._param.outputs.keys():
+            self._param.outputs[k]["value"] = None
+
+        for k, cpn in self.tools.items():
+            if hasattr(cpn, "reset") and callable(cpn.reset):
+                cpn.reset()
+        if only_output:
+            return
+        for k in self._param.inputs.keys():
+            self._param.inputs[k]["value"] = None
+        self._param.debug_inputs = {}
````
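The structured-output branch added to `_invoke_async` strips any `<think>` preamble and markdown fences before handing the text to `json_repair`, falling back to one more LLM round-trip (`_force_format_to_schema_async`) when parsing fails. A minimal sketch of the clean-and-parse step, with that LLM retry stubbed out:

````python
import re
import json_repair  # pip install json-repair


def clean_formatted_answer(ans: str) -> str:
    ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)   # drop reasoning preamble
    ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)    # drop text before the fence
    return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)      # drop the closing fence


def parse_structured(ans: str, max_retries: int = 1):
    for _ in range(max_retries + 1):
        try:
            return json_repair.loads(clean_formatted_answer(ans))
        except Exception:
            # The real component regenerates the answer against the schema
            # prompt here; this sketch simply gives up.
            break
    return None


raw = 'Some chatter first...```json\n{"title": "Q3 report", "pages": 12,}\n```'
print(parse_structured(raw))  # {'title': 'Q3 report', 'pages': 12}
````

Note that `json_repair.loads` tolerates the trailing comma in the example, which is exactly why it is preferred over `json.loads` for model output.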
```diff
@@ -14,9 +14,10 @@
 # limitations under the License.
 #

+import asyncio
 import re
 import time
-from abc import ABC, abstractmethod
+from abc import ABC
 import builtins
 import json
 import os
@@ -25,7 +26,7 @@ from typing import Any, List, Union
 import pandas as pd
 import trio
 from agent import settings
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout


 _FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
@@ -36,7 +37,7 @@ _IS_RAW_CONF = "_is_raw_conf"

 class ComponentParamBase(ABC):
     def __init__(self):
-        self.message_history_window_size = 22
+        self.message_history_window_size = 13
         self.inputs = {}
         self.outputs = {}
         self.description = ""
@@ -244,7 +245,7 @@ class ComponentParamBase(ABC):

         if not value_legal:
             raise ValueError(
-                "Plase check runtime conf, {} = {} does not match user-parameter restriction".format(
+                "Please check runtime conf, {} = {} does not match user-parameter restriction".format(
                     variable, value
                 )
             )
@@ -393,7 +394,7 @@ class ComponentParamBase(ABC):
 class ComponentBase(ABC):
     component_name: str
     thread_limiter = trio.CapacityLimiter(int(os.environ.get('MAX_CONCURRENT_CHATS', 10)))
-    variable_ref_patt = r"\{* *\{([a-zA-Z:0-9]+@[A-Za-z:0-9_.-]+|sys\.[a-z_]+)\} *\}*"
+    variable_ref_patt = r"\{* *\{([a-zA-Z:0-9]+@[A-Za-z0-9_.]+|sys\.[A-Za-z0-9_.]+|env\.[A-Za-z0-9_.]+)\} *\}*"

     def __str__(self):
         """
@@ -410,13 +411,27 @@ class ComponentBase(ABC):
         )

     def __init__(self, canvas, id, param: ComponentParamBase):
-        from agent.canvas import Canvas  # Local import to avoid cyclic dependency
-        assert isinstance(canvas, Canvas), "canvas must be an instance of Canvas"
+        from agent.canvas import Graph  # Local import to avoid cyclic dependency
+        assert isinstance(canvas, Graph), "canvas must be an instance of Canvas"
         self._canvas = canvas
         self._id = id
         self._param = param
         self._param.check()

+    def is_canceled(self) -> bool:
+        return self._canvas.is_canceled()
+
+    def check_if_canceled(self, message: str = "") -> bool:
+        if self.is_canceled():
+            task_id = getattr(self._canvas, 'task_id', 'unknown')
+            log_message = f"Task {task_id} has been canceled"
+            if message:
+                log_message += f" during {message}"
+            logging.info(log_message)
+            self.set_output("_ERROR", "Task has been canceled")
+            return True
+        return False
+
     def invoke(self, **kwargs) -> dict[str, Any]:
         self.set_output("_created_time", time.perf_counter())
         try:
@@ -431,7 +446,35 @@ class ComponentBase(ABC):
         self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
         return self.output()

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
+    async def invoke_async(self, **kwargs) -> dict[str, Any]:
+        """
+        Async wrapper for component invocation.
+        Prefers coroutine `_invoke_async` if present; otherwise falls back to `_invoke`.
+        Handles timing and error recording consistently with `invoke`.
+        """
+        self.set_output("_created_time", time.perf_counter())
+        try:
+            if self.check_if_canceled("Component processing"):
+                return
+
+            fn_async = getattr(self, "_invoke_async", None)
+            if fn_async and asyncio.iscoroutinefunction(fn_async):
+                await fn_async(**kwargs)
+            elif asyncio.iscoroutinefunction(self._invoke):
+                await self._invoke(**kwargs)
+            else:
+                await asyncio.to_thread(self._invoke, **kwargs)
+        except Exception as e:
+            if self.get_exception_default_value():
+                self.set_exception_default_value()
+            else:
+                self.set_output("_ERROR", str(e))
+            logging.exception(e)
+        self._param.debug_inputs = {}
+        self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
+        return self.output()
+
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
     def _invoke(self, **kwargs):
         raise NotImplementedError()

@@ -448,11 +491,16 @@ class ComponentBase(ABC):
     def error(self):
         return self._param.outputs.get("_ERROR", {}).get("value")

-    def reset(self):
-        for k in self._param.outputs.keys():
-            self._param.outputs[k]["value"] = None
-        for k in self._param.inputs.keys():
-            self._param.inputs[k]["value"] = None
+    def reset(self, only_output=False):
+        outputs: dict = self._param.outputs  # for better performance
+        for k in outputs.keys():
+            outputs[k]["value"] = None
+        if only_output:
+            return
+
+        inputs: dict = self._param.inputs  # for better performance
+        for k in inputs.keys():
+            inputs[k]["value"] = None
         self._param.debug_inputs = {}

     def get_input(self, key: str=None) -> Union[Any, dict[str, Any]]:
@@ -479,7 +527,7 @@ class ComponentBase(ABC):

     def get_input_elements_from_text(self, txt: str) -> dict[str, dict[str, str]]:
         res = {}
-        for r in re.finditer(self.variable_ref_patt, txt, flags=re.IGNORECASE):
+        for r in re.finditer(self.variable_ref_patt, txt, flags=re.IGNORECASE|re.DOTALL):
             exp = r.group(1)
             cpn_id, var_nm = exp.split("@") if exp.find("@")>0 else ("", exp)
             res[exp] = {
@@ -512,6 +560,7 @@ class ComponentBase(ABC):
     def get_param(self, name):
         if hasattr(self._param, name):
             return getattr(self._param, name)
+        return None

     def debug(self, **kwargs):
         return self._invoke(**kwargs)
@@ -519,24 +568,32 @@ class ComponentBase(ABC):
     def get_parent(self) -> Union[object, None]:
         pid = self._canvas.get_component(self._id).get("parent_id")
         if not pid:
-            return
+            return None
         return self._canvas.get_component(pid)["obj"]

     def get_upstream(self) -> List[str]:
         cpn_nms = self._canvas.get_component(self._id)['upstream']
         return cpn_nms

+    def get_downstream(self) -> List[str]:
+        cpn_nms = self._canvas.get_component(self._id)['downstream']
+        return cpn_nms
+
     @staticmethod
     def string_format(content: str, kv: dict[str, str]) -> str:
         for n, v in kv.items():
+            def repl(_match, val=v):
+                return str(val) if val is not None else ""
             content = re.sub(
-                r"\{%s\}" % re.escape(n), v, content
+                r"\{%s\}" % re.escape(n),
+                repl,
+                content
             )
         return content

     def exception_handler(self):
         if not self._param.exception_method:
-            return
+            return None
         return {
             "goto": self._param.exception_goto,
             "default_value": self._param.exception_default_value
@@ -550,6 +607,5 @@ class ComponentBase(ABC):
     def set_exception_default_value(self):
         self.set_output("result", self.get_exception_default_value())

-    @abstractmethod
     def thoughts(self) -> str:
-        ...
+        raise NotImplementedError()
```
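The `string_format` change swaps a literal replacement string for a callable. This matters because `re.sub` treats a string `repl` as a template: backslash sequences in the value (Windows paths, LaTeX) get interpreted or rejected, and a `None` value crashes outright, while a function's return value is inserted verbatim. A short sketch of the same pattern outside the component class:

```python
import re


def string_format(content: str, kv: dict[str, object]) -> str:
    for n, v in kv.items():
        def repl(_match, val=v):            # default arg binds v per iteration
            return str(val) if val is not None else ""
        content = re.sub(r"\{%s\}" % re.escape(n), repl, content)
    return content


tpl = "path={dir} user={user}"
print(string_format(tpl, {"dir": "C:\\temp\\new", "user": None}))
# path=C:\temp\new user=
# With a string repl, the \t and \n in the path would become tab and newline.
```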
```diff
@@ -14,6 +14,7 @@
 # limitations under the License.
 #
 from agent.component.fillup import UserFillUpParam, UserFillUp
+from api.db.services.file_service import FileService


 class BeginParam(UserFillUpParam):
@@ -37,9 +38,18 @@ class Begin(UserFillUp):
     component_name = "Begin"

     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Begin processing"):
+            return
+
         for k, v in kwargs.get("inputs", {}).items():
+            if self.check_if_canceled("Begin processing"):
+                return
+
             if isinstance(v, dict) and v.get("type", "").lower().find("file") >=0:
-                v = self._canvas.get_files([v["value"]])
+                if v.get("optional") and v.get("value", None) is None:
+                    v = None
+                else:
+                    v = FileService.get_files([v["value"]])
             else:
                 v = v.get("value")
             self.set_output(k, v)
```
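The `Begin._invoke` change routes file-typed inputs through `FileService.get_files` and lets optional file inputs arrive empty without failing. A toy normalizer showing the same branching; `resolve_files` is a placeholder for the real blob-parsing service, not a ragflow API:

```python
def resolve_files(refs):
    return [f"<parsed {r}>" for r in refs]  # stand-in for real blob parsing


def normalize_input(v):
    # File-typed inputs: optional + missing value passes through as None;
    # otherwise the referenced file is resolved to its parsed contents.
    if isinstance(v, dict) and v.get("type", "").lower().find("file") >= 0:
        if v.get("optional") and v.get("value") is None:
            return None
        return resolve_files([v["value"]])
    return v.get("value") if isinstance(v, dict) else v


print(normalize_input({"type": "file", "optional": True, "value": None}))  # None
print(normalize_input({"type": "file", "value": "doc-1"}))                 # ['<parsed doc-1>']
print(normalize_input({"type": "line", "value": "hello"}))                 # hello
```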
```diff
@@ -18,17 +18,17 @@ import os
 import re
 from abc import ABC

-from api.db import LLMType
+from common.constants import LLMType
 from api.db.services.llm_service import LLMBundle
 from agent.component.llm import LLMParam, LLM
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 from rag.llm.chat_model import ERROR_PREFIX


 class CategorizeParam(LLMParam):

     """
-    Define the Categorize component parameters.
+    Define the categorize component parameters.
     """
     def __init__(self):
         super().__init__()
@@ -80,7 +80,7 @@ Here's description of each category:
 - Prioritize the most specific applicable category
 - Return only the category name without explanations
 - Use "Other" only when no other category fits

 """.format(
     "\n - ".join(list(self.category_description.keys())),
     "\n".join(descriptions)
@@ -96,8 +96,11 @@ Here's description of each category:
 class Categorize(LLM, ABC):
     component_name = "Categorize"

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Categorize processing"):
+            return
+
         msg = self._canvas.get_history(self._param.message_history_window_size)
         if not msg:
             msg = [{"role": "user", "content": ""}]
@@ -112,12 +115,20 @@ class Categorize(LLM, ABC):

         user_prompt = """
 ---- Real Data ----
 {} →
 """.format(" | ".join(["{}: \"{}\"".format(c["role"].upper(), re.sub(r"\n", "", c["content"], flags=re.DOTALL)) for c in msg]))

+        if self.check_if_canceled("Categorize processing"):
+            return
+
         ans = chat_mdl.chat(self._param.sys_prompt, [{"role": "user", "content": user_prompt}], self._param.gen_conf())
         logging.info(f"input: {user_prompt}, answer: {str(ans)}")
         if ERROR_PREFIX in ans:
             raise Exception(ans)

+        if self.check_if_canceled("Categorize processing"):
+            return
+
         # Count the number of times each category appears in the answer.
         category_counts = {}
         for c in self._param.category_description.keys():
@@ -134,4 +145,4 @@ class Categorize(LLM, ABC):
         self.set_output("_next", cpn_ids)

     def thoughts(self) -> str:
         return "Which should it falls into {}? ...".format(",".join([f"`{c}`" for c, _ in self._param.category_description.items()]))
```
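The `Categorize` hunk above cuts off right at the comment about counting how often each category name appears in the model's answer. For orientation, here is a toy version of that voting step, under the assumption that the component counts substring hits per category and routes to the most frequent one (the actual counting code falls outside the shown hunk):

```python
def pick_category(answer: str, categories: list[str]) -> str:
    counts = {c: answer.count(c) for c in categories}
    # max() keeps the first-listed category on ties, matching dict order.
    return max(categories, key=lambda c: counts[c])


ans = "This looks like a Complaint, definitely a Complaint rather than a Query."
print(pick_category(ans, ["Query", "Complaint", "Other"]))  # Complaint
```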
agent/component/data_operations.py (new file, 218 lines)
```diff
@@ -0,0 +1,218 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from abc import ABC
+import ast
+import os
+from agent.component.base import ComponentBase, ComponentParamBase
+from api.utils.api_utils import timeout
+
+class DataOperationsParam(ComponentParamBase):
+    """
+    Define the Data Operations component parameters.
+    """
+    def __init__(self):
+        super().__init__()
+        self.query = []
+        self.operations = "literal_eval"
+        self.select_keys = []
+        self.filter_values=[]
+        self.updates=[]
+        self.remove_keys=[]
+        self.rename_keys=[]
+        self.outputs = {
+            "result": {
+                "value": [],
+                "type": "Array of Object"
+            }
+        }
+
+    def check(self):
+        self.check_valid_value(self.operations, "Support operations", ["select_keys", "literal_eval","combine","filter_values","append_or_update","remove_keys","rename_keys"])
+
+
+class DataOperations(ComponentBase,ABC):
+    component_name = "DataOperations"
+
+    def get_input_form(self) -> dict[str, dict]:
+        return {
+            k: {"name": o.get("name", ""), "type": "line"}
+            for input_item in (self._param.query or [])
+            for k, o in self.get_input_elements_from_text(input_item).items()
+        }
+
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
+    def _invoke(self, **kwargs):
+        self.input_objects=[]
+        inputs = getattr(self._param, "query", None)
+        if not isinstance(inputs, (list, tuple)):
+            inputs = [inputs]
+        for input_ref in inputs:
+            input_object=self._canvas.get_variable_value(input_ref)
+            self.set_input_value(input_ref, input_object)
+            if input_object is None:
+                continue
+            if isinstance(input_object,dict):
+                self.input_objects.append(input_object)
+            elif isinstance(input_object,list):
+                self.input_objects.extend(x for x in input_object if isinstance(x, dict))
+            else:
+                continue
+        if self._param.operations == "select_keys":
+            self._select_keys()
+        elif self._param.operations == "recursive_eval":
+            self._literal_eval()
+        elif self._param.operations == "combine":
+            self._combine()
+        elif self._param.operations == "filter_values":
+            self._filter_values()
+        elif self._param.operations == "append_or_update":
+            self._append_or_update()
+        elif self._param.operations == "remove_keys":
+            self._remove_keys()
+        else:
+            self._rename_keys()
+
+    def _select_keys(self):
+        filter_criteria: list[str] = self._param.select_keys
+        results = [{key: value for key, value in data_dict.items() if key in filter_criteria} for data_dict in self.input_objects]
+        self.set_output("result", results)
+
+    def _recursive_eval(self, data):
+        if isinstance(data, dict):
+            return {k: self.recursive_eval(v) for k, v in data.items()}
+        if isinstance(data, list):
+            return [self.recursive_eval(item) for item in data]
+        if isinstance(data, str):
+            try:
+                if (
+                    data.strip().startswith(("{", "[", "(", "'", '"'))
+                    or data.strip().lower() in ("true", "false", "none")
+                    or data.strip().replace(".", "").isdigit()
+                ):
+                    return ast.literal_eval(data)
+            except (ValueError, SyntaxError, TypeError, MemoryError):
+                return data
+            else:
+                return data
+        return data
+
+    def _literal_eval(self):
+        self.set_output("result", self._recursive_eval(self.input_objects))
+
+    def _combine(self):
+        result={}
+        for obj in self.input_objects:
+            for key, value in obj.items():
+                if key not in result:
+                    result[key] = value
+                elif isinstance(result[key], list):
+                    if isinstance(value, list):
+                        result[key].extend(value)
+                    else:
+                        result[key].append(value)
+                else:
+                    result[key] = (
+                        [result[key], value] if not isinstance(value, list) else [result[key], *value]
+                    )
+        self.set_output("result", result)
+
+    def norm(self,v):
+        s = "" if v is None else str(v)
+        return s
+
+    def match_rule(self, obj, rule):
+        key = rule.get("key")
+        op = (rule.get("operator") or "equals").lower()
+        target = self.norm(rule.get("value"))
+        target = self._canvas.get_value_with_variable(target) or target
+        if key not in obj:
+            return False
+        val = obj.get(key, None)
+        v = self.norm(val)
+        if op == "=":
+            return v == target
+        if op == "≠":
+            return v != target
+        if op == "contains":
+            return target in v
+        if op == "start with":
+            return v.startswith(target)
+        if op == "end with":
+            return v.endswith(target)
+        return False
+
+    def _filter_values(self):
+        results=[]
+        rules = (getattr(self._param, "filter_values", None) or [])
+        for obj in self.input_objects:
+            if not rules:
+                results.append(obj)
+                continue
+            if all(self.match_rule(obj, r) for r in rules):
+                results.append(obj)
+        self.set_output("result", results)
+
+    def _append_or_update(self):
+        results=[]
+        updates = getattr(self._param, "updates", []) or []
+        for obj in self.input_objects:
+            new_obj = dict(obj)
+            for item in updates:
+                if not isinstance(item, dict):
+                    continue
+                k = (item.get("key") or "").strip()
+                if not k:
+                    continue
+                new_obj[k] = self._canvas.get_value_with_variable(item.get("value")) or item.get("value")
+            results.append(new_obj)
+        self.set_output("result", results)
+
+    def _remove_keys(self):
+        results = []
+        remove_keys = getattr(self._param, "remove_keys", []) or []
+
+        for obj in (self.input_objects or []):
+            new_obj = dict(obj)
+            for k in remove_keys:
+                if not isinstance(k, str):
+                    continue
+                new_obj.pop(k, None)
+            results.append(new_obj)
+        self.set_output("result", results)
+
+    def _rename_keys(self):
+        results = []
+        rename_pairs = getattr(self._param, "rename_keys", []) or []
+
+        for obj in (self.input_objects or []):
+            new_obj = dict(obj)
+            for pair in rename_pairs:
+                if not isinstance(pair, dict):
+                    continue
+                old = (pair.get("old_key") or "").strip()
+                new = (pair.get("new_key") or "").strip()
+                if not old or not new or old == new:
+                    continue
+                if old in new_obj:
+                    new_obj[new] = new_obj.pop(old)
+            results.append(new_obj)
```
|
self.set_output("result", results)
|
||||||
|
|
||||||
|
def thoughts(self) -> str:
|
||||||
|
return "DataOperation in progress"
|
||||||
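For orientation, a minimal standalone sketch (hypothetical records, not part of the diff) of what the operations above produce on a list of dicts:

import ast

records = [{"name": "a", "score": "1"}, {"name": "b", "score": "2"}]

# select_keys(["name"]) -> [{"name": "a"}, {"name": "b"}]
selected = [{k: v for k, v in r.items() if k in ["name"]} for r in records]

# combine -> a single dict whose values accumulate into lists:
# {"name": ["a", "b"], "score": ["1", "2"]}

# literal_eval walks the structure and parses "1"/"2" into ints via ast.literal_eval
parsed = [{k: ast.literal_eval(v) if v.replace(".", "").isdigit() else v
           for k, v in r.items()} for r in records]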
32  agent/component/exit_loop.py  Normal file
@ -0,0 +1,32 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC

from agent.component.base import ComponentBase, ComponentParamBase


class ExitLoopParam(ComponentParamBase, ABC):
    def check(self):
        return True


class ExitLoop(ComponentBase, ABC):
    component_name = "ExitLoop"

    def _invoke(self, **kwargs):
        pass

    def thoughts(self) -> str:
        return ""
@ -13,7 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from agent.component.base import ComponentBase, ComponentParamBase
+import json
+import re
+from functools import partial
+
+from agent.component.base import ComponentParamBase, ComponentBase
+from api.db.services.file_service import FileService


 class UserFillUpParam(ComponentParamBase):
@ -31,10 +36,42 @@ class UserFillUp(ComponentBase):
     component_name = "UserFillUp"

     def _invoke(self, **kwargs):
+        if self.check_if_canceled("UserFillUp processing"):
+            return
+
+        if self._param.enable_tips:
+            content = self._param.tips
+            for k, v in self.get_input_elements_from_text(self._param.tips).items():
+                v = v["value"]
+                ans = ""
+                if isinstance(v, partial):
+                    for t in v():
+                        ans += t
+                elif isinstance(v, list):
+                    ans = ",".join([str(vv) for vv in v])
+                elif not isinstance(v, str):
+                    try:
+                        ans = json.dumps(v, ensure_ascii=False)
+                    except Exception:
+                        pass
+                else:
+                    ans = v
+                if not ans:
+                    ans = ""
+                content = re.sub(r"\{%s\}" % k, ans, content)
+
+            self.set_output("tips", content)
         for k, v in kwargs.get("inputs", {}).items():
+            if self.check_if_canceled("UserFillUp processing"):
+                return
+            if isinstance(v, dict) and v.get("type", "").lower().find("file") >= 0:
+                if v.get("optional") and v.get("value", None) is None:
+                    v = None
+                else:
+                    v = FileService.get_files([v["value"]])
+            else:
+                v = v.get("value")
             self.set_output(k, v)

     def thoughts(self) -> str:
         return "Waiting for your input..."
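For illustration only (hypothetical tips string, mirroring the re.sub call above): each {key} placeholder in the tips text is replaced with the stringified input value.

import re

tips = "Please review {doc_name} before continuing."
inputs = {"doc_name": "report.pdf"}
content = tips
for k, ans in inputs.items():
    content = re.sub(r"\{%s\}" % k, ans, content)
print(content)  # Please review report.pdf before continuing.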
@ -19,11 +19,12 @@ import os
 import re
 import time
 from abc import ABC

 import requests

-from api.utils.api_utils import timeout
-from deepdoc.parser import HtmlParser
 from agent.component.base import ComponentBase, ComponentParamBase
+from common.connection_utils import timeout
+from deepdoc.parser import HtmlParser


 class InvokeParam(ComponentParamBase):
@ -43,26 +44,41 @@ class InvokeParam(ComponentParamBase):
         self.datatype = "json"  # New parameter to determine data posting type

     def check(self):
-        self.check_valid_value(self.method.lower(), "Type of content from the crawler", ['get', 'post', 'put'])
+        self.check_valid_value(self.method.lower(), "Type of content from the crawler", ["get", "post", "put"])
         self.check_empty(self.url, "End point URL")
         self.check_positive_integer(self.timeout, "Timeout time in second")
         self.check_boolean(self.clean_html, "Clean HTML")
-        self.check_valid_value(self.datatype.lower(), "Data post type", ['json', 'formdata'])  # Check for valid datapost value
+        self.check_valid_value(self.datatype.lower(), "Data post type", ["json", "formdata"])  # Check for valid datapost value


 class Invoke(ComponentBase, ABC):
     component_name = "Invoke"

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Invoke processing"):
+            return
+
         args = {}
         for para in self._param.variables:
-            if para.get("value") is not None:
+            if para.get("value"):
                 args[para["key"]] = para["value"]
             else:
                 args[para["key"]] = self._canvas.get_variable_value(para["ref"])

         url = self._param.url.strip()
+
+        def replace_variable(match):
+            var_name = match.group(1)
+            try:
+                value = self._canvas.get_variable_value(var_name)
+                return str(value or "")
+            except Exception:
+                return ""
+
+        # {base_url} or {component_id@variable_name}
+        url = re.sub(r"\{([a-zA-Z_][a-zA-Z0-9_.@-]*)\}", replace_variable, url)
+
         if url.find("http") != 0:
             url = "http://" + url
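A rough sketch (hypothetical canvas values) of the URL templating introduced above: {name} and {component_id@variable} references are resolved before the request is sent.

import re

variables = {"base_url": "api.example.com", "search@query": "ragflow"}

def replace_variable(match):
    return str(variables.get(match.group(1), ""))

url = "{base_url}/v1/search?q={search@query}"
url = re.sub(r"\{([a-zA-Z_][a-zA-Z0-9_.@-]*)\}", replace_variable, url)
print(url)  # api.example.com/v1/search?q=ragflow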
@ -75,52 +91,35 @@ class Invoke(ComponentBase, ABC):
             proxies = {"http": self._param.proxy, "https": self._param.proxy}

         last_e = ""
-        for _ in range(self._param.max_retries+1):
+        for _ in range(self._param.max_retries + 1):
+            if self.check_if_canceled("Invoke processing"):
+                return
+
             try:
-                if method == 'get':
-                    response = requests.get(url=url,
-                                            params=args,
-                                            headers=headers,
-                                            proxies=proxies,
-                                            timeout=self._param.timeout)
+                if method == "get":
+                    response = requests.get(url=url, params=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                     if self._param.clean_html:
                         sections = HtmlParser()(None, response.content)
                         self.set_output("result", "\n".join(sections))
                     else:
                         self.set_output("result", response.text)

-                if method == 'put':
-                    if self._param.datatype.lower() == 'json':
-                        response = requests.put(url=url,
-                                                json=args,
-                                                headers=headers,
-                                                proxies=proxies,
-                                                timeout=self._param.timeout)
+                if method == "put":
+                    if self._param.datatype.lower() == "json":
+                        response = requests.put(url=url, json=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                     else:
-                        response = requests.put(url=url,
-                                                data=args,
-                                                headers=headers,
-                                                proxies=proxies,
-                                                timeout=self._param.timeout)
+                        response = requests.put(url=url, data=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                     if self._param.clean_html:
                         sections = HtmlParser()(None, response.content)
                         self.set_output("result", "\n".join(sections))
                     else:
                         self.set_output("result", response.text)

-                if method == 'post':
-                    if self._param.datatype.lower() == 'json':
-                        response = requests.post(url=url,
-                                                 json=args,
-                                                 headers=headers,
-                                                 proxies=proxies,
-                                                 timeout=self._param.timeout)
+                if method == "post":
+                    if self._param.datatype.lower() == "json":
+                        response = requests.post(url=url, json=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                     else:
-                        response = requests.post(url=url,
-                                                 data=args,
-                                                 headers=headers,
-                                                 proxies=proxies,
-                                                 timeout=self._param.timeout)
+                        response = requests.post(url=url, data=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                     if self._param.clean_html:
                         self.set_output("result", "\n".join(sections))
                     else:
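The datatype switch above maps onto the two standard requests idioms (sketch only, assuming a reachable endpoint):

import requests

args = {"key": "value"}
# datatype == "json": body is JSON-encoded, Content-Type: application/json
r1 = requests.post("http://example.com/api", json=args, timeout=3)
# datatype == "formdata": body is form-encoded, Content-Type: application/x-www-form-urlencoded
r2 = requests.post("http://example.com/api", data=args, timeout=3)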
@ -128,6 +127,9 @@ class Invoke(ComponentBase, ABC):

                 return self.output("result")
             except Exception as e:
+                if self.check_if_canceled("Invoke processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"Http request error: {e}")
                 time.sleep(self._param.delay_after_error)
@ -139,4 +141,4 @@ class Invoke(ComponentBase, ABC):
             assert False, self.output()

     def thoughts(self) -> str:
         return "Waiting for the server respond..."
@ -16,6 +16,13 @@
 from abc import ABC
 from agent.component.base import ComponentBase, ComponentParamBase

+"""
+class VariableModel(BaseModel):
+    data_type: Annotated[Literal["string", "number", "Object", "Boolean", "Array<string>", "Array<number>", "Array<object>", "Array<boolean>"], Field(default="Array<string>")]
+    input_mode: Annotated[Literal["constant", "variable"], Field(default="constant")]
+    value: Annotated[Any, Field(default=None)]
+    model_config = ConfigDict(extra="forbid")
+"""

 class IterationParam(ComponentParamBase):
     """
@ -25,6 +32,7 @@ class IterationParam(ComponentParamBase):
     def __init__(self):
         super().__init__()
         self.items_ref = ""
+        self.variable = {}

     def get_input_form(self) -> dict[str, dict]:
         return {
@ -49,6 +57,9 @@ class Iteration(ComponentBase, ABC):
         return cid

     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Iteration processing"):
+            return
+
         arr = self._canvas.get_variable_value(self._param.items_ref)
         if not isinstance(arr, list):
             self.set_output("_ERROR", self._param.items_ref + " must be an array, but its type is " + str(type(arr)))

@ -33,6 +33,9 @@ class IterationItem(ComponentBase, ABC):
         self._idx = 0

     def _invoke(self, **kwargs):
+        if self.check_if_canceled("IterationItem processing"):
+            return
+
         parent = self.get_parent()
         arr = self._canvas.get_variable_value(parent._param.items_ref)
         if not isinstance(arr, list):
@ -40,12 +43,17 @@ class IterationItem(ComponentBase, ABC):
             raise Exception(parent._param.items_ref + " must be an array, but its type is " + str(type(arr)))

         if self._idx > 0:
+            if self.check_if_canceled("IterationItem processing"):
+                return
             self.output_collation()

         if self._idx >= len(arr):
             self._idx = -1
             return

+        if self.check_if_canceled("IterationItem processing"):
+            return
+
         self.set_output("item", arr[self._idx])
         self.set_output("index", self._idx)

@ -80,4 +88,4 @@ class IterationItem(ComponentBase, ABC):
         return self._idx == -1

     def thoughts(self) -> str:
         return "Next turn..."
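The _idx bookkeeping above amounts to a small state machine (illustrative sketch, not the component itself): the index walks the array and -1 marks exhaustion.

arr = ["a", "b", "c"]
idx = 0
while idx != -1:
    item, index = arr[idx], idx   # what set_output("item"/"index") exposes
    idx = idx + 1 if idx + 1 < len(arr) else -1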
168  agent/component/list_operations.py  Normal file
@ -0,0 +1,168 @@
from abc import ABC
import os
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout


class ListOperationsParam(ComponentParamBase):
    """
    Define the List Operations component parameters.
    """
    def __init__(self):
        super().__init__()
        self.query = ""
        self.operations = "topN"
        self.n = 0
        self.sort_method = "asc"
        self.filter = {
            "operator": "=",
            "value": ""
        }
        self.outputs = {
            "result": {
                "value": [],
                "type": "Array of ?"
            },
            "first": {
                "value": "",
                "type": "?"
            },
            "last": {
                "value": "",
                "type": "?"
            }
        }

    def check(self):
        self.check_empty(self.query, "query")
        self.check_valid_value(self.operations, "Support operations", ["topN", "head", "tail", "filter", "sort", "drop_duplicates"])

    def get_input_form(self) -> dict[str, dict]:
        return {}


class ListOperations(ComponentBase, ABC):
    component_name = "ListOperations"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
    def _invoke(self, **kwargs):
        self.input_objects = []
        inputs = getattr(self._param, "query", None)
        self.inputs = self._canvas.get_variable_value(inputs)
        if not isinstance(self.inputs, list):
            raise TypeError("The input of List Operations should be an array.")
        self.set_input_value(inputs, self.inputs)
        if self._param.operations == "topN":
            self._topN()
        elif self._param.operations == "head":
            self._head()
        elif self._param.operations == "tail":
            self._tail()
        elif self._param.operations == "filter":
            self._filter()
        elif self._param.operations == "sort":
            self._sort()
        elif self._param.operations == "drop_duplicates":
            self._drop_duplicates()

    def _coerce_n(self):
        try:
            return int(getattr(self._param, "n", 0))
        except Exception:
            return 0

    def _set_outputs(self, outputs):
        self._param.outputs["result"]["value"] = outputs
        self._param.outputs["first"]["value"] = outputs[0] if outputs else None
        self._param.outputs["last"]["value"] = outputs[-1] if outputs else None

    def _topN(self):
        n = self._coerce_n()
        if n < 1:
            outputs = []
        else:
            n = min(n, len(self.inputs))
            outputs = self.inputs[:n]
        self._set_outputs(outputs)

    def _head(self):
        n = self._coerce_n()
        if 1 <= n <= len(self.inputs):
            outputs = [self.inputs[n - 1]]
        else:
            outputs = []
        self._set_outputs(outputs)

    def _tail(self):
        n = self._coerce_n()
        if 1 <= n <= len(self.inputs):
            outputs = [self.inputs[-n]]
        else:
            outputs = []
        self._set_outputs(outputs)

    def _filter(self):
        self._set_outputs([i for i in self.inputs if self._eval(self._norm(i), self._param.filter["operator"], self._param.filter["value"])])

    def _norm(self, v):
        return "" if v is None else str(v)

    def _eval(self, v, operator, value):
        if operator == "=":
            return v == value
        elif operator == "≠":
            return v != value
        elif operator == "contains":
            return value in v
        elif operator == "start with":
            return v.startswith(value)
        elif operator == "end with":
            return v.endswith(value)
        else:
            return False

    def _sort(self):
        items = self.inputs or []
        method = getattr(self._param, "sort_method", "asc") or "asc"
        reverse = method == "desc"

        if not items:
            self._set_outputs([])
            return

        first = items[0]

        if isinstance(first, dict):
            outputs = sorted(
                items,
                key=lambda x: self._hashable(x),
                reverse=reverse,
            )
        else:
            outputs = sorted(items, reverse=reverse)

        self._set_outputs(outputs)

    def _drop_duplicates(self):
        seen = set()
        outs = []
        for item in self.inputs:
            k = self._hashable(item)
            if k in seen:
                continue
            seen.add(k)
            outs.append(item)
        self._set_outputs(outs)

    def _hashable(self, x):
        if isinstance(x, dict):
            return tuple(sorted((k, self._hashable(v)) for k, v in x.items()))
        if isinstance(x, (list, tuple)):
            return tuple(self._hashable(v) for v in x)
        if isinstance(x, set):
            return tuple(sorted(self._hashable(v) for v in x))
        return x

    def thoughts(self) -> str:
        return "ListOperation in progress"
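A minimal sketch of why _hashable matters for drop_duplicates (hypothetical rows): dicts are unhashable, so they are canonicalized into sorted key/value tuples before set membership checks.

rows = [{"id": 1}, {"id": 1}, {"id": 2}]

def hashable(x):
    if isinstance(x, dict):
        return tuple(sorted((k, hashable(v)) for k, v in x.items()))
    if isinstance(x, (list, tuple)):
        return tuple(hashable(v) for v in x)
    return x

seen, outs = set(), []
for row in rows:
    key = hashable(row)
    if key not in seen:
        seen.add(key)
        outs.append(row)
print(outs)  # [{'id': 1}, {'id': 2}]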
@ -13,22 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import asyncio
 import json
 import logging
 import os
 import re
-from typing import Any
+import threading

-import json_repair
 from copy import deepcopy
+from typing import Any, Generator, AsyncGenerator
+import json_repair
 from functools import partial

-from api.db import LLMType
-from api.db.services.llm_service import LLMBundle, TenantLLMService
+from common.constants import LLMType
+from api.db.services.llm_service import LLMBundle
+from api.db.services.tenant_llm_service import TenantLLMService
 from agent.component.base import ComponentBase, ComponentParamBase
-from api.utils.api_utils import timeout
-from rag.prompts import message_fit_in, citation_prompt
-from rag.prompts.prompts import tool_call_summary
+from common.connection_utils import timeout
+from rag.prompts.generator import tool_call_summary, message_fit_in, citation_prompt, structured_output_prompt


 class LLMParam(ComponentParamBase):
@ -83,9 +83,9 @@ class LLMParam(ComponentParamBase):

 class LLM(ComponentBase):
     component_name = "LLM"

-    def __init__(self, canvas, id, param: ComponentParamBase):
-        super().__init__(canvas, id, param)
+    def __init__(self, canvas, component_id, param: ComponentParamBase):
+        super().__init__(canvas, component_id, param)
         self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), TenantLLMService.llm_id2llm_type(self._param.llm_id),
                                   self._param.llm_id, max_retries=self._param.max_retries,
                                   retry_interval=self._param.delay_after_error
@ -103,6 +103,8 @@ class LLM(ComponentBase):

     def get_input_elements(self) -> dict[str, Any]:
         res = self.get_input_elements_from_text(self._param.sys_prompt)
+        if isinstance(self._param.prompts, str):
+            self._param.prompts = [{"role": "user", "content": self._param.prompts}]
         for prompt in self._param.prompts:
             d = self.get_input_elements_from_text(prompt["content"])
             res.update(d)
@ -114,6 +116,17 @@ class LLM(ComponentBase):
     def add2system_prompt(self, txt):
         self._param.sys_prompt += txt

+    def _sys_prompt_and_msg(self, msg, args):
+        if isinstance(self._param.prompts, str):
+            self._param.prompts = [{"role": "user", "content": self._param.prompts}]
+        for p in self._param.prompts:
+            if msg and msg[-1]["role"] == p["role"]:
+                continue
+            p = deepcopy(p)
+            p["content"] = self.string_format(p["content"], args)
+            msg.append(p)
+        return msg, self.string_format(self._param.sys_prompt, args)
+
     def _prepare_prompt_variables(self):
         if self._param.visual_files_var:
             self.imgs = self._canvas.get_variable_value(self._param.visual_files_var)
@ -129,7 +142,6 @@ class LLM(ComponentBase):

         args = {}
         vars = self.get_input_elements() if not self._param.debug_inputs else self._param.debug_inputs
-        prompt = self._param.sys_prompt
         for k, o in vars.items():
             args[k] = o["value"]
             if not isinstance(args[k], str):
@ -139,22 +151,36 @@ class LLM(ComponentBase):
             args[k] = str(args[k])
             self.set_input_value(k, args[k])

-        msg = self._canvas.get_history(self._param.message_history_window_size)[:-1]
-        msg.extend(deepcopy(self._param.prompts))
-        prompt = self.string_format(prompt, args)
-        for m in msg:
-            m["content"] = self.string_format(m["content"], args)
-        if self._canvas.get_reference()["chunks"]:
-            prompt += citation_prompt()
-        return prompt, msg
+        msg, sys_prompt = self._sys_prompt_and_msg(self._canvas.get_history(self._param.message_history_window_size)[:-1], args)
+        user_defined_prompt, sys_prompt = self._extract_prompts(sys_prompt)
+        if self._param.cite and self._canvas.get_reference()["chunks"]:
+            sys_prompt += citation_prompt(user_defined_prompt)
+        return sys_prompt, msg, user_defined_prompt
+
+    def _extract_prompts(self, sys_prompt):
+        pts = {}
+        for tag in ["TASK_ANALYSIS", "PLAN_GENERATION", "REFLECTION", "CONTEXT_SUMMARY", "CONTEXT_RANKING", "CITATION_GUIDELINES"]:
+            r = re.search(rf"<{tag}>(.*?)</{tag}>", sys_prompt, flags=re.DOTALL|re.IGNORECASE)
+            if not r:
+                continue
+            pts[tag.lower()] = r.group(1)
+            sys_prompt = re.sub(rf"<{tag}>(.*?)</{tag}>", "", sys_prompt, flags=re.DOTALL|re.IGNORECASE)
+        return pts, sys_prompt

     def _generate(self, msg:list[dict], **kwargs) -> str:
         if not self.imgs:
             return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
         return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)

-    def _generate_streamly(self, msg:list[dict], **kwargs) -> str:
+    async def _generate_async(self, msg: list[dict], **kwargs) -> str:
+        if not self.imgs and hasattr(self.chat_mdl, "async_chat"):
+            return await self.chat_mdl.async_chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
+        if self.imgs and hasattr(self.chat_mdl, "async_chat"):
+            return await self.chat_mdl.async_chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)
+        return await asyncio.to_thread(self._generate, msg, **kwargs)
+
+    def _generate_streamly(self, msg:list[dict], **kwargs) -> Generator[str, None, None]:
         ans = ""
         last_idx = 0
         endswith_think = False
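An illustrative run of the tag extraction added above (hypothetical system prompt): named <TAG>...</TAG> sections are pulled into a dict and stripped from the prompt text.

import re

sys_prompt = "You are helpful.<REFLECTION>Re-check your answer.</REFLECTION>"
pts = {}
for tag in ["REFLECTION"]:
    r = re.search(rf"<{tag}>(.*?)</{tag}>", sys_prompt, flags=re.DOTALL | re.IGNORECASE)
    if r:
        pts[tag.lower()] = r.group(1)
        sys_prompt = re.sub(rf"<{tag}>(.*?)</{tag}>", "", sys_prompt, flags=re.DOTALL | re.IGNORECASE)
print(pts)         # {'reflection': 'Re-check your answer.'}
print(sys_prompt)  # You are helpful.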
@ -188,33 +214,158 @@ class LLM(ComponentBase):
         for txt in self.chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs):
             yield delta(txt)

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
-    def _invoke(self, **kwargs):
+    async def _generate_streamly_async(self, msg: list[dict], **kwargs) -> AsyncGenerator[str, None]:
+        async def delta_wrapper(txt_iter):
+            ans = ""
+            last_idx = 0
+            endswith_think = False
+
+            def delta(txt):
+                nonlocal ans, last_idx, endswith_think
+                delta_ans = txt[last_idx:]
+                ans = txt
+
+                if delta_ans.find("<think>") == 0:
+                    last_idx += len("<think>")
+                    return "<think>"
+                elif delta_ans.find("<think>") > 0:
+                    delta_ans = txt[last_idx:last_idx + delta_ans.find("<think>")]
+                    last_idx += delta_ans.find("<think>")
+                    return delta_ans
+                elif delta_ans.endswith("</think>"):
+                    endswith_think = True
+                elif endswith_think:
+                    endswith_think = False
+                    return "</think>"
+
+                last_idx = len(ans)
+                if ans.endswith("</think>"):
+                    last_idx -= len("</think>")
+                return re.sub(r"(<think>|</think>)", "", delta_ans)
+
+            async for t in txt_iter:
+                yield delta(t)
+
+        if not self.imgs and hasattr(self.chat_mdl, "async_chat_streamly"):
+            async for t in delta_wrapper(self.chat_mdl.async_chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)):
+                yield t
+            return
+        if self.imgs and hasattr(self.chat_mdl, "async_chat_streamly"):
+            async for t in delta_wrapper(self.chat_mdl.async_chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)):
+                yield t
+            return
+
+        # fallback
+        loop = asyncio.get_running_loop()
+        queue: asyncio.Queue = asyncio.Queue()
+
+        def worker():
+            try:
+                for item in self._generate_streamly(msg, **kwargs):
+                    loop.call_soon_threadsafe(queue.put_nowait, item)
+            except Exception as e:
+                loop.call_soon_threadsafe(queue.put_nowait, e)
+            finally:
+                loop.call_soon_threadsafe(queue.put_nowait, StopAsyncIteration)
+
+        threading.Thread(target=worker, daemon=True).start()
+        while True:
+            item = await queue.get()
+            if item is StopAsyncIteration:
+                break
+            if isinstance(item, Exception):
+                raise item
+            yield item
+
+    async def _stream_output_async(self, prompt, msg):
+        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
+        answer = ""
+        last_idx = 0
+        endswith_think = False
+
+        def delta(txt):
+            nonlocal answer, last_idx, endswith_think
+            delta_ans = txt[last_idx:]
+            answer = txt
+
+            if delta_ans.find("<think>") == 0:
+                last_idx += len("<think>")
+                return "<think>"
+            elif delta_ans.find("<think>") > 0:
+                delta_ans = txt[last_idx:last_idx + delta_ans.find("<think>")]
+                last_idx += delta_ans.find("<think>")
+                return delta_ans
+            elif delta_ans.endswith("</think>"):
+                endswith_think = True
+            elif endswith_think:
+                endswith_think = False
+                return "</think>"
+
+            last_idx = len(answer)
+            if answer.endswith("</think>"):
+                last_idx -= len("</think>")
+            return re.sub(r"(<think>|</think>)", "", delta_ans)
+
+        stream_kwargs = {"images": self.imgs} if self.imgs else {}
+        async for ans in self.chat_mdl.async_chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), **stream_kwargs):
+            if self.check_if_canceled("LLM streaming"):
+                return
+
+            if isinstance(ans, int):
+                continue
+
+            if ans.find("**ERROR**") >= 0:
+                if self.get_exception_default_value():
+                    self.set_output("content", self.get_exception_default_value())
+                    yield self.get_exception_default_value()
+                else:
+                    self.set_output("_ERROR", ans)
+                return
+
+            yield delta(ans)
+
+        self.set_output("content", answer)
+
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
+    async def _invoke_async(self, **kwargs):
+        if self.check_if_canceled("LLM processing"):
+            return
+
         def clean_formated_answer(ans: str) -> str:
             ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
             ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)
             return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)

-        prompt, msg = self._prepare_prompt_variables()
-        error = ""
+        prompt, msg, _ = self._prepare_prompt_variables()
+        error: str = ""
+        output_structure = None
+        try:
+            output_structure = self._param.outputs["structured"]
+        except Exception:
+            pass

-        if self._param.output_structure:
-            prompt += "\nThe output MUST follow this JSON format:\n"+json.dumps(self._param.output_structure, ensure_ascii=False, indent=2)
-            prompt += "\nRedundant information is FORBIDDEN."
-        for _ in range(self._param.max_retries+1):
-            _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
-            error = ""
-            ans = self._generate(msg)
-            msg.pop(0)
-            if ans.find("**ERROR**") >= 0:
-                logging.error(f"LLM response error: {ans}")
-                error = ans
-                continue
-            try:
-                self.set_output("structured_content", json_repair.loads(clean_formated_answer(ans)))
-                return
-            except Exception:
-                msg.append({"role": "user", "content": "The answer can't not be parsed as JSON"})
-                error = "The answer can't not be parsed as JSON"
-        if error:
-            self.set_output("_ERROR", error)
+        if output_structure and isinstance(output_structure, dict) and output_structure.get("properties") and len(output_structure["properties"]) > 0:
+            schema = json.dumps(output_structure, ensure_ascii=False, indent=2)
+            prompt_with_schema = prompt + structured_output_prompt(schema)
+            for _ in range(self._param.max_retries + 1):
+                if self.check_if_canceled("LLM processing"):
+                    return
+
+                _, msg_fit = message_fit_in(
+                    [{"role": "system", "content": prompt_with_schema}, *deepcopy(msg)],
+                    int(self.chat_mdl.max_length * 0.97),
+                )
+                error = ""
+                ans = await self._generate_async(msg_fit)
+                msg_fit.pop(0)
+                if ans.find("**ERROR**") >= 0:
+                    logging.error(f"LLM response error: {ans}")
+                    error = ans
+                    continue
+                try:
+                    self.set_output("structured", json_repair.loads(clean_formated_answer(ans)))
+                    return
+                except Exception:
+                    msg_fit.append({"role": "user", "content": "The answer can't not be parsed as JSON"})
+                    error = "The answer can't not be parsed as JSON"
+            if error:
+                self.set_output("_ERROR", error)
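A deliberately simplified view of the delta() windowing used throughout the streaming code above (hypothetical cumulative snapshots): each model chunk is the full text so far, and delta() slices out only the new suffix, with a carve-out so a trailing </think> stays in the next diff window. The real delta() additionally strips or re-emits the tags themselves.

snapshots = ["<think>pla", "<think>plan</think>", "<think>plan</think>Answer"]
last_idx = 0
for snap in snapshots:
    piece = snap[last_idx:]          # new text since the last snapshot
    last_idx = len(snap)
    if snap.endswith("</think>"):
        last_idx -= len("</think>")  # keep the closing tag visible to the next slice
    print(repr(piece))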
@ -222,15 +373,23 @@ class LLM(ComponentBase):

         downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
         ex = self.exception_handler()
-        if any([self._canvas.get_component_obj(cid).component_name.lower()=="message" for cid in downstreams]) and not self._param.output_structure and not (ex and ex["goto"]):
-            self.set_output("content", partial(self._stream_output, prompt, msg))
+        if any([self._canvas.get_component_obj(cid).component_name.lower() == "message" for cid in downstreams]) and not (
+            ex and ex["goto"]
+        ):
+            self.set_output("content", partial(self._stream_output_async, prompt, deepcopy(msg)))
             return

-        for _ in range(self._param.max_retries+1):
-            _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
+        error = ""
+        for _ in range(self._param.max_retries + 1):
+            if self.check_if_canceled("LLM processing"):
+                return
+
+            _, msg_fit = message_fit_in(
+                [{"role": "system", "content": prompt}, *deepcopy(msg)], int(self.chat_mdl.max_length * 0.97)
+            )
             error = ""
-            ans = self._generate(msg)
-            msg.pop(0)
+            ans = await self._generate_async(msg_fit)
+            msg_fit.pop(0)
             if ans.find("**ERROR**") >= 0:
                 logging.error(f"LLM response error: {ans}")
                 error = ans
@ -244,26 +403,15 @@ class LLM(ComponentBase):
             else:
                 self.set_output("_ERROR", error)

-    def _stream_output(self, prompt, msg):
-        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
-        answer = ""
-        for ans in self._generate_streamly(msg):
-            if ans.find("**ERROR**") >= 0:
-                if self.get_exception_default_value():
-                    self.set_output("content", self.get_exception_default_value())
-                    yield self.get_exception_default_value()
-                else:
-                    self.set_output("_ERROR", ans)
-                return
-            yield ans
-            answer += ans
-        self.set_output("content", answer)
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
+    def _invoke(self, **kwargs):
+        return asyncio.run(self._invoke_async(**kwargs))

-    def add_memory(self, user:str, assist:str, func_name: str, params: dict, results: str):
-        summ = tool_call_summary(self.chat_mdl, func_name, params, results)
+    def add_memory(self, user:str, assist:str, func_name: str, params: dict, results: str, user_defined_prompt:dict={}):
+        summ = tool_call_summary(self.chat_mdl, func_name, params, results, user_defined_prompt)
         logging.info(f"[MEMORY]: {summ}")
         self._canvas.add_memory(user, assist, summ)

     def thoughts(self) -> str:
-        _, msg = self._prepare_prompt_variables()
+        _, msg, _ = self._prepare_prompt_variables()
         return "⌛Give me a moment—starting from: \n\n" + re.sub(r"(User's query:|[\\]+)", '', msg[-1]['content'], flags=re.DOTALL) + "\n\nI’ll figure out our best next move."
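The sync/async bridge above follows the standard pattern (sketch, not the component): a synchronous entry point drives an async implementation to completion.

import asyncio

async def _work():
    await asyncio.sleep(0)  # stand-in for the async chat calls
    return "done"

def invoke():
    # mirrors LLM._invoke delegating to _invoke_async
    return asyncio.run(_work())

print(invoke())  # done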
80  agent/component/loop.py  Normal file
@ -0,0 +1,80 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class LoopParam(ComponentParamBase):
    """
    Define the Loop component parameters.
    """

    def __init__(self):
        super().__init__()
        self.loop_variables = []
        self.loop_termination_condition = []
        self.maximum_loop_count = 0

    def get_input_form(self) -> dict[str, dict]:
        return {
            "items": {
                "type": "json",
                "name": "Items"
            }
        }

    def check(self):
        return True


class Loop(ComponentBase, ABC):
    component_name = "Loop"

    def get_start(self):
        for cid in self._canvas.components.keys():
            if self._canvas.get_component(cid)["obj"].component_name.lower() != "loopitem":
                continue
            if self._canvas.get_component(cid)["parent_id"] == self._id:
                return cid

    def _invoke(self, **kwargs):
        if self.check_if_canceled("Loop processing"):
            return

        for item in self._param.loop_variables:
            if any([not item.get("variable"), not item.get("input_mode"), not item.get("value"), not item.get("type")]):
                assert False, "Loop Variable is not complete."
            if item["input_mode"] == "variable":
                self.set_output(item["variable"], self._canvas.get_variable_value(item["value"]))
            elif item["input_mode"] == "constant":
                self.set_output(item["variable"], item["value"])
            else:
                if item["type"] == "number":
                    self.set_output(item["variable"], 0)
                elif item["type"] == "string":
                    self.set_output(item["variable"], "")
                elif item["type"] == "boolean":
                    self.set_output(item["variable"], False)
                elif item["type"].startswith("object"):
                    self.set_output(item["variable"], {})
                elif item["type"].startswith("array"):
                    self.set_output(item["variable"], [])
                else:
                    self.set_output(item["variable"], "")

    def thoughts(self) -> str:
        return "Loop from canvas."
163  agent/component/loopitem.py  Normal file
@ -0,0 +1,163 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class LoopItemParam(ComponentParamBase):
    """
    Define the LoopItem component parameters.
    """
    def check(self):
        return True


class LoopItem(ComponentBase, ABC):
    component_name = "LoopItem"

    def __init__(self, canvas, id, param: ComponentParamBase):
        super().__init__(canvas, id, param)
        self._idx = 0

    def _invoke(self, **kwargs):
        if self.check_if_canceled("LoopItem processing"):
            return
        parent = self.get_parent()
        maximum_loop_count = parent._param.maximum_loop_count
        if self._idx >= maximum_loop_count:
            self._idx = -1
            return
        if self._idx > 0:
            if self.check_if_canceled("LoopItem processing"):
                return
        self._idx += 1

    def evaluate_condition(self, var, operator, value):
        if isinstance(var, str):
            if operator == "contains":
                return value in var
            elif operator == "not contains":
                return value not in var
            elif operator == "start with":
                return var.startswith(value)
            elif operator == "end with":
                return var.endswith(value)
            elif operator == "is":
                return var == value
            elif operator == "is not":
                return var != value
            elif operator == "empty":
                return var == ""
            elif operator == "not empty":
                return var != ""

        elif isinstance(var, (int, float)):
            if operator == "=":
                return var == value
            elif operator == "≠":
                return var != value
            elif operator == ">":
                return var > value
            elif operator == "<":
                return var < value
            elif operator == "≥":
                return var >= value
            elif operator == "≤":
                return var <= value
            elif operator == "empty":
                return var is None
            elif operator == "not empty":
                return var is not None

        elif isinstance(var, bool):
            if operator == "is":
                return var is value
            elif operator == "is not":
                return var is not value
            elif operator == "empty":
                return var is None
            elif operator == "not empty":
                return var is not None

        elif isinstance(var, dict):
            if operator == "empty":
                return len(var) == 0
            elif operator == "not empty":
                return len(var) > 0

        elif isinstance(var, list):
            if operator == "contains":
                return value in var
            elif operator == "not contains":
                return value not in var
            elif operator == "is":
                return var == value
            elif operator == "is not":
                return var != value
            elif operator == "empty":
                return len(var) == 0
            elif operator == "not empty":
                return len(var) > 0

        raise Exception(f"Invalid operator: {operator}")

    def end(self):
        if self._idx == -1:
            return True
        parent = self.get_parent()
        logical_operator = parent._param.logical_operator if hasattr(parent._param, "logical_operator") else "and"
        conditions = []
        for item in parent._param.loop_termination_condition:
            if not item.get("variable") or not item.get("operator"):
                raise ValueError("Loop condition is incomplete.")
            var = self._canvas.get_variable_value(item["variable"])
            operator = item["operator"]
            input_mode = item.get("input_mode", "constant")

            if input_mode == "variable":
                value = self._canvas.get_variable_value(item.get("value", ""))
            elif input_mode == "constant":
                value = item.get("value", "")
            else:
                raise ValueError("Invalid input mode.")
            conditions.append(self.evaluate_condition(var, operator, value))
        should_end = (
            all(conditions) if logical_operator == "and"
            else any(conditions) if logical_operator == "or"
            else None
        )
        if should_end is None:
            raise ValueError("Invalid logical operator, should be 'and' or 'or'.")

        if should_end:
            self._idx = -1
            return True

        return False

    def next(self):
        if self._idx == -1:
            self._idx = 0
        else:
            self._idx += 1
            if self._idx >= len(self._items):
                self._idx = -1
                return False

    def thoughts(self) -> str:
        return "Next turn..."
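An illustrative use of evaluate_condition's operator table (hypothetical values): the operator strings mirror the UI labels, including the Unicode comparators.

def evaluate(var, operator, value):
    # tiny excerpt of the numeric branch above
    if operator == "≥":
        return var >= value
    if operator == "≤":
        return var <= value
    return var == value

print(evaluate(3, "≥", 2))  # True -> this termination condition is met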
@ -13,17 +13,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import asyncio
+import inspect
 import json
 import os
 import random
 import re
+import logging
+import tempfile
 from functools import partial
 from typing import Any

 from agent.component.base import ComponentBase, ComponentParamBase
 from jinja2 import Template as Jinja2Template

-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
+from common.misc_utils import get_uuid
+from common import settings


 class MessageParam(ComponentParamBase):
@ -34,6 +40,8 @@ class MessageParam(ComponentParamBase):
         super().__init__()
         self.content = []
         self.stream = True
+        self.output_format = None  # default output format
+        self.auto_play = False
         self.outputs = {
             "content": {
                 "type": "str"
@ -49,17 +57,26 @@ class MessageParam(ComponentParamBase):
 class Message(ComponentBase):
     component_name = "Message"

-    def get_kwargs(self, script:str, kwargs:dict = {}, delimeter:str=None) -> tuple[str, dict[str, str | list | Any]]:
+    def get_input_elements(self) -> dict[str, Any]:
+        return self.get_input_elements_from_text("".join(self._param.content))
+
+    def get_kwargs(self, script:str, kwargs:dict = {}, delimiter:str=None) -> tuple[str, dict[str, str | list | Any]]:
         for k,v in self.get_input_elements_from_text(script).items():
             if k in kwargs:
                 continue
             v = v["value"]
+            if not v:
+                v = ""
             ans = ""
             if isinstance(v, partial):
-                for t in v():
-                    ans += t
-            elif isinstance(v, list) and delimeter:
-                ans = delimeter.join([str(vv) for vv in v])
+                iter_obj = v()
+                if inspect.isasyncgen(iter_obj):
+                    ans = asyncio.run(self._consume_async_gen(iter_obj))
+                else:
+                    for t in iter_obj:
+                        ans += t
+            elif isinstance(v, list) and delimiter:
+                ans = delimiter.join([str(vv) for vv in v])
             elif not isinstance(v, str):
                 try:
                     ans = json.dumps(v, ensure_ascii=False)
@ -79,11 +96,20 @@ class Message(ComponentBase):
             _kwargs[_n] = v
         return script, _kwargs

-    def _stream(self, rand_cnt:str):
+    async def _consume_async_gen(self, agen):
+        buf = ""
+        async for t in agen:
+            buf += t
+        return buf
+
+    async def _stream(self, rand_cnt:str):
         s = 0
         all_content = ""
         cache = {}
         for r in re.finditer(self.variable_ref_patt, rand_cnt, flags=re.DOTALL):
+            if self.check_if_canceled("Message streaming"):
+                return
+
             all_content += rand_cnt[s: r.start()]
             yield rand_cnt[s: r.start()]
             s = r.end()
@ -94,28 +120,50 @@ class Message(ComponentBase):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
v = self._canvas.get_variable_value(exp)
|
v = self._canvas.get_variable_value(exp)
|
||||||
|
if v is None:
|
||||||
|
v = ""
|
||||||
if isinstance(v, partial):
|
if isinstance(v, partial):
|
||||||
cnt = ""
|
cnt = ""
|
||||||
for t in v():
|
iter_obj = v()
|
||||||
all_content += t
|
if inspect.isasyncgen(iter_obj):
|
||||||
cnt += t
|
async for t in iter_obj:
|
||||||
yield t
|
if self.check_if_canceled("Message streaming"):
|
||||||
|
return
|
||||||
|
|
||||||
|
all_content += t
|
||||||
|
cnt += t
|
||||||
|
yield t
|
||||||
|
else:
|
||||||
|
for t in iter_obj:
|
||||||
|
if self.check_if_canceled("Message streaming"):
|
||||||
|
return
|
||||||
|
|
||||||
|
all_content += t
|
||||||
|
cnt += t
|
||||||
|
yield t
|
||||||
|
self.set_input_value(exp, cnt)
|
||||||
continue
|
continue
|
||||||
|
elif inspect.isawaitable(v):
|
||||||
|
v = await v
|
||||||
elif not isinstance(v, str):
|
elif not isinstance(v, str):
|
||||||
try:
|
try:
|
||||||
v = json.dumps(v, ensure_ascii=False, indent=2)
|
v = json.dumps(v, ensure_ascii=False)
|
||||||
except Exception:
|
except Exception:
|
||||||
v = str(v)
|
v = str(v)
|
||||||
yield v
|
yield v
|
||||||
|
self.set_input_value(exp, v)
|
||||||
all_content += v
|
all_content += v
|
||||||
cache[exp] = v
|
cache[exp] = v
|
||||||
|
|
||||||
if s < len(rand_cnt):
|
if s < len(rand_cnt):
|
||||||
|
if self.check_if_canceled("Message streaming"):
|
||||||
|
return
|
||||||
|
|
||||||
all_content += rand_cnt[s: ]
|
all_content += rand_cnt[s: ]
|
||||||
yield rand_cnt[s: ]
|
yield rand_cnt[s: ]
|
||||||
|
|
||||||
self.set_output("content", all_content)
|
self.set_output("content", all_content)
|
||||||
|
self._convert_content(all_content)
|
||||||
|
|
||||||
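Note: the changed get_kwargs()/_stream() pair above now has to cope with upstream components whose partial() may produce either a sync or an async generator. A minimal, self-contained sketch of that dispatch (the names drain/_drain_async are illustrative only, not ragflow APIs):

import asyncio
import inspect

async def _drain_async(agen) -> str:
    # Accumulate every chunk an async generator yields.
    buf = ""
    async for t in agen:
        buf += t
    return buf

def drain(gen_factory) -> str:
    it = gen_factory()
    if inspect.isasyncgen(it):
        # asyncio.run() is only safe when no event loop is already
        # running in this thread, which is what get_kwargs() assumes.
        return asyncio.run(_drain_async(it))
    return "".join(it)

async def _aproducer():
    for t in ("Hello", ", ", "world"):
        yield t

def _producer():
    yield from ("Hello", ", ", "world")

assert drain(_aproducer) == drain(_producer) == "Hello, world"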
     def _is_jinjia2(self, content:str) -> bool:
         patt = [
@@ -123,8 +171,11 @@ class Message(ComponentBase):
         ]
         return any([re.search(p, content) for p in patt])

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Message processing"):
+            return
+
         rand_cnt = random.choice(self._param.content)
         if self._param.stream and not self._is_jinjia2(rand_cnt):
             self.set_output("content", partial(self._stream, rand_cnt))
@@ -137,10 +188,79 @@ class Message(ComponentBase):
             except Exception:
                 pass

+        if self.check_if_canceled("Message processing"):
+            return
+
         for n, v in kwargs.items():
             content = re.sub(n, v, content)

         self.set_output("content", content)
+        self._convert_content(content)

     def thoughts(self) -> str:
         return ""
+
+    def _convert_content(self, content):
+        if not self._param.output_format:
+            return
+
+        import pypandoc
+        doc_id = get_uuid()
+
+        if self._param.output_format.lower() not in {"markdown", "html", "pdf", "docx"}:
+            self._param.output_format = "markdown"
+
+        try:
+            if self._param.output_format in {"markdown", "html"}:
+                if isinstance(content, str):
+                    converted = pypandoc.convert_text(
+                        content,
+                        to=self._param.output_format,
+                        format="markdown",
+                    )
+                else:
+                    converted = pypandoc.convert_file(
+                        content,
+                        to=self._param.output_format,
+                        format="markdown",
+                    )
+
+                binary_content = converted.encode("utf-8")
+
+            else:  # pdf, docx
+                with tempfile.NamedTemporaryFile(suffix=f".{self._param.output_format}", delete=False) as tmp:
+                    tmp_name = tmp.name
+
+                try:
+                    if isinstance(content, str):
+                        pypandoc.convert_text(
+                            content,
+                            to=self._param.output_format,
+                            format="markdown",
+                            outputfile=tmp_name,
+                        )
+                    else:
+                        pypandoc.convert_file(
+                            content,
+                            to=self._param.output_format,
+                            format="markdown",
+                            outputfile=tmp_name,
+                        )
+
+                    with open(tmp_name, "rb") as f:
+                        binary_content = f.read()
+
+                finally:
+                    if os.path.exists(tmp_name):
+                        os.remove(tmp_name)
+
+            settings.STORAGE_IMPL.put(self._canvas._tenant_id, doc_id, binary_content)
+            self.set_output("attachment", {
+                "doc_id": doc_id,
+                "format": self._param.output_format,
+                "file_name": f"{doc_id[:8]}.{self._param.output_format}"})
+
+            logging.info(f"Converted content uploaded as {doc_id} (format={self._param.output_format})")
+
+        except Exception as e:
+            logging.error(f"Error converting content to {self._param.output_format}: {e}")
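The new _convert_content() above leans on pypandoc's two conversion entry points: convert_text() returns markdown/html output as a string, while pdf/docx are binary formats that must be written through outputfile. A hedged sketch of just that step (markdown_to_bytes is an illustrative name; this requires the pypandoc package, a pandoc binary, and a PDF engine for pdf output):

import os
import tempfile
import pypandoc

def markdown_to_bytes(content: str, fmt: str) -> bytes:
    if fmt in {"markdown", "html"}:
        # Text formats come back directly as a str.
        return pypandoc.convert_text(content, to=fmt, format="markdown").encode("utf-8")
    # pandoc refuses to stream binary formats to stdout, hence the temp file.
    with tempfile.NamedTemporaryFile(suffix=f".{fmt}", delete=False) as tmp:
        name = tmp.name
    try:
        pypandoc.convert_text(content, to=fmt, format="markdown", outputfile=name)
        with open(name, "rb") as f:
            return f.read()
    finally:
        os.remove(name)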
agent/component/string_transform.py

@@ -16,9 +16,11 @@
 import os
 import re
 from abc import ABC
+from typing import Any

 from jinja2 import Template as Jinja2Template
 from agent.component.base import ComponentParamBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 from .message import Message


@@ -43,6 +45,9 @@ class StringTransformParam(ComponentParamBase):
 class StringTransform(Message, ABC):
     component_name = "StringTransform"

+    def get_input_elements(self) -> dict[str, Any]:
+        return self.get_input_elements_from_text(self._param.script)
+
     def get_input_form(self) -> dict[str, dict]:
         if self._param.method == "split":
             return {
@@ -56,19 +61,26 @@ class StringTransform(Message, ABC):
                 "type": "line"
             } for k, o in self.get_input_elements_from_text(self._param.script).items()}

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("StringTransform processing"):
+            return
+
         if self._param.method == "split":
             self._split(kwargs.get("line"))
         else:
             self._merge(kwargs)

     def _split(self, line:str|None = None):
+        if self.check_if_canceled("StringTransform split processing"):
+            return
+
         var = self._canvas.get_variable_value(self._param.split_ref) if not line else line
         if not var:
             var = ""
         assert isinstance(var, str), "The input variable is not a string: {}".format(type(var))
         self.set_input_value(self._param.split_ref, var)

         res = []
         for i,s in enumerate(re.split(r"(%s)"%("|".join([re.escape(d) for d in self._param.delimiters])), var, flags=re.DOTALL)):
             if i % 2 == 1:
@@ -77,6 +89,9 @@ class StringTransform(Message, ABC):
         self.set_output("result", res)

     def _merge(self, kwargs:dict[str, str] = {}):
+        if self.check_if_canceled("StringTransform merge processing"):
+            return
+
         script = self._param.script
         script, kwargs = self.get_kwargs(script, kwargs, self._param.delimiters[0])

@@ -90,7 +105,7 @@ class StringTransform(Message, ABC):
         for k,v in kwargs.items():
             if not v:
                 v = ""
-            script = re.sub(k, v, script)
+            script = re.sub(k, lambda match: v, script)

         self.set_output("result", script)

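The one-line _merge() fix above is worth spelling out: with a string replacement, re.sub() interprets backslashes and \g<...> group references inside the value, so user data containing "\" can raise re.error or silently corrupt the result, whereas a callable replacement is inserted verbatim. A quick demonstration (values are made up):

import re

script = "begin@answer"
value = r"C:\Users\nobody"  # backslashes are common in real payloads

# re.sub("begin@answer", value, script) raises re.error ("bad escape \U")
safe = re.sub("begin@answer", lambda match: value, script)
assert safe == value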
agent/component/switch.py

@@ -19,7 +19,7 @@ from abc import ABC
 from typing import Any

 from agent.component.base import ComponentBase, ComponentParamBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout


 class SwitchParam(ComponentParamBase):
@@ -61,11 +61,20 @@ class SwitchParam(ComponentParamBase):
 class Switch(ComponentBase, ABC):
     component_name = "Switch"

-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Switch processing"):
+            return
+
         for cond in self._param.conditions:
+            if self.check_if_canceled("Switch processing"):
+                return
+
             res = []
             for item in cond["items"]:
+                if self.check_if_canceled("Switch processing"):
+                    return
+
                 if not item["cpn_id"]:
                     continue
                 cpn_v = self._canvas.get_variable_value(item["cpn_id"])
@@ -128,4 +137,4 @@ class Switch(ComponentBase, ABC):
         raise ValueError('Not supported operator' + operator)

     def thoughts(self) -> str:
         return "I’m weighing a few options and will pick the next step shortly."

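The @timeout(int(os.environ.get(...))) change repeated across these files fixes a latent type bug: os.environ.get() returns a str whenever the variable is set, so the integer default only masked the problem until COMPONENT_EXEC_TIMEOUT was actually configured. Minimal repro:

import os

os.environ["COMPONENT_EXEC_TIMEOUT"] = "3"
raw = os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)
assert isinstance(raw, str)  # "3", not 3; any arithmetic on it would break
assert int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)) == 3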
agent/component/varaiable_aggregator.py (new file, 84 lines)
@@ -0,0 +1,84 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any
import os

from common.connection_utils import timeout
from agent.component.base import ComponentBase, ComponentParamBase


class VariableAggregatorParam(ComponentParamBase):
    """
    Parameters for VariableAggregator

    - groups: list of dicts {"group_name": str, "variables": [variable selectors]}
    """

    def __init__(self):
        super().__init__()
        # each group expects: {"group_name": str, "variables": List[str]}
        self.groups = []

    def check(self):
        self.check_empty(self.groups, "[VariableAggregator] groups")
        for g in self.groups:
            if not g.get("group_name"):
                raise ValueError("[VariableAggregator] group_name can not be empty!")
            if not g.get("variables"):
                raise ValueError(
                    f"[VariableAggregator] variables of group `{g.get('group_name')}` can not be empty"
                )
            if not isinstance(g.get("variables"), list):
                raise ValueError(
                    f"[VariableAggregator] variables of group `{g.get('group_name')}` should be a list of strings"
                )

    def get_input_form(self) -> dict[str, dict]:
        return {
            "variables": {
                "name": "Variables",
                "type": "list",
            }
        }


class VariableAggregator(ComponentBase):
    component_name = "VariableAggregator"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)))
    def _invoke(self, **kwargs):
        # Group mode: for each group, pick the first available variable
        for group in self._param.groups:
            gname = group.get("group_name")

            # record candidate selectors within this group
            self.set_input_value(f"{gname}.variables", list(group.get("variables", [])))
            for selector in group.get("variables", []):
                val = self._canvas.get_variable_value(selector['value'])
                if val:
                    self.set_output(gname, val)
                    break

    @staticmethod
    def _to_object(value: Any) -> Any:
        # Try to convert value to serializable object if it has to_object()
        try:
            return value.to_object()  # type: ignore[attr-defined]
        except Exception:
            return value

    def thoughts(self) -> str:
        return "Aggregating variables from canvas and grouping as configured."
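VariableAggregator._invoke() implements first-non-empty selection per group. A self-contained illustration with the canvas lookup stubbed out as a plain dict (first_available and the selector strings are made up for this sketch):

def first_available(canvas: dict, group: dict):
    for selector in group.get("variables", []):
        val = canvas.get(selector["value"])
        if val:  # falsy values ("", [], None, 0) are skipped, as in _invoke()
            return val
    return None

canvas = {"Agent:A@content": "", "Agent:B@content": "fallback answer"}
group = {
    "group_name": "answer",
    "variables": [{"value": "Agent:A@content"}, {"value": "Agent:B@content"}],
}
assert first_available(canvas, group) == "fallback answer"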
agent/component/variable_assigner.py (new file, 192 lines)
@@ -0,0 +1,192 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import os
import numbers
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout


class VariableAssignerParam(ComponentParamBase):
    """
    Define the Variable Assigner component parameters.
    """
    def __init__(self):
        super().__init__()
        self.variables=[]

    def check(self):
        return True

    def get_input_form(self) -> dict[str, dict]:
        return {
            "items": {
                "type": "json",
                "name": "Items"
            }
        }


class VariableAssigner(ComponentBase,ABC):
    component_name = "VariableAssigner"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        if not isinstance(self._param.variables,list):
            return
        else:
            for item in self._param.variables:
                if any([not item.get("variable"), not item.get("operator"), not item.get("parameter")]):
                    assert "Variable is not complete."
                variable=item["variable"]
                operator=item["operator"]
                parameter=item["parameter"]
                variable_value=self._canvas.get_variable_value(variable)
                new_variable=self._operate(variable_value,operator,parameter)
                self._canvas.set_variable_value(variable, new_variable)

    def _operate(self,variable,operator,parameter):
        if operator == "overwrite":
            return self._overwrite(parameter)
        elif operator == "clear":
            return self._clear(variable)
        elif operator == "set":
            return self._set(variable,parameter)
        elif operator == "append":
            return self._append(variable,parameter)
        elif operator == "extend":
            return self._extend(variable,parameter)
        elif operator == "remove_first":
            return self._remove_first(variable)
        elif operator == "remove_last":
            return self._remove_last(variable)
        elif operator == "+=":
            return self._add(variable,parameter)
        elif operator == "-=":
            return self._subtract(variable,parameter)
        elif operator == "*=":
            return self._multiply(variable,parameter)
        elif operator == "/=":
            return self._divide(variable,parameter)
        else:
            return

    def _overwrite(self,parameter):
        return self._canvas.get_variable_value(parameter)

    def _clear(self,variable):
        if isinstance(variable,list):
            return []
        elif isinstance(variable,str):
            return ""
        elif isinstance(variable,dict):
            return {}
        elif isinstance(variable,int):
            return 0
        elif isinstance(variable,float):
            return 0.0
        elif isinstance(variable,bool):
            return False
        else:
            return None

    def _set(self,variable,parameter):
        if variable is None:
            return self._canvas.get_value_with_variable(parameter)
        elif isinstance(variable,str):
            return self._canvas.get_value_with_variable(parameter)
        elif isinstance(variable,bool):
            return parameter
        elif isinstance(variable,int):
            return parameter
        elif isinstance(variable,float):
            return parameter
        else:
            return parameter

    def _append(self,variable,parameter):
        parameter=self._canvas.get_variable_value(parameter)
        if variable is None:
            variable=[]
        if not isinstance(variable,list):
            return "ERROR:VARIABLE_NOT_LIST"
        elif len(variable)!=0 and not isinstance(parameter,type(variable[0])):
            return "ERROR:PARAMETER_NOT_LIST_ELEMENT_TYPE"
        else:
            variable.append(parameter)
            return variable

    def _extend(self,variable,parameter):
        parameter=self._canvas.get_variable_value(parameter)
        if variable is None:
            variable=[]
        if not isinstance(variable,list):
            return "ERROR:VARIABLE_NOT_LIST"
        elif not isinstance(parameter,list):
            return "ERROR:PARAMETER_NOT_LIST"
        elif len(variable)!=0 and len(parameter)!=0 and not isinstance(parameter[0],type(variable[0])):
            return "ERROR:PARAMETER_NOT_LIST_ELEMENT_TYPE"
        else:
            return variable + parameter

    def _remove_first(self,variable):
        if len(variable)==0:
            return variable
        if not isinstance(variable,list):
            return "ERROR:VARIABLE_NOT_LIST"
        else:
            return variable[1:]

    def _remove_last(self,variable):
        if len(variable)==0:
            return variable
        if not isinstance(variable,list):
            return "ERROR:VARIABLE_NOT_LIST"
        else:
            return variable[:-1]

    def is_number(self, value):
        if isinstance(value, bool):
            return False
        return isinstance(value, numbers.Number)

    def _add(self,variable,parameter):
        if self.is_number(variable) and self.is_number(parameter):
            return variable + parameter
        else:
            return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"

    def _subtract(self,variable,parameter):
        if self.is_number(variable) and self.is_number(parameter):
            return variable - parameter
        else:
            return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"

    def _multiply(self,variable,parameter):
        if self.is_number(variable) and self.is_number(parameter):
            return variable * parameter
        else:
            return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"

    def _divide(self,variable,parameter):
        if self.is_number(variable) and self.is_number(parameter):
            if parameter==0:
                return "ERROR:DIVIDE_BY_ZERO"
            else:
                return variable/parameter
        else:
            return "ERROR:VARIABLE_NOT_NUMBER or PARAMETER_NOT_NUMBER"

    def thoughts(self) -> str:
        return "Assign variables from canvas."
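Two things stand out in VariableAssigner above. First, `assert "Variable is not complete."` is a no-op (a non-empty string is always truthy), so an incomplete item falls through to a KeyError on item["variable"] rather than failing with that message. Second, type mismatches are signalled by returning "ERROR:..." strings instead of raising, so downstream components always receive a value. A trimmed sketch of that dispatch convention (operate() is illustrative; the real methods also resolve parameter through the canvas and type-check list elements):

def operate(variable, operator, parameter):
    if operator == "append":
        variable = variable if isinstance(variable, list) else []
        variable.append(parameter)
        return variable
    if operator == "/=":
        if parameter == 0:
            return "ERROR:DIVIDE_BY_ZERO"
        return variable / parameter
    return None  # unsupported operators yield None, mirroring the bare `return`

assert operate([1, 2], "append", 3) == [1, 2, 3]
assert operate(10, "/=", 0) == "ERROR:DIVIDE_BY_ZERO"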
agent/component/webhook.py (new file, 38 lines)
@@ -0,0 +1,38 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from agent.component.base import ComponentParamBase, ComponentBase


class WebhookParam(ComponentParamBase):

    """
    Define the Begin component parameters.
    """
    def __init__(self):
        super().__init__()

    def get_input_form(self) -> dict[str, dict]:
        return getattr(self, "inputs")


class Webhook(ComponentBase):
    component_name = "Webhook"

    def _invoke(self, **kwargs):
        pass

    def thoughts(self) -> str:
        return ""
agent/templates/advanced_ingestion_pipeline.json (new file, 728 lines): diff suppressed because one or more lines are too long
agent/templates/chunk_summary.json (new file, 495 lines): diff suppressed because one or more lines are too long
agent/templates/ecommerce_customer_service_workflow.json (new file, 1056 lines): diff suppressed because one or more lines are too long
@@ -1,7 +1,13 @@
 {
     "id": 8,
-    "title": "Generate SEO Blog",
-    "description": "This is a multi-agent version of the SEO blog generation workflow. It simulates a small team of AI “writers”, where each agent plays a specialized role — just like a real editorial team.",
+    "title": {
+        "en": "Generate SEO Blog",
+        "de": "SEO Blog generieren",
+        "zh": "生成SEO博客"},
+    "description": {
+        "en": "This is a multi-agent version of the SEO blog generation workflow. It simulates a small team of AI “writers”, where each agent plays a specialized role — just like a real editorial team.",
+        "de": "Dies ist eine Multi-Agenten-Version des Workflows zur Erstellung von SEO-Blogs. Sie simuliert ein kleines Team von KI-„Autoren“, in dem jeder Agent eine spezielle Rolle übernimmt – genau wie in einem echten Redaktionsteam.",
+        "zh": "多智能体架构可根据简单的用户输入自动生成完整的SEO博客文章。模拟小型“作家”团队,其中每个智能体扮演一个专业角色——就像真正的编辑团队。"},
     "canvas_type": "Agent",
     "dsl": {
         "components": {
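The template change above moves "title" and "description" from plain strings to per-language objects. A minimal sketch of how a consumer might resolve such a field with an English fallback (localized() is an illustrative name, not a ragflow API):

def localized(field, lang: str = "en") -> str:
    if isinstance(field, str):  # older templates still store a plain string
        return field
    return field.get(lang) or field.get("en") or next(iter(field.values()), "")

assert localized({"en": "Generate SEO Blog", "de": "SEO Blog generieren"}, "de") == "SEO Blog generieren"
assert localized("Generate SEO Blog", "zh") == "Generate SEO Blog"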
File diff suppressed because one or more lines are too long

agent/templates/knowledge_base_report.json (new file, 333 lines)
@@ -0,0 +1,333 @@
{
  "id": 20,
  "title": {
    "en": "Report Agent Using Knowledge Base",
    "de": "Berichtsagent mit Wissensdatenbank",
    "zh": "知识库检索智能体"
  },
  "description": {
    "en": "A report generation assistant using local knowledge base, with advanced capabilities in task planning, reasoning, and reflective analysis. Recommended for academic research paper Q&A",
    "de": "Ein Berichtsgenerierungsassistent, der eine lokale Wissensdatenbank nutzt, mit erweiterten Fähigkeiten in Aufgabenplanung, Schlussfolgerung und reflektierender Analyse. Empfohlen für akademische Forschungspapier-Fragen und -Antworten.",
    "zh": "一个使用本地知识库的报告生成助手,具备高级能力,包括任务规划、推理和反思性分析。推荐用于学术研究论文问答。"
  },
  "canvas_type": "Agent",
  "dsl": {
    "components": {
      "Agent:NewPumasLick": {
        "downstream": ["Message:OrangeYearsShine"],
        "obj": {
          "component_name": "Agent",
          "params": {
            "delay_after_error": 1,
            "description": "",
            "exception_comment": "",
            "exception_default_value": "",
            "exception_goto": [],
            "exception_method": null,
            "frequencyPenaltyEnabled": false,
            "frequency_penalty": 0.5,
            "llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
            "maxTokensEnabled": true,
            "max_retries": 3,
            "max_rounds": 3,
            "max_tokens": 128000,
            "mcp": [],
            "message_history_window_size": 12,
            "outputs": {"content": {"type": "string", "value": ""}},
            "parameter": "Precise",
            "presencePenaltyEnabled": false,
            "presence_penalty": 0.5,
            "prompts": [{"content": "# User Query\n {sys.query}", "role": "user"}],
            "sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. **LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
            "temperature": "0.1",
            "temperatureEnabled": true,
            "tools": [
              {
                "component_name": "Retrieval",
                "name": "Retrieval",
                "params": {
                  "cross_languages": [],
                  "description": "",
                  "empty_response": "",
                  "kb_ids": [],
                  "keywords_similarity_weight": 0.7,
                  "outputs": {"formalized_content": {"type": "string", "value": ""}},
                  "rerank_id": "",
                  "similarity_threshold": 0.2,
                  "top_k": 1024,
                  "top_n": 8,
                  "use_kg": false
                }
              }
            ],
            "topPEnabled": false,
            "top_p": 0.75,
            "user_prompt": "",
            "visual_files_var": ""
          }
        },
        "upstream": ["begin"]
      },
      "Message:OrangeYearsShine": {
        "downstream": [],
        "obj": {
          "component_name": "Message",
          "params": {"content": ["{Agent:NewPumasLick@content}"]}
        },
        "upstream": ["Agent:NewPumasLick"]
      },
      "begin": {
        "downstream": ["Agent:NewPumasLick"],
        "obj": {
          "component_name": "Begin",
          "params": {
            "enablePrologue": true,
            "inputs": {},
            "mode": "conversational",
            "prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
          }
        },
        "upstream": []
      }
    },
    "globals": {
      "sys.conversation_turns": 0,
      "sys.files": [],
      "sys.query": "",
      "sys.user_id": ""
    },
    "graph": {
      "edges": [
        {
          "data": {"isHovered": false},
          "id": "xy-edge__beginstart-Agent:NewPumasLickend",
          "source": "begin",
          "sourceHandle": "start",
          "target": "Agent:NewPumasLick",
          "targetHandle": "end"
        },
        {
          "data": {"isHovered": false},
          "id": "xy-edge__Agent:NewPumasLickstart-Message:OrangeYearsShineend",
          "markerEnd": "logo",
          "source": "Agent:NewPumasLick",
          "sourceHandle": "start",
          "style": {"stroke": "rgba(91, 93, 106, 1)", "strokeWidth": 1},
          "target": "Message:OrangeYearsShine",
          "targetHandle": "end",
          "type": "buttonEdge",
          "zIndex": 1001
        },
        {
          "data": {"isHovered": false},
          "id": "xy-edge__Agent:NewPumasLicktool-Tool:AllBirdsNailend",
          "selected": false,
          "source": "Agent:NewPumasLick",
          "sourceHandle": "tool",
          "target": "Tool:AllBirdsNail",
          "targetHandle": "end"
        }
      ],
      "nodes": [
        {
          "data": {
            "form": {
              "enablePrologue": true,
              "inputs": {},
              "mode": "conversational",
              "prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
            },
            "label": "Begin",
            "name": "begin"
          },
          "dragging": false,
          "id": "begin",
          "measured": {"height": 48, "width": 200},
          "position": {"x": -9.569875358221438, "y": 205.84018385864917},
          "selected": false,
          "sourcePosition": "left",
          "targetPosition": "right",
          "type": "beginNode"
        },
        {
          "data": {
            "form": {"content": ["{Agent:NewPumasLick@content}"]},
            "label": "Message",
            "name": "Response"
          },
          "dragging": false,
          "id": "Message:OrangeYearsShine",
          "measured": {"height": 56, "width": 200},
          "position": {"x": 734.4061285881053, "y": 199.9706031723009},
          "selected": false,
          "sourcePosition": "right",
          "targetPosition": "left",
          "type": "messageNode"
        },
        {
          "data": {
            "form": {
              "delay_after_error": 1,
              "description": "",
              "exception_comment": "",
              "exception_default_value": "",
              "exception_goto": [],
              "exception_method": null,
              "frequencyPenaltyEnabled": false,
              "frequency_penalty": 0.5,
              "llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
              "maxTokensEnabled": true,
              "max_retries": 3,
              "max_rounds": 3,
              "max_tokens": 128000,
              "mcp": [],
              "message_history_window_size": 12,
              "outputs": {"content": {"type": "string", "value": ""}},
              "parameter": "Precise",
              "presencePenaltyEnabled": false,
              "presence_penalty": 0.5,
              "prompts": [{"content": "# User Query\n {sys.query}", "role": "user"}],
              "sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. **LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
              "temperature": "0.1",
              "temperatureEnabled": true,
              "tools": [
                {
                  "component_name": "Retrieval",
                  "name": "Retrieval",
                  "params": {
                    "cross_languages": [],
                    "description": "",
                    "empty_response": "",
                    "kb_ids": [],
                    "keywords_similarity_weight": 0.7,
                    "outputs": {"formalized_content": {"type": "string", "value": ""}},
                    "rerank_id": "",
                    "similarity_threshold": 0.2,
                    "top_k": 1024,
                    "top_n": 8,
                    "use_kg": false
                  }
                }
              ],
              "topPEnabled": false,
              "top_p": 0.75,
              "user_prompt": "",
              "visual_files_var": ""
            },
            "label": "Agent",
            "name": "Knowledge Base Agent"
          },
          "dragging": false,
          "id": "Agent:NewPumasLick",
          "measured": {"height": 84, "width": 200},
          "position": {"x": 347.00048227952215, "y": 186.49109364794631},
          "selected": false,
          "sourcePosition": "right",
          "targetPosition": "left",
          "type": "agentNode"
        },
        {
          "data": {
            "form": {
              "description": "This is an agent for a specific task.",
              "user_prompt": "This is the order you need to send to the agent."
            },
            "label": "Tool",
            "name": "flow.tool_10"
          },
          "dragging": false,
          "id": "Tool:AllBirdsNail",
          "measured": {"height": 48, "width": 200},
          "position": {"x": 220.24819746977118, "y": 403.31576836482583},
          "selected": false,
          "sourcePosition": "right",
          "targetPosition": "left",
          "type": "toolNode"
        }
      ]
    },
    "history": [],
    "memory": [],
    "messages": [],
    "path": [],
    "retrieval": []
  },
  "avatar": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAH0klEQVR4nO2ZC1BU1wGG/3uRp/IygG+DGK0GOjE1U6cxI4tT03Y0E+kENbaJbKpj60wzgNMwnTjuEtu0miGasY+0krI202kMVEnVxtoOLG00oVa0LajVBDcSEI0REFBgkZv/3GWXfdzdvctuHs7kmzmec9//d+45510XCXc4Xwjk1+59VJGGF7C5QAFSWBvgyWmWLl7IKiny6QNL173B5YjB84bOyrpKA4B1DLySdQpLKAiZGtZ7a/KMVoQJz6UfEZyhTWwaEBmssiLvCueu6BJg8EwFqGTTAC+uvNWC9w82sRWcux/JwaSHstjywcogRt4RG0KExwWG4QsVYCebKSwe3L5lR9OOWjyzfg2WL/0a1/jncO3b2FHxGnKeWYqo+Giu8UEMrWJKWBACPMY/DG+63txhvnKshUu+DF2/hayMDFRsL+VScDb++AVc6OjAuInxXPJl2tfnIikrzUyJMi7qQmLRhOEr2fOFbX/7P6STF7BqoWevfdij4NWGQfx+57OYO2sG1wSnsek8Nm15EU8sikF6ouelXz9ph7JwDqYt+5IIZaGEkauDIrH4wPBmhjexCSEws+VdVG1M4NIoj+2xYzBuJtavWcEl/VS8dggx/ZdQvcGzQwp+cxOXsu5RBQQMVkYJM4LA/Txh+ELFMWFVPARS5kFiabZdx8Olh7l17BzdvhzZmROhdJ3j6D/nIyBgOCMlLAgA9xmF4TMV4BSbrgnrLiBl5rOsRCRRbDUsBzQFiJjY91PCBj9w+yiP1lXWsTLAjc9YQGB9I8+Yx1oTiUWFvW9QgDo2PdASaDp/EQ8/sRnhcPTVcuTMncXwQQVESL9DidscaPW+QEtAICRu9PSxFTpJiePV8AI9AsTvXZBY/Pa+wJ9ApNApIILm8S5Y4QXXQwhYFH6csemDP4G3G5v579i5d04mknknQhDYS4HCrCVr/mC3D305KnbCEpvVIia5Onw6WaWw+KAl0Np+FUXbdiMcyoqfUoeRHoFrJ1uRtnBG1/9Mf/3LtElp+VwF2wcd7woJib1vUPwMH4GWQCQJJtBa/V9cPmFD8uQUpMdNGDhY8bNYrobh8acHu270/l0ImJWRt64Wn6WACN9z5gq2lXwPW8pfweT0icP/fH23vO9QLYq3/QKyLBmFQI3CUcT9NdESEEPItKsSN3r7MBaSJoxHWZERM6ZmMLy2gDP8/pd/og418dTL37hFSUpMUC5f+UiWZcnY9s5+ixCwUiCXx2iiJdDNx6f4pgkH8Q3lbxK7h8+enoHha1cRNdMp8axiHxo6+/5bVdk8DSROYIW1X7QEIom3wHD3gEf4vu1bVYEJZeWQ0zJQvmcfyiv2QZak6raG/QWfK4Ez9mTc5v8xPMJfuojoxXmIX/9DOMe+FCWbcHu4BJJ0YEwCx0824bFNW9HesB+CqYu+jepfPYcHF+aoPXS8sQl/+vU2bgmOU2C+qRc9/YrrPPbGBtzavd0nvCxLxui4pJrBm911PFwak4CYA80cj+JCAiGUzYkmxrSY4N2c3GLi6UEIFL/wRxxqkhmHnTEpDQcrfq6ea+hcE8bNy3GFzyq4H22HW1Kd4WMSkg1jmsSRpKj0Rzhy4gNUv/y8Gjrv8SJK3OWScA+fMn/ysVPPvTmeh6nh1TcxBUJ+jEaKYr7N36x7h+Edj0pB6+WrLokn87+BrTt/p4ZPzZ6MM7/8R2//h33vOcNzdwgBMwVMbGvySQmo4a0NqOZccU7YmGXLEfPQUlUid/XT6B8YdIU/99vjsPcOdEhDsfOd4QVCwKB8yp8SWuG1njbTl83DpMWz1PCKAswuWPDI0e8WebyAJBbxNdrF7cls+hBpAb3h3XtehL/3+4u7D35rQwpP4YFTwMJ91rHpQyQFQgmf9sAMNL9Ur4afv/FBjIuPVj+n4YVTwMD96tj0IVICoYYXv/q1VJ1Sl8UveQyaRwErvOB6B5SwKhqP00gI6A0vhsycJ7/KIzxhyHqGN0ADbnNAAYOicRfCFdAb/p50Gbfuc/wy5w1D5lOghk0fuG0USlgVr7sQjoDe8C8WxKGKPy2KjzlvAQb02/sCbh+FApngX1QUtyeSuwDi0hxFByV7L+LIf3r5kvpp4PBr07Hqvn71Y85bgOG6WS2ggA1+4D6eUKKQApVsqngI6KSkqh9HzsoM/3zg8Oz5VQ9E8wjf30YFDGdkeAsCwH18oYRZGXk7C4HuYxcwe6rjQsFovzaEvoFxqNkTOPzMjGikJso8wsF77XYkLx6dAwxWxvBmBIH7aUMJi8J3w0DnTVz7dyvX6KPzVBt+kL8cmzesRq9ps2Z48bRJmOIapS7E4zM2lXNt5CcU6ID7+ocSZkqY2NRN6ysnsHbJEpR8ZwV6t5Yg+iuLELf2KVd48VwXQf3BQGUMb4ZOuH9gKFEIYJfiNrEDcXZHHV4q3YRv5i7ikgM94RlETNgihrcgBHhccCiRCf7VhBK5rAPyr9I/Y/WKPEyfksH/9NjQ2dODhsYzwcLXsypkeBtCRGLRDUUMAMyKHxEx4dtrzyP97nQMygripiQiKi4aSbPvQmKW7+OXF69ntYvBa1iPCYklZEZECsGm4ja0Ops7EJsaj4SprlU+8IJiqIjAFga3Ikx4vvAYkTGALxyWFArlsnbBC9Sz6mI5zWKNRGh3JJY7mjte4GOz+r4tkRbxQQAAAABJRU5ErkJggg=="
}
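For orientation, the template's dsl.components map doubles as the workflow graph: each entry records its own downstream/upstream edges. A small sketch that walks it (the path is assumed relative to the repo root; output order follows the JSON):

import json

with open("agent/templates/knowledge_base_report.json", encoding="utf-8") as f:
    tpl = json.load(f)

for name, node in tpl["dsl"]["components"].items():
    print(f'{name}: {node["obj"]["component_name"]} -> {node["downstream"]}')
# Agent:NewPumasLick: Agent -> ['Message:OrangeYearsShine']
# Message:OrangeYearsShine: Message -> []
# begin: Begin -> ['Agent:NewPumasLick']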
agent/templates/knowledge_base_report_r.json (new file, 333 lines)
@@ -0,0 +1,333 @@
{
  "id": 21,
  "title": {
    "en": "Report Agent Using Knowledge Base",
    "de": "Berichtsagent mit Wissensdatenbank",
    "zh": "知识库检索智能体"
  },
  "description": {
    "en": "A report generation assistant using local knowledge base, with advanced capabilities in task planning, reasoning, and reflective analysis. Recommended for academic research paper Q&A",
    "de": "Ein Berichtsgenerierungsassistent, der eine lokale Wissensdatenbank nutzt, mit erweiterten Fähigkeiten in Aufgabenplanung, Schlussfolgerung und reflektierender Analyse. Empfohlen für akademische Forschungspapier-Fragen und -Antworten.",
    "zh": "一个使用本地知识库的报告生成助手,具备高级能力,包括任务规划、推理和反思性分析。推荐用于学术研究论文问答。"
  },
  "canvas_type": "Recommended",
  "dsl": {
    "components": {
      "Agent:NewPumasLick": {
        "downstream": ["Message:OrangeYearsShine"],
        "obj": {
          "component_name": "Agent",
          "params": {
            "delay_after_error": 1,
            "description": "",
            "exception_comment": "",
            "exception_default_value": "",
            "exception_goto": [],
            "exception_method": null,
            "frequencyPenaltyEnabled": false,
            "frequency_penalty": 0.5,
            "llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
            "maxTokensEnabled": true,
            "max_retries": 3,
            "max_rounds": 3,
            "max_tokens": 128000,
            "mcp": [],
            "message_history_window_size": 12,
            "outputs": {"content": {"type": "string", "value": ""}},
            "parameter": "Precise",
            "presencePenaltyEnabled": false,
            "presence_penalty": 0.5,
            "prompts": [{"content": "# User Query\n {sys.query}", "role": "user"}],
            "sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. **LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
            "temperature": "0.1",
            "temperatureEnabled": true,
            "tools": [
              {
                "component_name": "Retrieval",
                "name": "Retrieval",
                "params": {
                  "cross_languages": [],
                  "description": "",
                  "empty_response": "",
                  "kb_ids": [],
                  "keywords_similarity_weight": 0.7,
                  "outputs": {"formalized_content": {"type": "string", "value": ""}},
                  "rerank_id": "",
                  "similarity_threshold": 0.2,
                  "top_k": 1024,
                  "top_n": 8,
                  "use_kg": false
                }
              }
            ],
            "topPEnabled": false,
            "top_p": 0.75,
            "user_prompt": "",
            "visual_files_var": ""
          }
        },
        "upstream": ["begin"]
      },
      "Message:OrangeYearsShine": {
        "downstream": [],
        "obj": {
          "component_name": "Message",
          "params": {"content": ["{Agent:NewPumasLick@content}"]}
        },
        "upstream": ["Agent:NewPumasLick"]
      },
      "begin": {
        "downstream": ["Agent:NewPumasLick"],
        "obj": {
          "component_name": "Begin",
          "params": {
            "enablePrologue": true,
            "inputs": {},
            "mode": "conversational",
            "prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
          }
        },
        "upstream": []
      }
    },
    "globals": {
      "sys.conversation_turns": 0,
      "sys.files": [],
      "sys.query": "",
      "sys.user_id": ""
    },
    "graph": {
      "edges": [
        {
          "data": {"isHovered": false},
          "id": "xy-edge__beginstart-Agent:NewPumasLickend",
          "source": "begin",
          "sourceHandle": "start",
          "target": "Agent:NewPumasLick",
          "targetHandle": "end"
        },
        {
          "data": {"isHovered": false},
          "id": "xy-edge__Agent:NewPumasLickstart-Message:OrangeYearsShineend",
          "markerEnd": "logo",
          "source": "Agent:NewPumasLick",
          "sourceHandle": "start",
          "style": {"stroke": "rgba(91, 93, 106, 1)", "strokeWidth": 1},
          "target": "Message:OrangeYearsShine",
          "targetHandle": "end",
          "type": "buttonEdge",
          "zIndex": 1001
        },
        {
          "data": {"isHovered": false},
          "id": "xy-edge__Agent:NewPumasLicktool-Tool:AllBirdsNailend",
          "selected": false,
          "source": "Agent:NewPumasLick",
          "sourceHandle": "tool",
          "target": "Tool:AllBirdsNail",
          "targetHandle": "end"
        }
      ],
      "nodes": [
        {
          "data": {
            "form": {
              "enablePrologue": true,
              "inputs": {},
              "mode": "conversational",
              "prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
            },
            "label": "Begin",
            "name": "begin"
          },
          "dragging": false,
          "id": "begin",
          "measured": {"height": 48, "width": 200},
          "position": {"x": -9.569875358221438, "y": 205.84018385864917},
          "selected": false,
          "sourcePosition": "left",
          "targetPosition": "right",
          "type": "beginNode"
        },
        {
          "data": {
            "form": {"content": ["{Agent:NewPumasLick@content}"]},
            "label": "Message",
            "name": "Response"
          },
          "dragging": false,
          "id": "Message:OrangeYearsShine",
          "measured": {"height": 56, "width": 200},
          "position": {"x": 734.4061285881053, "y": 199.9706031723009},
          "selected": false,
          "sourcePosition": "right",
          "targetPosition": "left",
          "type": "messageNode"
        },
        {
          "data": {
            "form": {
              "delay_after_error": 1,
              "description": "",
              "exception_comment": "",
              "exception_default_value": "",
              "exception_goto": [],
              "exception_method": null,
              "frequencyPenaltyEnabled": false,
              "frequency_penalty": 0.5,
              "llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
              "maxTokensEnabled": true,
              "max_retries": 3,
              "max_rounds": 3,
              "max_tokens": 128000,
              "mcp": [],
              "message_history_window_size": 12,
              "outputs": {"content": {"type": "string", "value": ""}},
              "parameter": "Precise",
              "presencePenaltyEnabled": false,
              "presence_penalty": 0.5,
              "prompts": [{"content": "# User Query\n {sys.query}", "role": "user"}],
              "sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. 
**LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
"temperature": "0.1",
"temperatureEnabled": true,
"tools": [
{
"component_name": "Retrieval",
"name": "Retrieval",
"params": {
"cross_languages": [],
"description": "",
"empty_response": "",
"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
}
},
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
"top_n": 8,
"use_kg": false
}
}
],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "",
"visual_files_var": ""
},
"label": "Agent",
"name": "Knowledge Base Agent"
},
"dragging": false,
"id": "Agent:NewPumasLick",
"measured": {
"height": 84,
"width": 200
},
"position": {
"x": 347.00048227952215,
"y": 186.49109364794631
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"description": "This is an agent for a specific task.",
"user_prompt": "This is the order you need to send to the agent."
},
"label": "Tool",
"name": "flow.tool_10"
},
"dragging": false,
"id": "Tool:AllBirdsNail",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": 220.24819746977118,
"y": 403.31576836482583
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "toolNode"
}
]
},
"history": [],
"memory": [],
"messages": [],
"path": [],
"retrieval": []
},
"avatar": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAH0klEQVR4nO2ZC1BU1wGG/3uRp/IygG+DGK0GOjE1U6cxI4tT03Y0E+kENbaJbKpj60wzgNMwnTjuEtu0miGasY+0krI202kMVEnVxtoOLG00oVa0LajVBDcSEI0REFBgkZv/3GWXfdzdvctuHs7kmzmec9//d+45914XCXc4Xwjk1+59VJGGF7C5QAFSWBvgyWmWLl7IKiny6QNL173B5YjB84bOyrpKA4B1DLySdQpLKAiZGtZ7a/KMVoQJz6UfEZyhTWwaEBmssiLvCueu6BJg8EwFqGTTAC+uvNWC9w82sRWcux/JwaSHstjywcogRt4RG0KExwWG4QsVYCebKSwe3L5lR9OOWjyzfg2WL/0a1/jncO3b2FHxGnKeWYqo+Giu8UEMrWJKWBACPMY/DG+63txhvnKshUu+DF2/hayMDFRsL+VScDb++AVc6OjAuInxXPJl2tfnIikrzUyJMi7qQmLRhOEr2fOFbX/7P6STF7BqoWevfdij4NWGQfx+57OYO2sG1wSnsek8Nm15EU8sikF6ouelXz9ph7JwDqYt+5IIZaGEkauDIrH4wPBmhjexCSEws+VdVG1M4NIoj+2xYzBuJtavWcEl/VS8dggx/ZdQvcGzQwp+cxOXsu5RBQQMVkYJM4LA/Txh+ELFMWFVPARS5kFiabZdx8Olh7l17BzdvhzZmROhdJ3j6D/nIyBgOCMlLAgA9xmF4TMV4BSbrgnrLiBl5rOsRCRRbDUsBzQFiJjY91PCBj9w+yiP1lXWsTLAjc9YQGB9I8+Yx1oTiUWFvW9QgDo2PdASaDp/EQ8/sRnhcPTVcuTMncXwQQVESL9DidscaPW+QEtAICRu9PSxFTpJiePV8AI9AsTvXZBY/Pa+wJ9ApNApIILm8S5Y4QXXQwhYFH6csemDP4G3G5v579i5d04mknknQhDYS4HCrCVr/mC3D305KnbCEpvVIia5Onw6WaWw+KAl0Np+FUXbdiMcyoqfUoeRHoFrJ1uRtnBG1/9Mf/3LtElp+VwF2wcd7woJib1vUPwMH4GWQCQJJtBa/V9cPmFD8uQUpMdNGDhY8bNYrobh8acHu270/l0ImJWRt64Wn6WACN9z5gq2lXwPW8pfweT0icP/fH23vO9QLYq3/QKyLBmFQI3CUcT9NdESEEPItKsSN3r7MBaSJoxHWZERM6ZmMLy2gDP8/pd/og418dTL37hFSUpMUC5f+UiWZcnY9s5+ixCwUiCXx2iiJdDNx6f4pgkH8Q3lbxK7h8+enoHha1cRNdMp8axiHxo6+/5bVdk8DSROYIW1X7QEIom3wHD3gEf4vu1bVYEJZeWQ0zJQvmcfyiv2QZak6raG/QWfK4Ez9mTc5v8xPMJfuojoxXmIX/9DOMe+FCWbcHu4BJJ0YEwCx0824bFNW9HesB+CqYu+jepfPYcHF+aoPXS8sQl/+vU2bgmOU2C+qRc9/YrrPPbGBtzavd0nvCxLxui4pJrBm911PFwak4CYA80cj+JCAiGUzYkmxrSY4N2c3GLi6UEIFL/wRxxqkhmHnTEpDQcrfq6ea+hcE8bNy3GFzyq4H22HW1Kd4WMSkg1jmsSRpKj0Rzhy4gNUv/y8Gjrv8SJK3OWScA+fMn/ysVPPvTmeh6nh1TcxBUJ+jEaKYr7N36x7h+Edj0pB6+WrLokn87+BrTt/p4ZPzZ6MM7/8R2//h33vOcNzdwgBMwVMbGvySQmo4a0NqOZccU7YmGXLEfPQUlUid/XT6B8YdIU/99vjsPcOdEhDsfOd4QVCwKB8yp8SWuG1njbTl83DpMWz1PCKAswuWPDI0e8WebyAJBbxNdrF7cls+hBpAb3h3XtehL/3+4u7D35rQwpP4YFTwMJ91rHpQyQFQgmf9sAMNL9Ur4afv/FBjIuPVj+n4YVTwMD96tj0IVICoYYXv/q1VJ1Sl8UveQyaRwErvOB6B5SwKhqP00gI6A0vhsycJ7/KIzxhyHqGN0ADbnNAAYOicRfCFdAb/p50Gbfuc/wy5w1D5lOghk0fuG0USlgVr7sQjoDe8C8WxKGKPy2KjzlvAQb02/sCbh+FApngX1QUtyeSuwDi0hxFByV7L+LIf3r5kvpp4PBr07Hqvn71Y85bgOG6WS2ggA1+4D6eUKKQApVsqngI6KSkqh9HzsoM/3zg8Oz5VQ9E8wjf30YFDGdkeAsCwH18oYRZGXk7C4HuYxcwe6rjQsFovzaEvoFxqNkTOPzMjGikJso8wsF77XYkLx6dAwxWxvBmBIH7aUMJi8J3w0DnTVz7dyvX6KPzVBt+kL8cmzesRq9ps2Z48bRJmOIapS7E4zM2lXNt5CcU6ID7+ocSZkqY2NRN6ysnsHbJEpR8ZwV6t5Yg+iuLELf2KVd48VwXQf3BQGUMb4ZOuH9gKFEIYJfiNrEDcXZHHV4q3YRv5i7ikgM94RlETNgihrcgBHhccCiRCf7VhBK5rAPyr9I/Y/WKPEyfksH/9NjQ2dODhsYzwcLXsypkeBtCRGLRDUUMAMyKHxEx4dtrzyP97nQMygripiQiKi4aSbPvQmKW7+OXF69ntYvBa1iPCYklZEZECsGm4ja0Ops7EJsaj4SprlU+8IJiqIjAFga3Ikx4vvAYkTGALxyWFArlsnbBC9Sz6mI5zWKNRGh3JJY7mjte4GOz+r4tkRbxQQAAAABJRU5ErkJggg=="
}
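Note how the Message component above pulls the agent's answer in through the `{Component:ID@field}` placeholder syntax (`{Agent:NewPumasLick@content}`), the same convention the SQL-assistant hunks further down enforce by wrapping bare references such as `sys.query` in braces. As a rough illustration of how such references could be expanded, here is a minimal sketch; the function name and regex are assumptions, not RAGFlow's actual resolver:

```python
import re

# Hypothetical resolver sketch: expands "{Agent:NewPumasLick@content}" and
# "{sys.query}" against component outputs and global variables. This is a
# simplified illustration, not the actual RAGFlow implementation.
PLACEHOLDER = re.compile(r"\{([A-Za-z_][\w.:]*?)(?:@(\w+))?\}")

def resolve(template: str, outputs: dict, globals_: dict) -> str:
    def repl(m: re.Match) -> str:
        ref, field = m.group(1), m.group(2)
        if field:  # component reference, e.g. Agent:NewPumasLick@content
            return str(outputs.get(ref, {}).get(field, ""))
        return str(globals_.get(ref, ""))  # global variable, e.g. sys.query
    return PLACEHOLDER.sub(repl, template)

print(resolve("# User Query\n {sys.query}", {}, {"sys.query": "top products"}))
```

Under this reading, an unbraced value like `"sys.query"` is just a literal string, which is exactly the bug the later hunks fix.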
@@ -1,7 +1,13 @@
{
"id": 12,
-"title": "Generate SEO Blog",
-"description": "This workflow automatically generates a complete SEO-optimized blog article based on a simple user input. You don’t need any writing experience. Just provide a topic or short request — the system will handle the rest.",
+"title": {
+"en": "Generate SEO Blog",
+"de": "SEO Blog generieren",
+"zh": "生成SEO博客"},
+"description": {
+"en": "This workflow automatically generates a complete SEO-optimized blog article based on a simple user input. You don't need any writing experience. Just provide a topic or short request — the system will handle the rest.",
+"de": "Dieser Workflow generiert automatisch einen vollständigen SEO-optimierten Blogartikel basierend auf einer einfachen Benutzereingabe. Sie benötigen keine Schreiberfahrung. Geben Sie einfach ein Thema oder eine kurze Anfrage ein – das System übernimmt den Rest.",
+"zh": "此工作流根据简单的用户输入自动生成完整的SEO博客文章。你无需任何写作经验,只需提供一个主题或简短请求,系统将处理其余部分。"},
"canvas_type": "Marketing",
"dsl": {
"components": {
@@ -912,4 +918,4 @@
"retrieval": []
},
"avatar": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAAwADADASIAAhEBAxEB/8QAGQAAAwEBAQAAAAAAAAAAAAAABgkKBwUI/8QAMBAAAAYCAQIEBQQCAwAAAAAAAQIDBAUGBxEhCAkAEjFBFFFhcaETFiKRFyOx8PH/xAAaAQACAwEBAAAAAAAAAAAAAAACAwABBgQF/8QALBEAAgIBAgUCBAcAAAAAAAAAAQIDBBEFEgATITFRIkEGIzJhFBUWgaGx8P/aAAwDAQACEQMRAD8AfF2hez9089t7pvxgQMa1Gb6qZ6oQE9m/NEvCIStyPfJSOF/M1epzMugo/qtMqbiRc1mJjoJKCLMNIxKcsLJedfO1Ct9cI63x9fx6CA/19t+oh4LFA5HfuAgP/A8eOIsnsTBrkBHXA7+v53+Q+ficTgJft9gIgA+/P9/1r342O/YA8A8k3/if+IbAN7+2/f8AAiI6H19PGoPyESTMZQPKUAHkQEN+3r9dh78/YPGUTk2wb/qAZZIugH1OHH5DjkdfbnWw2DsOxPj+xjrnx2H39unBopJGBn9s+PHv1HXjPJtH+J+B40O9a16h/wB/92j/ALrPa/wR104UyAobHlXhuo2HrEtK4qy3CwjKOuJLRHJLSkXWrFKs/gVrJVrE8TUiH8bPrP20UEu8m4hNpMJJuTOfnbUw/kUqyZgMHGjAO9+mtDsQ53sdcB6eMhnpEjhNQxRKICAgHy5+/roOdjr7c+J6O4x07dx484/n7nzw1gexBGfIPkZ/3t39uGpqc6+fP5/Ht8vGFZCzJjWpWuBxvO2yPjrtclUUK7BqmUI4fuASeyhG5FzFI0Bw4aQ0iZNoDgzvRW4qtyFkI4XmwyEk2YNnDp0sVBu3IUyy5iqH8gqKERSIRNIii67hddRJs1at01Xbx2sgzZoLu10UFJR+4V1A5cxF3FqNcLvjwcno43uuLrOxZYjujaClcb4QQfxEizpFiQyM9olcueRnjC2ZMt9iY06zL0qytrMSqSOVGsfHMaGhZ3l4lSRI2MqE74zJvRTveNFWWIh3RWw+XCAM5icKQLrCH57T17FhErSlRXnWvyZXKQwWJ3eraD14p5YuZCFgacskK2oGkVuKO5GYTHzf7DaD12cBD3DgPOIDrWw9PnrXPgDkpVsUDGMG+DD6E9gHXIjrYjwUPQTCXYgHPhIV974+F6E1hpC14Yzmzj56YaQEeZhXsayD1zLPW7pygxaMf81Nzu1iJsnIuDIKnaJAkPldqrHaoORZ73tMVEbFdSXT9nVgRQgnBq6j8e/HCIEATpAnH5KlmRVkFRFJwks/bqImSXJ5VFyA3N6Ikh3bCW3YHp5cowOmCfTgA+xJCnrjtwHKcLvJj2ZGcTRFj19kEhckdzgEjKnABGSSzdc1Fe5byXXGNjKdvRcw5NxvLidNZFFCxUa62KrzMaChw8hhYScFJtROAgmuLByq1MsgkZYPaVVuDe0wraRaqAdJwgRQo+YR8xTlAQNx6b49w41vXiJpCalLh1jZhyrTqRM4+jstdRmYryNkydLQRWg1LNGcWd5jIFFvCythlIySa0mNu74sKRQtaWsTmupqPItw0lE52ufpyYzrSkx6cw5bLmBEpkTsz+dt8P5QFuCRtAIkBH9MuwKHICIaDQhnojMs9mKaeGcrMxXlQtAYkdVljimRrE5MqI4zL8oSqQ6wxjodBqK05qdK3Vo3aCSVkBW7bjuC1NFJJBPaqyx6fp6pWkliYLXK2XrukkRu2CCVoSWMgsdMyySKwoLFcIGWSTUMg4IBgTcICoBhRcplMcpFkhIqQp1ClMBTmA0Zfe1zpjvHfXff65bZlzXpB3jjGTgiirmPjAfs16PHqHeQ75Wbj3xxZpOEkV3LRJJSPdomUBZISJLncV2k+8D07dxXp7xsYuTapA9UkJUYWIzNhadnWEZeCXGLQQiJi1ViHfhHL2unWh+mlORsrW0JFpEFnGVfm1mU4kq0FY3eD6corJncv6dr5NLSMNXVaTUksjTiMnaq8uFfSVuDyiJ1iZpy0LOJtpa3YfkcQ5fdozyxI2m5qqcrHN61YYmHsh6v3o9ParYmYJEtlhIx6+gUbjgD23M6oqg92YL0JyF6Bps+qDValVA9h9Lj5SZI3SHXdEQlj1wiQtLLIe6pGzjO3BlBkK1hxpblLVH5wdW0BcFKf/JwRtjsot2z8omaSdxbzzk1iEjsE0AM9rrRZNRIrVyo7dGO6E+oh8axLlJ5H5VaJKx7ePRGFbW6vUeFfHQIWPTI9Tm7HHfuhqY7E6C7JFqUzM6iZXIoncNxX7+bIVdJnTT48x3OQU1krIDW3UeixVhyISzYz6cadY5Xph6TseRNTRsTElzzBn9Vlly0TAERsdgnMYyLROjyFbg5R4ZlsGaMT4yNi2Zlq1GwjZB3jq0PsaJfA3t0jL0W0Y9xf1V41lpWckXMLaZiwxuKYPqc6LlHdkeRF+Qxswx5ASDqBVrsL+2A/N6SiCbYymV2BywJiMZj3GRRMTnL+lVyHCll3R7Szv0vqXMtQ74T+HijljIScLaEpkKCB3rqMBIi0jPs5JeOKTZMZEi5VVnouzy0k3jXjWSMlY6UcVGDxlKMVDqx91SILWSi3D2KdgYy3kP8E9X/AE1SnRXBNdNRMlefT6g7aY6giK+cPLGNg0bY68rcnpsNh9PqIBve/EcPQ3WIq2dR9
3xpSgk5SAZ9R6MLAOZFUkpLSUDXp6/KPpGUkmTdswlnKnwbl5ITMdGwcXJi7LKsqzUmT5tWYmkXuF9wjBvb76b7dHheazJ9RElUJOCxViuMlUJC0Gtz6PKyjLBY4qMWUe12r1xZ6lOyT6XPEBKN2CkTDOlZd02TBdTMt7Upx2knrkdCv1UKjDKn1A7XBYH6SCOOrWn5Oi/DtRiu+GleRthDL8rXdVjZlcfWrSIxVlGGGCOnH//Z"
}
@@ -1,7 +1,13 @@
{
"id": 4,
-"title": "Generate SEO Blog",
-"description": "This workflow automatically generates a complete SEO-optimized blog article based on a simple user input. You don’t need any writing experience. Just provide a topic or short request — the system will handle the rest.",
+"title": {
+"en": "Generate SEO Blog",
+"de": "SEO Blog generieren",
+"zh": "生成SEO博客"},
+"description": {
+"en": "This workflow automatically generates a complete SEO-optimized blog article based on a simple user input. You don't need any writing experience. Just provide a topic or short request — the system will handle the rest.",
+"de": "Dieser Workflow generiert automatisch einen vollständigen SEO-optimierten Blogartikel basierend auf einer einfachen Benutzereingabe. Sie benötigen keine Schreiberfahrung. Geben Sie einfach ein Thema oder eine kurze Anfrage ein – das System übernimmt den Rest.",
+"zh": "此工作流根据简单的用户输入自动生成完整的SEO博客文章。你无需任何写作经验,只需提供一个主题或简短请求,系统将处理其余部分。"},
"canvas_type": "Recommended",
"dsl": {
"components": {
@@ -912,4 +918,4 @@
"retrieval": []
},
"avatar": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAAwADADASIAAhEBAxEB/8QAGQAAAwEBAQAAAAAAAAAAAAAABgkKBwUI/8QAMBAAAAYCAQIEBQQCAwAAAAAAAQIDBAUGBxEhCAkAEjFBFFFhcaETFiKRFyOx8PH/xAAaAQACAwEBAAAAAAAAAAAAAAACAwABBgQF/8QALBEAAgIBAgUCBAcAAAAAAAAAAQIDBBEFEgATITFRIkEGIzJhFBUWgaGx8P/aAAwDAQACEQMRAD8AfF2hez9089t7pvxgQMa1Gb6qZ6oQE9m/NEvCIStyPfJSOF/M1epzMugo/qtMqbiRc1mJjoJKCLMNIxKcsLJedfO1Ct9cI63x9fx6CA/19t+oh4LFA5HfuAgP/A8eOIsnsTBrkBHXA7+v53+Q+ficTgJft9gIgA+/P9/1r342O/YA8A8k3/if+IbAN7+2/f8AAiI6H19PGoPyESTMZQPKUAHkQEN+3r9dh78/YPGUTk2wb/qAZZIugH1OHH5DjkdfbnWw2DsOxPj+xjrnx2H39unBopJGBn9s+PHv1HXjPJtH+J+B40O9a16h/wB/92j/ALrPa/wR104UyAobHlXhuo2HrEtK4qy3CwjKOuJLRHJLSkXWrFKs/gVrJVrE8TUiH8bPrP20UEu8m4hNpMJJuTOfnbUw/kUqyZgMHGjAO9+mtDsQ53sdcB6eMhnpEjhNQxRKICAgHy5+/roOdjr7c+J6O4x07dx484/n7nzw1gexBGfIPkZ/3t39uGpqc6+fP5/Ht8vGFZCzJjWpWuBxvO2yPjrtclUUK7BqmUI4fuASeyhG5FzFI0Bw4aQ0iZNoDgzvRW4qtyFkI4XmwyEk2YNnDp0sVBu3IUyy5iqH8gqKERSIRNIii67hddRJs1at01Xbx2sgzZoLu10UFJR+4V1A5cxF3FqNcLvjwcno43uuLrOxZYjujaClcb4QQfxEizpFiQyM9olcueRnjC2ZMt9iY06zL0qytrMSqSOVGsfHMaGhZ3l4lSRI2MqE74zJvRTveNFWWIh3RWw+XCAM5icKQLrCH57T17FhErSlRXnWvyZXKQwWJ3eraD14p5YuZCFgacskK2oGkVuKO5GYTHzf7DaD12cBD3DgPOIDrWw9PnrXPgDkpVsUDGMG+DD6E9gHXIjrYjwUPQTCXYgHPhIV974+F6E1hpC14Yzmzj56YaQEeZhXsayD1zLPW7pygxaMf81Nzu1iJsnIuDIKnaJAkPldqrHaoORZ73tMVEbFdSXT9nVgRQgnBq6j8e/HCIEATpAnH5KlmRVkFRFJwks/bqImSXJ5VFyA3N6Ikh3bCW3YHp5cowOmCfTgA+xJCnrjtwHKcLvJj2ZGcTRFj19kEhckdzgEjKnABGSSzdc1Fe5byXXGNjKdvRcw5NxvLidNZFFCxUa62KrzMaChw8hhYScFJtROAgmuLByq1MsgkZYPaVVuDe0wraRaqAdJwgRQo+YR8xTlAQNx6b49w41vXiJpCalLh1jZhyrTqRM4+jstdRmYryNkydLQRWg1LNGcWd5jIFFvCythlIySa0mNu74sKRQtaWsTmupqPItw0lE52ufpyYzrSkx6cw5bLmBEpkTsz+dt8P5QFuCRtAIkBH9MuwKHICIaDQhnojMs9mKaeGcrMxXlQtAYkdVljimRrE5MqI4zL8oSqQ6wxjodBqK05qdK3Vo3aCSVkBW7bjuC1NFJJBPaqyx6fp6pWkliYLXK2XrukkRu2CCVoSWMgsdMyySKwoLFcIGWSTUMg4IBgTcICoBhRcplMcpFkhIqQp1ClMBTmA0Zfe1zpjvHfXff65bZlzXpB3jjGTgiirmPjAfs16PHqHeQ75Wbj3xxZpOEkV3LRJJSPdomUBZISJLncV2k+8D07dxXp7xsYuTapA9UkJUYWIzNhadnWEZeCXGLQQiJi1ViHfhHL2unWh+mlORsrW0JFpEFnGVfm1mU4kq0FY3eD6corJncv6dr5NLSMNXVaTUksjTiMnaq8uFfSVuDyiJ1iZpy0LOJtpa3YfkcQ5fdozyxI2m5qqcrHN61YYmHsh6v3o9ParYmYJEtlhIx6+gUbjgD23M6oqg92YL0JyF6Bps+qDValVA9h9Lj5SZI3SHXdEQlj1wiQtLLIe6pGzjO3BlBkK1hxpblLVH5wdW0BcFKf/JwRtjsot2z8omaSdxbzzk1iEjsE0AM9rrRZNRIrVyo7dGO6E+oh8axLlJ5H5VaJKx7ePRGFbW6vUeFfHQIWPTI9Tm7HHfuhqY7E6C7JFqUzM6iZXIoncNxX7+bIVdJnTT48x3OQU1krIDW3UeixVhyISzYz6cadY5Xph6TseRNTRsTElzzBn9Vlly0TAERsdgnMYyLROjyFbg5R4ZlsGaMT4yNi2Zlq1GwjZB3jq0PsaJfA3t0jL0W0Y9xf1V41lpWckXMLaZiwxuKYPqc6LlHdkeRF+Qxswx5ASDqBVrsL+2A/N6SiCbYymV2BywJiMZj3GRRMTnL+lVyHCll3R7Szv0vqXMtQ74T+HijljIScLaEpkKCB3rqMBIi0jPs5JeOKTZMZEi5VVnouzy0k3jXjWSMlY6UcVGDxlKMVDqx91SILWSi3D2KdgYy3kP8E9X/AE1SnRXBNdNRMlefT6g7aY6giK+cPLGNg0bY68rcnpsNh9PqIBve/EcPQ3WIq2dR9
3xpSgk5SAZ9R6MLAOZFUkpLSUDXp6/KPpGUkmTdswlnKnwbl5ITMdGwcXJi7LKsqzUmT5tWYmkXuF9wjBvb76b7dHheazJ9RElUJOCxViuMlUJC0Gtz6PKyjLBY4qMWUe12r1xZ6lOyT6XPEBKN2CkTDOlZd02TBdTMt7Upx2knrkdCv1UKjDKn1A7XBYH6SCOOrWn5Oi/DtRiu+GleRthDL8rXdVjZlcfWrSIxVlGGGCOnH//Z"
}
@@ -1,7 +1,13 @@
{
"id": 17,
-"title": "SQL Assistant",
-"description": "SQL Assistant is an AI-powered tool that lets business users turn plain-English questions into fully formed SQL queries. Simply type your question (e.g., “Show me last quarter’s top 10 products by revenue”) and SQL Assistant generates the exact SQL, runs it against your database, and returns the results in seconds. ",
+"title": {
+"en": "SQL Assistant",
+"de": "SQL Assistent",
+"zh": "SQL助理"},
+"description": {
+"en": "SQL Assistant is an AI-powered tool that lets business users turn plain-English questions into fully formed SQL queries. Simply type your question (e.g., 'Show me last quarter's top 10 products by revenue') and SQL Assistant generates the exact SQL, runs it against your database, and returns the results in seconds. ",
+"de": "SQL-Assistent ist ein KI-gestütztes Tool, mit dem Geschäftsanwender einfache englische Fragen in vollständige SQL-Abfragen umwandeln können. Geben Sie einfach Ihre Frage ein (z.B. 'Zeige mir die Top 10 Produkte des letzten Quartals nach Umsatz') und der SQL-Assistent generiert das exakte SQL, führt es gegen Ihre Datenbank aus und liefert die Ergebnisse in Sekunden.",
+"zh": "用户能够将简单文本问题转化为完整的SQL查询并输出结果。只需输入您的问题(例如,展示上个季度前十名按收入排序的产品),SQL助理就会生成精确的SQL语句,对其运行您的数据库,并几秒钟内返回结果。"},
"canvas_type": "Marketing",
"dsl": {
"components": {
@@ -77,10 +83,10 @@
"value": []
}
},
-"password": "20010812Yy!",
+"password": "",
"port": 3306,
-"sql": "Agent:WickedGoatsDivide@content",
+"sql": "{Agent:WickedGoatsDivide@content}",
-"username": "13637682833@163.com"
+"username": ""
}
},
"upstream": [
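This hunk scrubs a real database password and account email that had been committed inside the shipped template, leaving both fields blank for the user to fill in, and fixes the `sql` field to use the braced placeholder form. For anyone distributing such templates, a safer pattern is to resolve credentials from the environment at load time; a minimal sketch follows (the environment-variable names are illustrative assumptions, not part of the template schema):

```python
import os

# Sketch: fill blank ExeSQL credentials from environment variables instead of
# hard-coding them in a shipped template. Variable names are assumptions.
def fill_db_credentials(params: dict) -> dict:
    params = dict(params)  # avoid mutating the loaded template
    params.setdefault("port", 3306)
    if not params.get("username"):
        params["username"] = os.environ.get("EXESQL_USERNAME", "")
    if not params.get("password"):
        params["password"] = os.environ.get("EXESQL_PASSWORD", "")
    return params
```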
@@ -110,9 +116,7 @@
"params": {
"cross_languages": [],
"empty_response": "",
-"kb_ids": [
-"ed31364c727211f0bdb2bafe6e7908e6"
-],
+"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
@@ -120,7 +124,7 @@
"value": ""
}
},
-"query": "sys.query",
+"query": "{sys.query}",
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
@@ -141,9 +145,7 @@
"params": {
"cross_languages": [],
"empty_response": "",
-"kb_ids": [
-"0f968106727311f08357bafe6e7908e6"
-],
+"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
@@ -151,7 +153,7 @@
"value": ""
}
},
-"query": "sys.query",
+"query": "{sys.query}",
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
@@ -172,9 +174,7 @@
"params": {
"cross_languages": [],
"empty_response": "",
-"kb_ids": [
-"4ad1f9d0727311f0827dbafe6e7908e6"
-],
+"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
@@ -182,7 +182,7 @@
"value": ""
}
},
-"query": "sys.query",
+"query": "{sys.query}",
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
@@ -206,7 +206,7 @@
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
-"prologue": "Hi! I'm your SQL assistant, what can I do for you?"
+"prologue": "Hi! I'm your SQL assistant. What can I do for you?"
}
},
"upstream": []
@@ -319,7 +319,7 @@
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
-"prologue": "Hi! I'm your SQL assistant, what can I do for you?"
+"prologue": "Hi! I'm your SQL assistant. What can I do for you?"
},
"label": "Begin",
"name": "begin"
@@ -343,9 +343,7 @@
"form": {
"cross_languages": [],
"empty_response": "",
-"kb_ids": [
-"ed31364c727211f0bdb2bafe6e7908e6"
-],
+"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
@@ -353,7 +351,7 @@
"value": ""
}
},
-"query": "sys.query",
+"query": "{sys.query}",
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
@@ -383,9 +381,7 @@
"form": {
"cross_languages": [],
"empty_response": "",
-"kb_ids": [
-"0f968106727311f08357bafe6e7908e6"
-],
+"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
@@ -393,7 +389,7 @@
"value": ""
}
},
-"query": "sys.query",
+"query": "{sys.query}",
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
@@ -423,9 +419,7 @@
"form": {
"cross_languages": [],
"empty_response": "",
-"kb_ids": [
-"4ad1f9d0727311f0827dbafe6e7908e6"
-],
+"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
@@ -433,7 +427,7 @@
"value": ""
}
},
-"query": "sys.query",
+"query": "{sys.query}",
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
@@ -533,10 +527,10 @@
"value": []
}
},
-"password": "20010812Yy!",
+"password": "",
"port": 3306,
-"sql": "Agent:WickedGoatsDivide@content",
+"sql": "{Agent:WickedGoatsDivide@content}",
-"username": "13637682833@163.com"
+"username": ""
},
"label": "ExeSQL",
"name": "ExeSQL"
@@ -721,4 +715,4 @@
"retrieval": []
},
"avatar": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAcFBQYFBAcGBQYIBwcIChELCgkJChUPEAwRGBUaGRgVGBcbHichGx0lHRcYIi4iJSgpKywrGiAvMy8qMicqKyr/2wBDAQcICAoJChQLCxQqHBgcKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKir/wAARCAAwADADAREAAhEBAxEB/8QAGgAAAwEBAQEAAAAAAAAAAAAABQYHBAMAAf/EADIQAAEDAwMCBAMHBQAAAAAAAAECAwQFESEABjESEyJBUYEUYXEHFSNSkaGxMjNictH/xAAZAQADAQEBAAAAAAAAAAAAAAACAwQBAAX/xAAlEQACAgICAgEEAwAAAAAAAAABAgARAyESMQRBEyIycYFCkbH/2gAMAwEAAhEDEQA/AKHt2DGpNHXDLrZdWtSrIub39tZ5GbGwPA+pmDFkX7x7idvra85xqQaFNkxUTVIVJQzf8QpBFjbgEenNs681MnA9WJ6fEOKJoxVpSpFLTCo6KEZlTlLcQBIJS20hAv1D1ve+qPk52b0IsYuIGtyt7ZkVVNP+H3A5GdlN2u7GQUBSfmkk8cXH10tmLD6Yl0CG5qmTXBMZiQEMuvupUoKdc6UeEi4FsqOeBxrsKnv1AY+hJ2l5yfu6qQ6/UZtPDRHZ+Eldpsqz1hSrXJGLXwRxqxUQizFs7galPYUFDKT+h15oMuImspQpFiL+2i1A3A1bgxmixUgwlT8ZfgJ/y8P8HXdRuPZoxaqtfkQKbKqF03jtEoDeFKV1lNgfK4H764XfccVUgipvdiwKpFaXMLklFg4juuqV0m3Izg/MaEZCDYMScYqiJOd6xmqfUVfBJcWwtHV1Elfi87k51ViyhrsxL4ivQj1KrFZjTGjTJ8aShdyph5SUqFhwPzX9jpC0dXUqZK3ViHNq7oNaVJjz2Vw5LCrdKknpULZyfMf801MfI1e5NmpAGHUL12EZNFWWlhXSUuWHKgk3xomwEDuDhzLysySU9EndEVyIz3GmxJR+KpBIdCLlRHn/AFEjjIF9AMJlZ8gLZ/qUiJSg1Tu0HO4plFj4FC1h9NYfHIU7kwzgnqCJlKLiCO2s6hKytWiPJoFdfnLW7HS0or6bqXbjg2AI99XjAa3NPlL6jFTduOR5sd1+oyfjQMONqI7QOMA4V7/pqjHjC9SLNn56I1HiqrqTUKM0hbq2lpst5CQSST54xjSPJbICOHUhawISiRQ02T2Uq6AAkqFj/GquJQks1iEr/INLU82bploKSFXusG9xfjHofXQuQUNRoQqQT0ZwVEST5687iZWGgpDsebNbaTDfKVL/ALnbQU/UkKNhjXpFt0BJBVXe/wAGGG6YMlvvNkjlBGmKeJimHIVc0TY89akCKspT28C5BKgDyR7fvrCFI+q/1DQsvVfudYcVyKw49KU6tZyQbmwHFhrOKr9s0uz0CAIpbr3RKo1Rbh02C4HJISp2ZIz0pJ8IQk5Nr/QXznSX6NSnGAwHI/gD/TM+3vtAj1arJpcpgtPdPSH0kFt5wDxAWOOLgamIAFwijCfD927N2tGXuNxlK2W0occUhJWpR+QzzrPjc+pvyqT3Ftf2zbObf7YYecb6CrrDAGfy20wYMkA5Vjbtev7b3nEcXRela27d1ogoWi/rnQsjrqZzHdwzKoKUsqWz3mOnJUlZJt8uokD621w+RdzgynUkUpoUafPZXMnSHlrKluyX1Eug8XF7GwxbgWxrubMO5WmNRsCKtLfcY3rAU0nIltkBP+w0X8Jjdz//2Q=="
}
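Across all three templates, the diff replaces the flat `title` and `description` strings with per-language objects keyed by `en`, `de`, and `zh`. Code that consumes templates therefore has to accept both shapes during the transition; one way to do that, sketched under the assumption of an `en` fallback (the fallback policy is not confirmed by this diff), is:

```python
# Sketch: read a template field that may be a legacy flat string or the new
# {"en": ..., "de": ..., "zh": ...} object introduced by this diff.
def localized(field, lang: str = "en") -> str:
    if isinstance(field, dict):
        return field.get(lang) or field.get("en", "")
    return field  # legacy flat string

title = {"en": "Generate SEO Blog", "de": "SEO Blog generieren", "zh": "生成SEO博客"}
assert localized(title, "de") == "SEO Blog generieren"
```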
1173 agent/templates/stock_research_report.json Normal file
File diff suppressed because one or more lines are too long
371 agent/templates/title_chunker.json Normal file
File diff suppressed because one or more lines are too long
519 agent/templates/user_interaction.json Normal file
@@ -0,0 +1,519 @@
{
"id": 27,
"title": {
"en": "Interactive Agent",
"zh": "可交互的 Agent"
},
"description": {
"en": "During the Agent’s execution, users can actively intervene and interact with the Agent to adjust or guide its output, ensuring the final result aligns with their intentions.",
"zh": "在 Agent 的运行过程中,用户可以随时介入,与 Agent 进行交互,以调整或引导生成结果,使最终输出更符合预期。"
},
"canvas_type": "Agent",
"dsl": {
"components": {
"Agent:LargeFliesMelt": {
"downstream": [
"UserFillUp:GoldBroomsRelate"
],
"obj": {
"component_name": "Agent",
"params": {
"cite": true,
"delay_after_error": 1,
"description": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": "",
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.7,
"llm_id": "qwen-turbo@Tongyi-Qianwen",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 1,
"max_tokens": 256,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
},
"structured": {}
},
"presencePenaltyEnabled": false,
"presence_penalty": 0.4,
"prompts": [
{
"content": "User query:{sys.query}",
"role": "user"
}
],
"sys_prompt": "<role>\nYou are the Planning Agent in a multi-agent RAG workflow.\nYour sole job is to design a crisp, executable Search Plan for the next agent. Do not search or answer the user’s question.\n</role>\n<objectives>\nUnderstand the user’s task and decompose it into evidence-seeking steps.\nProduce high-quality queries and retrieval settings tailored to the task type (fact lookup, multi-hop reasoning, comparison, statistics, how-to, etc.).\nIdentify missing information that would materially change the plan (≤3 concise questions).\nOptimize for source trustworthiness, diversity, and recency; define stopping criteria to avoid over-searching.\nAnswer in 150 words.\n<objectives>",
"temperature": 0.1,
"temperatureEnabled": false,
"tools": [],
"topPEnabled": false,
"top_p": 0.3,
"user_prompt": "",
"visual_files_var": ""
}
},
"upstream": [
"begin"
]
},
"Agent:TangyWordsType": {
"downstream": [
"Message:FreshWallsStudy"
],
"obj": {
"component_name": "Agent",
"params": {
"cite": true,
"delay_after_error": 1,
"description": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": "",
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.7,
"llm_id": "qwen-turbo@Tongyi-Qianwen",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 1,
"max_tokens": 256,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
},
"structured": {}
},
"presencePenaltyEnabled": false,
"presence_penalty": 0.4,
"prompts": [
{
"content": "Search Plan: {Agent:LargeFliesMelt@content}\n\n\n\nAwait Response feedback:{UserFillUp:GoldBroomsRelate@instructions}\n",
"role": "user"
}
],
"sys_prompt": "<role>\nYou are the Search Agent.\nYour job is to execute the approved Search Plan, integrate the Await Response feedback, retrieve evidence, and produce a well-grounded answer.\n</role>\n<objectives>\nTranslate the plan + feedback into concrete searches.\nCollect diverse, trustworthy, and recent evidence meeting the plan’s evidence bar.\nSynthesize a concise answer; include citations next to claims they support.\nIf evidence is insufficient or conflicting, clearly state limitations and propose next steps.\n</objectives>\n <tools>\nRetrieval: You must use Retrieval to do the search.\n </tools>\n",
"temperature": 0.1,
"temperatureEnabled": false,
"tools": [
{
"component_name": "Retrieval",
"name": "Retrieval",
"params": {
"cross_languages": [],
"description": "",
"empty_response": "",
"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
},
"json": {
"type": "Array<Object>",
"value": []
}
},
"rerank_id": "",
"similarity_threshold": 0.2,
"toc_enhance": false,
"top_k": 1024,
"top_n": 8,
"use_kg": false
}
}
],
"topPEnabled": false,
"top_p": 0.3,
"user_prompt": "",
"visual_files_var": ""
}
},
"upstream": [
"UserFillUp:GoldBroomsRelate"
]
},
"Message:FreshWallsStudy": {
"downstream": [],
"obj": {
"component_name": "Message",
"params": {
"content": [
"{Agent:TangyWordsType@content}"
]
}
},
"upstream": [
"Agent:TangyWordsType"
]
},
"UserFillUp:GoldBroomsRelate": {
"downstream": [
"Agent:TangyWordsType"
],
"obj": {
"component_name": "UserFillUp",
"params": {
"enable_tips": true,
"inputs": {
"instructions": {
"name": "instructions",
"optional": false,
"options": [],
"type": "paragraph"
}
},
"outputs": {
"instructions": {
"name": "instructions",
"optional": false,
"options": [],
"type": "paragraph"
}
},
"tips": "Here is my search plan:\n{Agent:LargeFliesMelt@content}\nAre you okay with it?"
}
},
"upstream": [
"Agent:LargeFliesMelt"
]
},
"begin": {
"downstream": [
"Agent:LargeFliesMelt"
],
"obj": {
"component_name": "Begin",
"params": {}
},
"upstream": []
}
},
"globals": {
"sys.conversation_turns": 0,
"sys.files": [],
"sys.query": "",
"sys.user_id": ""
},
"graph": {
"edges": [
{
"data": {
"isHovered": false
},
"id": "xy-edge__beginstart-Agent:LargeFliesMeltend",
"source": "begin",
"sourceHandle": "start",
"target": "Agent:LargeFliesMelt",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:LargeFliesMeltstart-UserFillUp:GoldBroomsRelateend",
"source": "Agent:LargeFliesMelt",
"sourceHandle": "start",
"target": "UserFillUp:GoldBroomsRelate",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__UserFillUp:GoldBroomsRelatestart-Agent:TangyWordsTypeend",
"source": "UserFillUp:GoldBroomsRelate",
"sourceHandle": "start",
"target": "Agent:TangyWordsType",
"targetHandle": "end"
},
{
"id": "xy-edge__Agent:TangyWordsTypetool-Tool:NastyBatsGoend",
"source": "Agent:TangyWordsType",
"sourceHandle": "tool",
"target": "Tool:NastyBatsGo",
"targetHandle": "end"
},
{
"id": "xy-edge__Agent:TangyWordsTypestart-Message:FreshWallsStudyend",
"source": "Agent:TangyWordsType",
"sourceHandle": "start",
"target": "Message:FreshWallsStudy",
"targetHandle": "end"
}
],
"nodes": [
{
"data": {
"label": "Begin",
"name": "begin"
},
"dragging": false,
"id": "begin",
"measured": {
"height": 50,
"width": 200
},
"position": {
"x": 154.9008789064451,
"y": 119.51001744285344
},
"selected": false,
"sourcePosition": "left",
"targetPosition": "right",
"type": "beginNode"
},
{
"data": {
"form": {
"cite": true,
"delay_after_error": 1,
"description": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": "",
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.7,
"llm_id": "qwen-turbo@Tongyi-Qianwen",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 1,
"max_tokens": 256,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
},
"structured": {}
},
"presencePenaltyEnabled": false,
"presence_penalty": 0.4,
"prompts": [
{
"content": "User query:{sys.query}",
"role": "user"
}
],
"sys_prompt": "<role>\nYou are the Planning Agent in a multi-agent RAG workflow.\nYour sole job is to design a crisp, executable Search Plan for the next agent. Do not search or answer the user’s question.\n</role>\n<objectives>\nUnderstand the user’s task and decompose it into evidence-seeking steps.\nProduce high-quality queries and retrieval settings tailored to the task type (fact lookup, multi-hop reasoning, comparison, statistics, how-to, etc.).\nIdentify missing information that would materially change the plan (≤3 concise questions).\nOptimize for source trustworthiness, diversity, and recency; define stopping criteria to avoid over-searching.\nAnswer in 150 words.\n<objectives>",
"temperature": 0.1,
"temperatureEnabled": false,
"tools": [],
"topPEnabled": false,
"top_p": 0.3,
"user_prompt": "",
"visual_files_var": ""
},
"label": "Agent",
"name": "Planning Agent"
},
"dragging": false,
"id": "Agent:LargeFliesMelt",
"measured": {
"height": 90,
"width": 200
},
"position": {
"x": 443.96309330796714,
"y": 104.61370811205677
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"enable_tips": true,
"inputs": {
"instructions": {
"name": "instructions",
"optional": false,
"options": [],
"type": "paragraph"
}
},
"outputs": {
"instructions": {
"name": "instructions",
"optional": false,
"options": [],
"type": "paragraph"
}
},
"tips": "Here is my search plan:\n{Agent:LargeFliesMelt@content}\nAre you okay with it?"
},
"label": "UserFillUp",
"name": "Await Response"
},
"dragging": false,
"id": "UserFillUp:GoldBroomsRelate",
"measured": {
"height": 50,
"width": 200
},
"position": {
"x": 683.3409492927474,
"y": 116.76274137645598
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "ragNode"
},
{
"data": {
"form": {
"cite": true,
"delay_after_error": 1,
"description": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": "",
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.7,
"llm_id": "qwen-turbo@Tongyi-Qianwen",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 1,
"max_tokens": 256,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
},
"structured": {}
},
"presencePenaltyEnabled": false,
"presence_penalty": 0.4,
"prompts": [
{
"content": "Search Plan: {Agent:LargeFliesMelt@content}\n\n\n\nAwait Response feedback:{UserFillUp:GoldBroomsRelate@instructions}\n",
"role": "user"
}
],
"sys_prompt": "<role>\nYou are the Search Agent.\nYour job is to execute the approved Search Plan, integrate the Await Response feedback, retrieve evidence, and produce a well-grounded answer.\n</role>\n<objectives>\nTranslate the plan + feedback into concrete searches.\nCollect diverse, trustworthy, and recent evidence meeting the plan’s evidence bar.\nSynthesize a concise answer; include citations next to claims they support.\nIf evidence is insufficient or conflicting, clearly state limitations and propose next steps.\n</objectives>\n <tools>\nRetrieval: You must use Retrieval to do the search.\n </tools>\n",
"temperature": 0.1,
"temperatureEnabled": false,
"tools": [
{
"component_name": "Retrieval",
"name": "Retrieval",
"params": {
"cross_languages": [],
"description": "",
"empty_response": "",
"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
},
"json": {
"type": "Array<Object>",
"value": []
}
},
"rerank_id": "",
"similarity_threshold": 0.2,
"toc_enhance": false,
"top_k": 1024,
"top_n": 8,
"use_kg": false
}
}
],
"topPEnabled": false,
"top_p": 0.3,
"user_prompt": "",
"visual_files_var": ""
},
"label": "Agent",
"name": "Search Agent"
},
"dragging": false,
"id": "Agent:TangyWordsType",
"measured": {
"height": 90,
"width": 200
},
"position": {
"x": 944.6411255659472,
"y": 99.84499066368488
},
"selected": true,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"description": "This is an agent for a specific task.",
"user_prompt": "This is the order you need to send to the agent."
},
"label": "Tool",
"name": "flow.tool_0"
},
"id": "Tool:NastyBatsGo",
"measured": {
"height": 50,
"width": 200
},
"position": {
"x": 862.6411255659472,
"y": 239.84499066368488
},
"sourcePosition": "right",
"targetPosition": "left",
"type": "toolNode"
},
{
"data": {
"form": {
"content": [
"{Agent:TangyWordsType@content}"
]
},
"label": "Message",
"name": "Message"
},
"dragging": false,
"id": "Message:FreshWallsStudy",
"measured": {
"height": 50,
"width": 200
},
"position": {
"x": 1216.7057997987163,
"y": 120.48541298149814
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "messageNode"
}
]
},
"history": [],
"messages": [],
"path": [],
"retrieval": [],
"variables": {}
},
"avatar":
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA1FSURBVHgBzVppcFRVFv7e672TTjrprGTrkI1tgFjiAI4TmgEGcEFgVFAHxr2mLFe0cKxyBOeHljVjwVDqzDhVTqksOlOiuCBrCwk7hCAkLAGy70kn3eklvb03571e8rrTCQGi5em6ecu9fe85557znXNuh0EUHTGb5/Ast4TnmXsB3oifBTFVxEsVy7PrZ5lM9RE9oZvTZrPeAe51Gvh85ABGMuonJh6Ra9Mzw2KDm2PXm0ymPoS6ReZ5n5n6p4f45QOsj0ihEXxwdGg9Pta3Ax2R98ErH+wIzMFjFFTlhcwkCMEKT4qE+Nc5np8e5Aa8MAf94UNtmEl5DPYFrpHPER8+xn3U3KNkXqDpKtFaSGi1Wm188MH7655+6inoExLR0toW0N211R+8jJ198YHtiFyDH5EHE8Oy7Iccx/1BeJ4yZRI2bXgHSfokyGQsLH19YDgax4RXGJz450A8v1HGMMw62soMEgRxcVqcrDyG9X95E339fSidPh05Odnot9vB0Qe84CB8wMQglSe8HWPI3HAmJVmLYTOYJ55YxVedOYcTxyux59svsfqpJ5GWkorGhhZYLL0oKSnC2pfX4DemOaIg9n473B63ZCFGOu2YU4QIgikIJiF5yaxcuZTPzzfC42WQnZWJLdu2IiXFgG+/3oOs7Ey0t3fC7/cjMyMdU6dOwpyyMswtmwO9PhmdnYE+5hrcS9Eq8MzSHRfuvT7Dj5qbiKeGY4fKsWL1ShQWFKG1uQVz55Thh5pqVJ87D6fLDY/HA7/PD87PiYxMnToRLzz7LI0zoamphQTxYewpKIzUB6OUJcIoOTEOVpjhcroRH69FQ2MLHANOZKZn4PcPP4DikgKoVWrI5CzkcjmUSiUuXriKRx77I2bMuh0tba1IStKL84TXDTU26hp9z0QzJYVVbhBuJdDL8Vy4yeRK+TrBNHp7e9HR0QWXawDNTa3o7ulB2R2zER+nQXZ2FhRKBVwOl7BloO+IVwGpBtxebPv0v0hNN+CO2beHHT4cD6I9Pvo+iobElmtYk6xkQuE6nvOjpaWNmHeDJxPp7bVCp4vHvv0HsXfvQVSeOgOtRoOFi+aJu+C0O6FVa2AwJEOhkMPhcODEiVNwuJ0w5ubRLsbB7/UPMsvH5HTYxvNBwSUAIT5yQcG4wTlk3+/5bt2cX5chIzMdbW3tOH/+EtRaNex2B1RkKkuXLkJCog41NZdw8MBhFBUVICU1WTQDwZwMKcloJJN74snHMeByoq7uCr1XECBkEzB4I3ge6p4xkp3hiOFj+rdcYFpg5vaZs3H3nXehh0znyPFjWLv2NcyeNQPbtn0hDlRr1GKc2L1rP5QqJeQyMiMZRH8QqKujDa+9+gruWf4A4siPlLQzeXlG+Dl/hAjR2B7KnURQEnyDC4kyXPoS5dN7dn7FR0umJ4d8aNUjBKEdePSxh8iMDuDixcsiCglbK5PJ4PMFUEe4F6B01zfbMeD1iTx8vPUTMi0lXn7uJXT1dIr+Eph/EFViAMqoKGBag89sZFIVuMaTplsJWZYtvwv7yQdqai5CQWYhoMyECUWYv8BE2s0RJ8gk03v/3Q24UHsJ27/YjnaKDa4BD1INBpQfrgDlWoFZRRPgcdNBW4hlLBNGM3mEdMGrmzDfSNt/4OBhdLR1Yv26tXj77U0iEqUYUvDvf34AjUpOaUclssaNQ2cXoZdzAC++/Cds3vwZLl+oxtZPt2INPVefOYH6hsbALtwkDabtg8jG8pK0OURtbR34dMtHaKhrImlZvPXW35FsSMKKFfeivOIwsnKMWLDwbrjdbtEHOB8HFfnF8YoD+Md7m7Bv3y7s2rtfXOjkqUqMFYnMR5tQsCdCQwK+t5MQ+3d/g4cfXiHCZHeXBZ98/D/R5gX0sfRZsGTZAzDNW4SG5kbk5uSSL3DIzc1GaekMkNyQ0Z+PN29FYmICxooEPkNN5Hv3zh0jhgq9PhFXrtSh+nw1Kg4dIah04Oix02JfQUEepk2bgs8//0bcocWLfotXXlqD7u5uzF1wp7hIcnIyKr7fjSZKTyDV5HXkO9EkrfjksQZI62Brnw2pqQbMz5yH5fcug4/zUtCqxIcffSRmsI2NrSKjlp5ebNnyGWG/B2++8QYW3TlfTAhZGQOej8EAw0SYbcTaGLk6kxZR8miGY4wWGRAY6+7pFu1PSLH/88G/yAcG8NcNG3H85CkxSgrBT4gPDqcDE4sn4GjKCbBCsGAki7KDO8AybIBRugqLMFFMhuPDCALJI9CBGfxidKkYfi+Mp0mFGCEkUy888wy0Wq3YJyf/sFOa0W/rR093DzLS0tDXZw3WHkPnE9li5IhzdMCu0EGupnn8/sgxLEISBJTMISKSsVKnCDEtvYY+ovLIKQVGQk1waCF56yDs7+zsotjRDlu/DR6fF4sXL0RN9QWwFD/ik1JJwdwQ7XEspSKudqSqFUhsqcWAtUccH4tC/Ilrh/hlmbB84RcjtZgTSwQMPQtayhqXiTPvvIDHJyXjtWVlUCUkQ6ZQBHZDXIvMR6akOmMALtoxTUom9K2XaPesYl9wsmsSG2aQYTDih4ndQvl89LOb0o72wlkonTYZ9y25BwffWYOexstQJ+jhJ8377b1Ic9ngam4A53GC9zqgpCrPd7YcnFwZqB4l64aVHNXYmBoMDkbMgmMUJNqqH6rEFChvW0wMWjFv0RKom37AuX3bkcnbUKCOQ3LqOFgb69Gu1GGgs5HiiwYTbp0JS8UOMBpdxLrDRXI2WruICmrRGh5VEz9kIj4PtIZMqGdQ1HZZEZ+ZhxmFuVD73WT/bhx5/8+wZk1AvHEqrAYjCdoHH9UUxRMnoefIDggFHitjR9QVK9VyQIBYzNxAHhPEeI6EkJM2vfm3QSnzwpA/FZaOeuzdvAn9GUYUlC2Gj2zfkJoPRfpkNFdW0DgWeVlGKJqr4SKQkFNmOxwHbDSTTFjxTFAuaQ9z7Y8EIUK+RdgIXq1DUsFM2Pva0dfWDGu/C8ZxBhiaTyGXbF/ltsBSfRI+dQKhmB8KbTwUcYlgG2vQ095CZaxy0KwxaOLscEyEtyWYx4ey4ZhloFQ/XLD5o5pgDl46V+q4igOU6PVyGtxy10ooEtPQfKYc+z/cBBfrRM7EKZR3dYs7x/IK6JJT4K2vRq8QTxgpfwgaaizn4MN/pFsyfAsWKRFNFlCPCNRC3k7PjVZKCpvrULZ8BV7927vobmxG7dG9OHK4HDYqW6/W1cI3wCAtdyql8c0U/Z3wUuGUWzwFF8w7oIzTiZYptQ2WCUdoVnKVoFL4lgn3D+5UYKx45YVdCupEsmNhUyRUUhrywGWUoGhCKdpbWmGpO42LZ8/BR/EgnmqNxb97FCqFH9UHd8JG6YjT1U9w7IOP6pNJhUYc3f0VtCSElBt5KDeJFEKKnbwElQLh
X0oMI70O52rBBM3nRELRL9HQeh5x8KGy3IzjVbWwWfvxwc7dsFssuHz2NBo7W1Ciywdnt4KOQ+ChkxC1SoWmc+XwmBZRYaUWq0Oe80H26KpV60TNhTQW0fiAVoNFBBPSMs9E+ED08/CNxgm2HZcCy5XjkGt16Dp/Fms3vUfiyGFtv4KK/ftQWFxEhwEMvFQwyTihWFKIJ4Ie+u6V0+XkUjzcDhvcNJ9ckkxgSN6LoNfzsYA08t3ooTYA1UrjrXD+cAC/mj+X0CYJ/cT8oe++ppQiBf2OfjE5tFD2qzekIk9GdXpGDtRUSMnpN4wByptSUhOhdLVK6gFecu4SfY3Nxo0TbX18qhHujHqkjy9G84WT6GptwZUOC2qqziM7Lx16jRI54/MJwTzgKYdiBSgWYopSQ0edJXB6/NhyslYCo+F8Q5KDYGiUHVTiTYkAn9MGfcks9NFRpnAUqc/MIgY5LF1xH9Ioe1247H5MnjQNxZNvAUMptttuQyYdlrXRaYmccqnaLhvSEjWRFdlozEDK+I0KIVZiAsRSkaSdOBfeumPobWkUS9RcOqZJjtchJbsEjt42eEhQpS5ZFBiETHVX68jnPHR040YGHW1GFjQxXED67mbq2IhpJTvJu21QjZ8BFaXRPnq2kP3rE3RouUBVHsGzlQ4PEhLi4CB/aCLo9dLhmRDFs+k3jEt0GC3sQD01Y2jCoatJb6MGXEOe0QnMUPCyI+OWBdDk/AK1x3ejraFeLJYG6DTE7XIhjiC0z9IFJ9UNOq2AOz4UZ2bD6hyokj22enU+zTETN0LMyO16MliOzEmpVsH4i1lIzy2kn0+14CjAUSYHi80uFj8cOfL4wiJkpKfT4QKHLB37HVNuNs9hWc6MkVU5IiL9GCSccMsVKjrOl5HZuKnWtqKj4TI6as+AcffTkaUKfl6WL9rEoQP7NpAenhvNxEOO92LGjx+BGCGfUkKh0ZIv21Bz9vTGJ59+8XlRALPZrFexvLAL0zEGNNLJ80h910FVmoQkU2lpaeBfDYT/OXBzjIm0uhFjQMwN9o2KOH6jhngVmI853xGz2QgZ1lH2MI3yIXFHeP4nNP7YVE9cfMlw7BezTKbvpR3/Bx465XnKBextAAAAAElFTkSuQmCC"
}
File diff suppressed because one or more lines are too long
@@ -16,7 +16,7 @@
 import argparse
 import os
 from agent.canvas import Canvas
-from api import settings
+from common import settings
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
@@ -19,7 +19,7 @@ import time
 from abc import ABC
 import arxiv
 from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class ArXivParam(ToolParamBase):
@@ -61,14 +61,20 @@ class ArXivParam(ToolParamBase):
 class ArXiv(ToolBase, ABC):
     component_name = "ArXiv"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("ArXiv processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("ArXiv processing"):
+                return
+
             try:
                 sort_choices = {"relevance": arxiv.SortCriterion.Relevance,
                                 "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
@@ -79,12 +85,20 @@ class ArXiv(ToolBase, ABC):
                     max_results=self._param.top_n,
                     sort_by=sort_choices[self._param.sort_by]
                 )
-                self._retrieve_chunks(list(arxiv_client.results(search)),
+                results = list(arxiv_client.results(search))
+
+                if self.check_if_canceled("ArXiv processing"):
+                    return
+
+                self._retrieve_chunks(results,
                                       get_title=lambda r: r.title,
                                       get_url=lambda r: r.pdf_url,
                                       get_content=lambda r: r.summary)
                 return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("ArXiv processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"ArXiv error: {e}")
                 time.sleep(self._param.delay_after_error)
@@ -97,6 +111,6 @@ class ArXiv(ToolBase, ABC):
 
     def thoughts(self) -> str:
         return """
 Keywords: {}
 Looking for the most relevant articles.
 """.format(self.get_input().get("query", "-_-!"))
@@ -17,13 +17,14 @@ import logging
 import re
 import time
 from copy import deepcopy
+import asyncio
 from functools import partial
 from typing import TypedDict, List, Any
 from agent.component.base import ComponentParamBase, ComponentBase
-from api.utils import hash_str2int
-from rag.llm.chat_model import ToolCallSession
-from rag.prompts.prompts import kb_prompt
-from rag.utils.mcp_tool_call_conn import MCPToolCallSession
+from common.misc_utils import hash_str2int
+from rag.prompts.generator import kb_prompt
+from common.mcp_tool_call_conn import MCPToolCallSession, ToolCallSession
+from timeit import default_timer as timer
 
 
 class ToolParameter(TypedDict):
@@ -48,13 +49,21 @@ class LLMToolPluginCallSession(ToolCallSession):
         self.callback = callback
 
     def tool_call(self, name: str, arguments: dict[str, Any]) -> Any:
-        assert name in self.tools_map, f"LLM tool {name} does not exist"
-        if isinstance(self.tools_map[name], MCPToolCallSession):
-            resp = self.tools_map[name].tool_call(name, arguments, 60)
-        else:
-            resp = self.tools_map[name].invoke(**arguments)
+        return asyncio.run(self.tool_call_async(name, arguments))
 
-        self.callback(name, arguments, resp)
+    async def tool_call_async(self, name: str, arguments: dict[str, Any]) -> Any:
+        assert name in self.tools_map, f"LLM tool {name} does not exist"
+        st = timer()
+        tool_obj = self.tools_map[name]
+        if isinstance(tool_obj, MCPToolCallSession):
+            resp = await asyncio.to_thread(tool_obj.tool_call, name, arguments, 60)
+        else:
+            if hasattr(tool_obj, "invoke_async") and asyncio.iscoroutinefunction(tool_obj.invoke_async):
+                resp = await tool_obj.invoke_async(**arguments)
+            else:
+                resp = await asyncio.to_thread(tool_obj.invoke, **arguments)
+
+        self.callback(name, arguments, resp, elapsed_time=timer()-st)
         return resp
 
     def get_tool_obj(self, name):
@@ -123,6 +132,9 @@ class ToolBase(ComponentBase):
         return self._param.get_meta()
 
     def invoke(self, **kwargs):
+        if self.check_if_canceled("Tool processing"):
+            return
+
         self.set_output("_created_time", time.perf_counter())
         try:
             res = self._invoke(**kwargs)
@@ -135,6 +147,33 @@ class ToolBase(ComponentBase):
         self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
         return res
 
+    async def invoke_async(self, **kwargs):
+        """
+        Async wrapper for tool invocation.
+        If `_invoke` is a coroutine, await it directly; otherwise run in a thread to avoid blocking.
+        Mirrors the exception handling of `invoke`.
+        """
+        if self.check_if_canceled("Tool processing"):
+            return
+
+        self.set_output("_created_time", time.perf_counter())
+        try:
+            fn_async = getattr(self, "_invoke_async", None)
+            if fn_async and asyncio.iscoroutinefunction(fn_async):
+                res = await fn_async(**kwargs)
+            elif asyncio.iscoroutinefunction(self._invoke):
+                res = await self._invoke(**kwargs)
+            else:
+                res = await asyncio.to_thread(self._invoke, **kwargs)
+        except Exception as e:
+            self._param.outputs["_ERROR"] = {"value": str(e)}
+            logging.exception(e)
+            res = str(e)
+        self._param.debug_inputs = []
+
+        self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
+        return res
+
     def _retrieve_chunks(self, res_list: list, get_title, get_url, get_content, get_score=None):
         chunks = []
         aggs = []
@@ -164,8 +203,8 @@ class ToolBase(ComponentBase):
                     "count": 1,
                     "url": url
                 })
-        self._canvas.add_refernce(chunks, aggs)
+        self._canvas.add_reference(chunks, aggs)
         self.set_output("formalized_content", "\n".join(kb_prompt({"chunks": chunks, "doc_aggs": aggs}, 200000, True)))
 
     def thoughts(self) -> str:
         return self._canvas.get_component_name(self._id) + " is running..."
@@ -13,16 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import ast
 import base64
+import json
 import logging
 import os
 from abc import ABC
-from enum import StrEnum
 from typing import Optional
 
 from pydantic import BaseModel, Field, field_validator
-from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
-from api import settings
-from api.utils.api_utils import timeout
+from strenum import StrEnum
+
+from agent.tools.base import ToolBase, ToolMeta, ToolParamBase
+from common import settings
+from common.connection_utils import timeout
 
 
 class Language(StrEnum):
@@ -62,16 +66,24 @@ class CodeExecParam(ToolParamBase):
     """
 
     def __init__(self):
-        self.meta:ToolMeta = {
+        self.meta: ToolMeta = {
             "name": "execute_code",
             "description": """
-This tool has a sandbox that can execute code written in 'Python'/'Javascript'. It recieves a piece of code and return a Json string.
+This tool has a sandbox that can execute code written in 'Python'/'Javascript'. It receives a piece of code and return a Json string.
 Here's a code example for Python(`main` function MUST be included):
-def main(arg1: str, arg2: str) -> dict:
+def main() -> dict:
+    \"\"\"
+    Generate Fibonacci numbers within 100.
+    \"\"\"
+    def fibonacci_recursive(n):
+        if n <= 1:
+            return n
+        else:
+            return fibonacci_recursive(n-1) + fibonacci_recursive(n-2)
     return {
-        "result": arg1 + arg2,
+        "result": fibonacci_recursive(100),
     }
 
 Here's a code example for Javascript(`main` function MUST be included and exported):
 const axios = require('axios');
 async function main(args) {
@@ -91,16 +103,12 @@ module.exports = { main };
                 "enum": ["python", "javascript"],
                 "required": True,
             },
-            "script": {
-                "type": "string",
-                "description": "A piece of code in right format. There MUST be main function.",
-                "required": True
-            }
-        }
+            "script": {"type": "string", "description": "A piece of code in right format. There MUST be main function.", "required": True},
+            },
         }
         super().__init__()
         self.lang = Language.PYTHON.value
-        self.script = "def main(arg1: str, arg2: str) -> dict: return {\"result\": arg1 + arg2}"
+        self.script = 'def main(arg1: str, arg2: str) -> dict: return {"result": arg1 + arg2}'
         self.arguments = {}
         self.outputs = {"result": {"value": "", "type": "string"}}
 
@@ -111,18 +119,18 @@ module.exports = { main };
     def get_input_form(self) -> dict[str, dict]:
         res = {}
         for k, v in self.arguments.items():
-            res[k] = {
-                "type": "line",
-                "name": k
-            }
+            res[k] = {"type": "line", "name": k}
         return res
 
 
 class CodeExec(ToolBase, ABC):
     component_name = "CodeExec"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("CodeExec processing"):
+            return
+
         lang = kwargs.get("lang", self._param.lang)
         script = kwargs.get("script", self._param.script)
         arguments = {}
@@ -132,24 +140,33 @@ class CodeExec(ToolBase, ABC):
                 continue
             arguments[k] = self._canvas.get_variable_value(v) if v else None
 
-        self._execute_code(
-            language=lang,
-            code=script,
-            arguments=arguments
-        )
+        self._execute_code(language=lang, code=script, arguments=arguments)
 
     def _execute_code(self, language: str, code: str, arguments: dict):
         import requests
 
+        if self.check_if_canceled("CodeExec execution"):
+            return
+
         try:
             code_b64 = self._encode_code(code)
             code_req = CodeExecutionRequest(code_b64=code_b64, language=language, arguments=arguments).model_dump()
         except Exception as e:
+            if self.check_if_canceled("CodeExec execution"):
+                return
+
             self.set_output("_ERROR", "construct code request error: " + str(e))
 
         try:
-            resp = requests.post(url=f"http://{settings.SANDBOX_HOST}:9385/run", json=code_req, timeout=10)
-            logging.info(f"http://{settings.SANDBOX_HOST}:9385/run", code_req, resp.status_code)
+            if self.check_if_canceled("CodeExec execution"):
+                return "Task has been canceled"
+
+            resp = requests.post(url=f"http://{settings.SANDBOX_HOST}:9385/run", json=code_req, timeout=int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
+            logging.info(f"http://{settings.SANDBOX_HOST}:9385/run, code_req: {code_req}, resp.status_code {resp.status_code}:")
+
+            if self.check_if_canceled("CodeExec execution"):
+                return "Task has been canceled"
+
             if resp.status_code != 200:
                 resp.raise_for_status()
             body = resp.json()
@@ -158,30 +175,17 @@ class CodeExec(ToolBase, ABC):
                 if stderr:
                     self.set_output("_ERROR", stderr)
                     return
-                try:
-                    rt = eval(body.get("stdout", ""))
-                except Exception:
-                    rt = body.get("stdout", "")
-                logging.info(f"http://{settings.SANDBOX_HOST}:9385/run -> {rt}")
-                if isinstance(rt, tuple):
-                    for i, (k, o) in enumerate(self._param.outputs.items()):
-                        if k.find("_") == 0:
-                            continue
-                        o["value"] = rt[i]
-                elif isinstance(rt, dict):
-                    for i, (k, o) in enumerate(self._param.outputs.items()):
-                        if k not in rt or k.find("_") == 0:
-                            continue
-                        o["value"] = rt[k]
-                else:
-                    for i, (k, o) in enumerate(self._param.outputs.items()):
-                        if k.find("_") == 0:
-                            continue
-                        o["value"] = rt
+                raw_stdout = body.get("stdout", "")
+                parsed_stdout = self._deserialize_stdout(raw_stdout)
+                logging.info(f"[CodeExec]: http://{settings.SANDBOX_HOST}:9385/run -> {parsed_stdout}")
+                self._populate_outputs(parsed_stdout, raw_stdout)
             else:
                 self.set_output("_ERROR", "There is no response from sandbox")
 
         except Exception as e:
+            if self.check_if_canceled("CodeExec execution"):
+                return
+
             self.set_output("_ERROR", "Exception executing code: " + str(e))
 
         return self.output()
@@ -191,3 +195,149 @@ class CodeExec(ToolBase, ABC):
 
     def thoughts(self) -> str:
         return "Running a short script to process data."
+
+    def _deserialize_stdout(self, stdout: str):
+        text = str(stdout).strip()
+        if not text:
+            return ""
+        for loader in (json.loads, ast.literal_eval):
+            try:
+                return loader(text)
+            except Exception:
+                continue
+        return text
+
+    def _coerce_output_value(self, value, expected_type: Optional[str]):
+        if expected_type is None:
+            return value
+
+        etype = expected_type.strip().lower()
+        inner_type = None
+        if etype.startswith("array<") and etype.endswith(">"):
+            inner_type = etype[6:-1].strip()
+            etype = "array"
+
+        try:
+            if etype == "string":
+                return "" if value is None else str(value)
+
+            if etype == "number":
+                if value is None or value == "":
+                    return None
+                if isinstance(value, (int, float)):
+                    return value
+                if isinstance(value, str):
+                    try:
+                        return float(value)
+                    except Exception:
+                        return value
+                return float(value)
+
+            if etype == "boolean":
+                if isinstance(value, bool):
+                    return value
+                if isinstance(value, str):
+                    lv = value.lower()
+                    if lv in ("true", "1", "yes", "y", "on"):
+                        return True
+                    if lv in ("false", "0", "no", "n", "off"):
+                        return False
+                return bool(value)
+
+            if etype == "array":
+                candidate = value
+                if isinstance(candidate, str):
+                    parsed = self._deserialize_stdout(candidate)
+                    candidate = parsed
+                if isinstance(candidate, tuple):
+                    candidate = list(candidate)
+                if not isinstance(candidate, list):
+                    candidate = [] if candidate is None else [candidate]
+
+                if inner_type == "string":
+                    return ["" if v is None else str(v) for v in candidate]
+                if inner_type == "number":
+                    coerced = []
+                    for v in candidate:
+                        try:
+                            if v is None or v == "":
+                                coerced.append(None)
+                            elif isinstance(v, (int, float)):
+                                coerced.append(v)
+                            else:
+                                coerced.append(float(v))
+                        except Exception:
+                            coerced.append(v)
+                    return coerced
+                return candidate
+
+            if etype == "object":
+                if isinstance(value, dict):
+                    return value
+                if isinstance(value, str):
+                    parsed = self._deserialize_stdout(value)
+                    if isinstance(parsed, dict):
+                        return parsed
+                return value
+        except Exception:
+            return value
+
+        return value
+
+    def _populate_outputs(self, parsed_stdout, raw_stdout: str):
+        outputs_items = list(self._param.outputs.items())
+        logging.info(f"[CodeExec]: outputs schema keys: {[k for k, _ in outputs_items]}")
+        if not outputs_items:
+            return
+
+        if isinstance(parsed_stdout, dict):
+            for key, meta in outputs_items:
+                if key.startswith("_"):
+                    continue
+                val = self._get_by_path(parsed_stdout, key)
+                coerced = self._coerce_output_value(val, meta.get("type"))
+                logging.info(f"[CodeExec]: populate dict key='{key}' raw='{val}' coerced='{coerced}'")
+                self.set_output(key, coerced)
+            return
+
+        if isinstance(parsed_stdout, (list, tuple)):
+            for idx, (key, meta) in enumerate(outputs_items):
+                if key.startswith("_"):
+                    continue
+                val = parsed_stdout[idx] if idx < len(parsed_stdout) else None
+                coerced = self._coerce_output_value(val, meta.get("type"))
+                logging.info(f"[CodeExec]: populate list key='{key}' raw='{val}' coerced='{coerced}'")
+                self.set_output(key, coerced)
+            return
+
+        default_val = parsed_stdout if parsed_stdout is not None else raw_stdout
+        for idx, (key, meta) in enumerate(outputs_items):
+            if key.startswith("_"):
+                continue
+            val = default_val if idx == 0 else None
+            coerced = self._coerce_output_value(val, meta.get("type"))
+            logging.info(f"[CodeExec]: populate scalar key='{key}' raw='{val}' coerced='{coerced}'")
+            self.set_output(key, coerced)
+
+    def _get_by_path(self, data, path: str):
+        if not path:
+            return None
+        cur = data
+        for part in path.split("."):
+            part = part.strip()
+            if not part:
+                return None
+            if isinstance(cur, dict):
+                cur = cur.get(part)
+            elif isinstance(cur, list):
+                try:
+                    idx = int(part)
+                    cur = cur[idx]
+                except Exception:
+                    return None
+            else:
+                return None
+            if cur is None:
+                return None
        logging.info(f"[CodeExec]: resolve path '{path}' -> {cur}")
+        return cur
@@ -16,9 +16,8 @@
 from abc import ABC
 import asyncio
 from crawl4ai import AsyncWebCrawler
 
 from agent.tools.base import ToolParamBase, ToolBase
-from api.utils.web_utils import is_valid_url
 
 
 class CrawlerParam(ToolParamBase):
@@ -30,7 +29,7 @@ class CrawlerParam(ToolParamBase):
         super().__init__()
         self.proxy = None
         self.extract_type = "markdown"
 
     def check(self):
         self.check_valid_value(self.extract_type, "Type of content from the crawler", ['html', 'markdown', 'content'])
 
@@ -39,6 +38,7 @@ class Crawler(ToolBase, ABC):
     component_name = "Crawler"
 
     def _run(self, history, **kwargs):
+        from api.utils.web_utils import is_valid_url
         ans = self.get_input()
         ans = " - ".join(ans["content"]) if "content" in ans else ""
         if not is_valid_url(ans):
@@ -47,22 +47,28 @@ class Crawler(ToolBase, ABC):
             result = asyncio.run(self.get_web(ans))
 
             return Crawler.be_output(result)
 
         except Exception as e:
             return Crawler.be_output(f"An unexpected error occurred: {str(e)}")
 
     async def get_web(self, url):
+        if self.check_if_canceled("Crawler async operation"):
+            return
+
         proxy = self._param.proxy if self._param.proxy else None
         async with AsyncWebCrawler(verbose=True, proxy=proxy) as crawler:
             result = await crawler.arun(
                 url=url,
                 bypass_cache=True
             )
 
+            if self.check_if_canceled("Crawler async operation"):
+                return
+
             if self._param.extract_type == 'html':
                 return result.cleaned_html
             elif self._param.extract_type == 'markdown':
                 return result.markdown
             elif self._param.extract_type == 'content':
-                result.extracted_content
+                return result.extracted_content
             return result.markdown
@@ -43,14 +43,19 @@ class DeepLParam(ComponentParamBase):
 
 
 class DeepL(ComponentBase, ABC):
-    component_name = "GitHub"
+    component_name = "DeepL"
 
     def _run(self, history, **kwargs):
+        if self.check_if_canceled("DeepL processing"):
+            return
         ans = self.get_input()
         ans = " - ".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return DeepL.be_output("")
 
+        if self.check_if_canceled("DeepL processing"):
+            return
+
         try:
             translator = deepl.Translator(self._param.auth_key)
             result = translator.translate_text(ans, source_lang=self._param.source_lang,
@@ -58,4 +63,6 @@ class DeepL(ComponentBase, ABC):
 
             return DeepL.be_output(result.text)
         except Exception as e:
+            if self.check_if_canceled("DeepL processing"):
+                return
             DeepL.be_output("**Error**:" + str(e))
@@ -19,7 +19,7 @@ import time
 from abc import ABC
 from duckduckgo_search import DDGS
 from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class DuckDuckGoParam(ToolParamBase):
@@ -73,19 +73,32 @@ class DuckDuckGoParam(ToolParamBase):
 class DuckDuckGo(ToolBase, ABC):
     component_name = "DuckDuckGo"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("DuckDuckGo processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("DuckDuckGo processing"):
+                return
+
             try:
                 if kwargs.get("topic", "general") == "general":
                     with DDGS() as ddgs:
+                        if self.check_if_canceled("DuckDuckGo processing"):
+                            return
+
                         # {'title': '', 'href': '', 'body': ''}
                         duck_res = ddgs.text(kwargs["query"], max_results=self._param.top_n)
+
+                        if self.check_if_canceled("DuckDuckGo processing"):
+                            return
+
                         self._retrieve_chunks(duck_res,
                                               get_title=lambda r: r["title"],
                                               get_url=lambda r: r.get("href", r.get("url")),
@@ -94,8 +107,15 @@ class DuckDuckGo(ToolBase, ABC):
                     return self.output("formalized_content")
                 else:
                     with DDGS() as ddgs:
+                        if self.check_if_canceled("DuckDuckGo processing"):
+                            return
+
                         # {'date': '', 'title': '', 'body': '', 'url': '', 'image': '', 'source': ''}
                         duck_res = ddgs.news(kwargs["query"], max_results=self._param.top_n)
+
+                        if self.check_if_canceled("DuckDuckGo processing"):
+                            return
+
                         self._retrieve_chunks(duck_res,
                                               get_title=lambda r: r["title"],
                                               get_url=lambda r: r.get("href", r.get("url")),
@@ -103,6 +123,9 @@ class DuckDuckGo(ToolBase, ABC):
                     self.set_output("json", duck_res)
                     return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("DuckDuckGo processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"DuckDuckGo error: {e}")
                 time.sleep(self._param.delay_after_error)
@@ -115,6 +138,6 @@ class DuckDuckGo(ToolBase, ABC):
 
     def thoughts(self) -> str:
         return """
 Keywords: {}
 Looking for the most relevant articles.
 """.format(self.get_input().get("query", "-_-!"))
@@ -25,7 +25,7 @@ from email.header import Header
 from email.utils import formataddr
 
 from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class EmailParam(ToolParamBase):
@@ -98,22 +98,30 @@ class EmailParam(ToolParamBase):
 
 class Email(ToolBase, ABC):
     component_name = "Email"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Email processing"):
+            return
+
         if not kwargs.get("to_email"):
             self.set_output("success", False)
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("Email processing"):
+                return
+
             try:
                 # Parse JSON string passed from upstream
                 email_data = kwargs
 
                 # Validate required fields
                 if "to_email" not in email_data:
-                    return Email.be_output("Missing required field: to_email")
+                    self.set_output("_ERROR", "Missing required field: to_email")
+                    self.set_output("success", False)
+                    return False
 
                 # Create email object
                 msg = MIMEMultipart('alternative')
@@ -133,6 +141,9 @@ class Email(ToolBase, ABC):
                 # Connect to SMTP server and send
                 logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")
 
+                if self.check_if_canceled("Email processing"):
+                    return
+
                 context = smtplib.ssl.create_default_context()
                 with smtplib.SMTP(self._param.smtp_server, self._param.smtp_port) as server:
                     server.ehlo()
@@ -149,6 +160,10 @@ class Email(ToolBase, ABC):
 
                     # Send email
                     logging.info(f"Sending email to recipients: {recipients}")
+
+                    if self.check_if_canceled("Email processing"):
+                        return
+
                     try:
                         server.send_message(msg, self._param.email, recipients)
                         success = True
@@ -212,4 +227,4 @@ class Email(ToolBase, ABC):
 To: {}
 Subject: {}
 Your email is on its way—sit tight!
 """.format(inputs.get("to_email", "-_-!"), inputs.get("subject", "-_-!"))
@@ -13,14 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
 import os
+import re
 from abc import ABC
 import pandas as pd
 import pymysql
 import psycopg2
 import pyodbc
 from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class ExeSQLParam(ToolParamBase):
@@ -51,12 +53,13 @@ class ExeSQLParam(ToolParamBase):
         self.max_records = 1024
 
     def check(self):
-        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
+        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgres', 'mariadb', 'mssql', 'IBM DB2', 'trino'])
         self.check_empty(self.database, "Database name")
         self.check_empty(self.username, "database username")
         self.check_empty(self.host, "IP Address")
         self.check_positive_integer(self.port, "IP Port")
-        self.check_empty(self.password, "Database password")
+        if self.db_type != "trino":
+            self.check_empty(self.password, "Database password")
         self.check_positive_integer(self.max_records, "Maximum number of records")
         if self.database == "rag_flow":
             if self.host == "ragflow-mysql":
@@ -76,17 +79,48 @@ class ExeSQLParam(ToolParamBase):
 class ExeSQL(ToolBase, ABC):
     component_name = "ExeSQL"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("ExeSQL processing"):
+            return
+
+        def convert_decimals(obj):
+            from decimal import Decimal
+            if isinstance(obj, Decimal):
+                return float(obj)  # or str(obj)
+            elif isinstance(obj, dict):
+                return {k: convert_decimals(v) for k, v in obj.items()}
+            elif isinstance(obj, list):
+                return [convert_decimals(item) for item in obj]
+            return obj
+
         sql = kwargs.get("sql")
         if not sql:
             raise Exception("SQL for `ExeSQL` MUST not be empty.")
-        sqls = sql.split(";")
+
+        if self.check_if_canceled("ExeSQL processing"):
+            return
+
+        vars = self.get_input_elements_from_text(sql)
+        args = {}
+        for k, o in vars.items():
+            args[k] = o["value"]
+            if not isinstance(args[k], str):
+                try:
+                    args[k] = json.dumps(args[k], ensure_ascii=False)
+                except Exception:
+                    args[k] = str(args[k])
+            self.set_input_value(k, args[k])
+        sql = self.string_format(sql, args)
+
+        if self.check_if_canceled("ExeSQL processing"):
+            return
+
+        sqls = sql.split(";")
         if self._param.db_type in ["mysql", "mariadb"]:
             db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
-        elif self._param.db_type == 'postgresql':
+        elif self._param.db_type == 'postgres':
             db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
                                   port=self._param.port, password=self._param.password)
         elif self._param.db_type == 'mssql':
@@ -98,6 +132,101 @@ class ExeSQL(ToolBase, ABC):
                 r'PWD=' + self._param.password
             )
             db = pyodbc.connect(conn_str)
+        elif self._param.db_type == 'trino':
+            try:
+                import trino
+                from trino.auth import BasicAuthentication
+            except Exception:
+                raise Exception("Missing dependency 'trino'. Please install: pip install trino")
+
+            def _parse_catalog_schema(db: str):
+                if not db:
+                    return None, None
+                if "." in db:
+                    c, s = db.split(".", 1)
+                elif "/" in db:
+                    c, s = db.split("/", 1)
+                else:
+                    c, s = db, "default"
+                return c, s
+
+            catalog, schema = _parse_catalog_schema(self._param.database)
+            if not catalog:
+                raise Exception("For Trino, `database` must be 'catalog.schema' or at least 'catalog'.")
+
+            http_scheme = "https" if os.environ.get("TRINO_USE_TLS", "0") == "1" else "http"
+            auth = None
+            if http_scheme == "https" and self._param.password:
+                auth = BasicAuthentication(self._param.username, self._param.password)
+
+            try:
+                db = trino.dbapi.connect(
+                    host=self._param.host,
+                    port=int(self._param.port or 8080),
+                    user=self._param.username or "ragflow",
+                    catalog=catalog,
+                    schema=schema or "default",
+                    http_scheme=http_scheme,
+                    auth=auth
+                )
+            except Exception as e:
+                raise Exception("Database Connection Failed! \n" + str(e))
+        elif self._param.db_type == 'IBM DB2':
+            import ibm_db
+            conn_str = (
+                f"DATABASE={self._param.database};"
+                f"HOSTNAME={self._param.host};"
+                f"PORT={self._param.port};"
+                f"PROTOCOL=TCPIP;"
+                f"UID={self._param.username};"
+                f"PWD={self._param.password};"
+            )
+            try:
+                conn = ibm_db.connect(conn_str, "", "")
+            except Exception as e:
+                raise Exception("Database Connection Failed! \n" + str(e))
+
+            sql_res = []
+            formalized_content = []
+            for single_sql in sqls:
+                if self.check_if_canceled("ExeSQL processing"):
+                    ibm_db.close(conn)
+                    return
+
+                single_sql = single_sql.replace("```", "").strip()
+                if not single_sql:
+                    continue
+                single_sql = re.sub(r"\[ID:[0-9]+\]", "", single_sql)
+
+                stmt = ibm_db.exec_immediate(conn, single_sql)
+                rows = []
+                row = ibm_db.fetch_assoc(stmt)
+                while row and len(rows) < self._param.max_records:
+                    if self.check_if_canceled("ExeSQL processing"):
+                        ibm_db.close(conn)
+                        return
+                    rows.append(row)
+                    row = ibm_db.fetch_assoc(stmt)
+
+                if not rows:
+                    sql_res.append({"content": "No record in the database!"})
+                    continue
+
+                df = pd.DataFrame(rows)
+                for col in df.columns:
+                    if pd.api.types.is_datetime64_any_dtype(df[col]):
+                        df[col] = df[col].dt.strftime("%Y-%m-%d")
+
+                df = df.where(pd.notnull(df), None)
+
+                sql_res.append(convert_decimals(df.to_dict(orient="records")))
+                formalized_content.append(df.to_markdown(index=False, floatfmt=".6f"))
+
+            ibm_db.close(conn)
+
+            self.set_output("json", sql_res)
+            self.set_output("formalized_content", "\n\n".join(formalized_content))
+            return self.output("formalized_content")
         try:
             cursor = db.cursor()
         except Exception as e:
@@ -106,10 +235,15 @@ class ExeSQL(ToolBase, ABC):
         sql_res = []
         formalized_content = []
         for single_sql in sqls:
+            if self.check_if_canceled("ExeSQL processing"):
+                cursor.close()
+                db.close()
+                return
+
             single_sql = single_sql.replace('```','')
             if not single_sql:
                 continue
+            single_sql = re.sub(r"\[ID:[0-9]+\]", "", single_sql)
             cursor.execute(single_sql)
             if cursor.rowcount == 0:
                 sql_res.append({"content": "No record in the database!"})
@@ -121,12 +255,21 @@ class ExeSQL(ToolBase, ABC):
                 single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.max_records)])
                 single_res.columns = [i[0] for i in cursor.description]
 
-                sql_res.append(single_res.to_dict(orient='records'))
+                for col in single_res.columns:
+                    if pd.api.types.is_datetime64_any_dtype(single_res[col]):
+                        single_res[col] = single_res[col].dt.strftime('%Y-%m-%d')
+
+                single_res = single_res.where(pd.notnull(single_res), None)
+
+                sql_res.append(convert_decimals(single_res.to_dict(orient='records')))
                 formalized_content.append(single_res.to_markdown(index=False, floatfmt=".6f"))
+
+        cursor.close()
+        db.close()
+
         self.set_output("json", sql_res)
         self.set_output("formalized_content", "\n\n".join(formalized_content))
         return self.output("formalized_content")
 
     def thoughts(self) -> str:
         return "Query sent—waiting for the data."
@@ -19,7 +19,7 @@ import time
 from abc import ABC
 import requests
 from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class GitHubParam(ToolParamBase):
@@ -57,19 +57,29 @@ class GitHubParam(ToolParamBase):
 class GitHub(ToolBase, ABC):
     component_name = "GitHub"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("GitHub processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("GitHub processing"):
+                return
+
             try:
                 url = 'https://api.github.com/search/repositories?q=' + kwargs["query"] + '&sort=stars&order=desc&per_page=' + str(
                     self._param.top_n)
                 headers = {"Content-Type": "application/vnd.github+json", "X-GitHub-Api-Version": '2022-11-28'}
                 response = requests.get(url=url, headers=headers).json()
+
+                if self.check_if_canceled("GitHub processing"):
+                    return
+
                 self._retrieve_chunks(response['items'],
                                       get_title=lambda r: r["name"],
                                       get_url=lambda r: r["html_url"],
@@ -77,6 +87,9 @@ class GitHub(ToolBase, ABC):
                 self.set_output("json", response['items'])
                 return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("GitHub processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"GitHub error: {e}")
                 time.sleep(self._param.delay_after_error)
@@ -88,4 +101,4 @@ class GitHub(ToolBase, ABC):
         assert False, self.output()
 
     def thoughts(self) -> str:
         return "Scanning GitHub repos related to `{}`.".format(self.get_input().get("query", "-_-!"))
@ -19,7 +19,7 @@ import time
|
|||||||
from abc import ABC
|
from abc import ABC
|
||||||
from serpapi import GoogleSearch
|
from serpapi import GoogleSearch
|
||||||
from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
|
from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
|
||||||
from api.utils.api_utils import timeout
|
from common.connection_utils import timeout
|
||||||
|
|
||||||
|
|
||||||
class GoogleParam(ToolParamBase):
|
class GoogleParam(ToolParamBase):
|
||||||
@ -116,8 +116,11 @@ class GoogleParam(ToolParamBase):
|
|||||||
class Google(ToolBase, ABC):
|
class Google(ToolBase, ABC):
|
||||||
component_name = "Google"
|
component_name = "Google"
|
||||||
|
|
||||||
@timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
|
@timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
|
||||||
def _invoke(self, **kwargs):
|
def _invoke(self, **kwargs):
|
||||||
|
if self.check_if_canceled("Google processing"):
|
||||||
|
return
|
||||||
|
|
||||||
if not kwargs.get("q"):
|
if not kwargs.get("q"):
|
||||||
self.set_output("formalized_content", "")
|
self.set_output("formalized_content", "")
|
||||||
return ""
|
return ""
|
||||||
@ -132,8 +135,15 @@ class Google(ToolBase, ABC):
|
|||||||
}
|
}
|
||||||
last_e = ""
|
last_e = ""
|
||||||
for _ in range(self._param.max_retries+1):
|
for _ in range(self._param.max_retries+1):
|
||||||
|
if self.check_if_canceled("Google processing"):
|
||||||
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
search = GoogleSearch(params).get_dict()
|
search = GoogleSearch(params).get_dict()
|
||||||
|
|
||||||
|
if self.check_if_canceled("Google processing"):
|
||||||
|
return
|
||||||
|
|
||||||
self._retrieve_chunks(search["organic_results"],
|
self._retrieve_chunks(search["organic_results"],
|
||||||
get_title=lambda r: r["title"],
|
get_title=lambda r: r["title"],
|
||||||
get_url=lambda r: r["link"],
|
get_url=lambda r: r["link"],
|
||||||
@ -142,6 +152,9 @@ class Google(ToolBase, ABC):
|
|||||||
self.set_output("json", search["organic_results"])
|
self.set_output("json", search["organic_results"])
|
||||||
return self.output("formalized_content")
|
return self.output("formalized_content")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
if self.check_if_canceled("Google processing"):
|
||||||
|
return
|
||||||
|
|
||||||
last_e = e
|
last_e = e
|
||||||
logging.exception(f"Google error: {e}")
|
logging.exception(f"Google error: {e}")
|
||||||
time.sleep(self._param.delay_after_error)
|
time.sleep(self._param.delay_after_error)
|
||||||
@ -154,6 +167,6 @@ class Google(ToolBase, ABC):
|
|||||||
|
|
||||||
def thoughts(self) -> str:
|
def thoughts(self) -> str:
|
||||||
return """
|
return """
|
||||||
Keywords: {}
|
Keywords: {}
|
||||||
Looking for the most relevant articles.
|
Looking for the most relevant articles.
|
||||||
""".format(self.get_input().get("query", "-_-!"))
|
""".format(self.get_input().get("query", "-_-!"))
|
||||||
|
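The check_if_canceled(...) guards added across every tool in this diff follow one cooperative-cancellation pattern: poll before each retry attempt, again after each network call, and once more inside the exception handler so a canceled run neither sleeps nor retries. A reduced sketch of that pattern in isolation (the class and the fetch callable below are illustrative stand-ins, not the repo's base classes):

import time

class CancellableToolSketch:
    """Illustrative only: mirrors the polling points this diff adds."""

    def __init__(self):
        self._canceled = False  # the real flag lives on the canvas/task state

    def check_if_canceled(self, where: str) -> bool:
        # The production hook also logs `where`; this stand-in just reads a flag.
        return self._canceled

    def invoke(self, fetch, max_retries: int = 2, delay: float = 1.0):
        for _ in range(max_retries + 1):
            if self.check_if_canceled("before attempt"):
                return None              # cheap exit between retries
            try:
                result = fetch()
                if self.check_if_canceled("after fetch"):
                    return None          # discard work finished after cancel
                return result
            except Exception:
                if self.check_if_canceled("in error handler"):
                    return None          # canceled: skip the backoff sleep
                time.sleep(delay)
        return None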
@@ -19,7 +19,7 @@ import time
 from abc import ABC
 from scholarly import scholarly
 from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class GoogleScholarParam(ToolParamBase):

@@ -63,17 +63,27 @@ class GoogleScholarParam(ToolParamBase):
 class GoogleScholar(ToolBase, ABC):
     component_name = "GoogleScholar"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("GoogleScholar processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("GoogleScholar processing"):
+                return
+
             try:
                 scholar_client = scholarly.search_pubs(kwargs["query"], patents=self._param.patents, year_low=self._param.year_low,
                                                        year_high=self._param.year_high, sort_by=self._param.sort_by)
+
+                if self.check_if_canceled("GoogleScholar processing"):
+                    return
+
                 self._retrieve_chunks(scholar_client,
                                       get_title=lambda r: r['bib']['title'],
                                       get_url=lambda r: r["pub_url"],

@@ -82,6 +92,9 @@ class GoogleScholar(ToolBase, ABC):
                 self.set_output("json", list(scholar_client))
                 return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("GoogleScholar processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"GoogleScholar error: {e}")
                 time.sleep(self._param.delay_after_error)

@@ -93,4 +106,4 @@ class GoogleScholar(ToolBase, ABC):
         assert False, self.output()
 
     def thoughts(self) -> str:
         return "Looking for scholarly papers on `{}`,” prioritising reputable sources.".format(self.get_input().get("query", "-_-!"))
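One subtlety worth noting in the GoogleScholar hunk: scholarly.search_pubs returns a lazy iterator, which is first fed to _retrieve_chunks and then re-used as list(scholar_client) for the json output; whatever the first pass consumed will be missing from the second. A hedged sketch of the materialize-once idiom (whether _retrieve_chunks actually drains the iterator depends on its implementation in agent/tools/base):

from itertools import islice

def demo(scholar_client, top_n=5):
    # Drain the lazy iterator exactly once, up to top_n items...
    results = list(islice(scholar_client, top_n))
    # ...then reuse the materialized list for every consumer.
    titles = [r['bib']['title'] for r in results]
    as_json = results
    return titles, as_json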
@@ -50,6 +50,9 @@ class Jin10(ComponentBase, ABC):
     component_name = "Jin10"
 
     def _run(self, history, **kwargs):
+        if self.check_if_canceled("Jin10 processing"):
+            return
+
         ans = self.get_input()
         ans = " - ".join(ans["content"]) if "content" in ans else ""
         if not ans:

@@ -58,6 +61,9 @@ class Jin10(ComponentBase, ABC):
         jin10_res = []
         headers = {'secret-key': self._param.secret_key}
         try:
+            if self.check_if_canceled("Jin10 processing"):
+                return
+
             if self._param.type == "flash":
                 params = {
                     'category': self._param.flash_type,

@@ -69,6 +75,8 @@ class Jin10(ComponentBase, ABC):
                     headers=headers, data=json.dumps(params))
                 response = response.json()
                 for i in response['data']:
+                    if self.check_if_canceled("Jin10 processing"):
+                        return
                     jin10_res.append({"content": i['data']['content']})
             if self._param.type == "calendar":
                 params = {

@@ -79,6 +87,8 @@ class Jin10(ComponentBase, ABC):
                     headers=headers, data=json.dumps(params))
 
                 response = response.json()
+                if self.check_if_canceled("Jin10 processing"):
+                    return
                 jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
             if self._param.type == "symbols":
                 params = {

@@ -90,8 +100,12 @@ class Jin10(ComponentBase, ABC):
                     url='https://open-data-api.jin10.com/data-api/' + self._param.symbols_datatype + '?type=' + self._param.symbols_type,
                     headers=headers, data=json.dumps(params))
                 response = response.json()
+                if self.check_if_canceled("Jin10 processing"):
+                    return
                 if self._param.symbols_datatype == "symbols":
                     for i in response['data']:
+                        if self.check_if_canceled("Jin10 processing"):
+                            return
                         i['Commodity Code'] = i['c']
                         i['Stock Exchange'] = i['e']
                         i['Commodity Name'] = i['n']

@@ -99,6 +113,8 @@ class Jin10(ComponentBase, ABC):
                         del i['c'], i['e'], i['n'], i['t']
                 if self._param.symbols_datatype == "quotes":
                     for i in response['data']:
+                        if self.check_if_canceled("Jin10 processing"):
+                            return
                         i['Selling Price'] = i['a']
                         i['Buying Price'] = i['b']
                         i['Commodity Code'] = i['c']

@@ -120,8 +136,12 @@ class Jin10(ComponentBase, ABC):
                     url='https://open-data-api.jin10.com/data-api/news',
                     headers=headers, data=json.dumps(params))
                 response = response.json()
+                if self.check_if_canceled("Jin10 processing"):
+                    return
                 jin10_res.append({"content": pd.DataFrame(response['data']).to_markdown()})
         except Exception as e:
+            if self.check_if_canceled("Jin10 processing"):
+                return
             return Jin10.be_output("**ERROR**: " + str(e))
 
         if not jin10_res:
@@ -21,7 +21,7 @@ from Bio import Entrez
 import re
 import xml.etree.ElementTree as ET
 from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class PubMedParam(ToolParamBase):

@@ -33,7 +33,7 @@ class PubMedParam(ToolParamBase):
         self.meta:ToolMeta = {
             "name": "pubmed_search",
             "description": """
PubMed is an openly accessible, free database which includes primarily the MEDLINE database of references and abstracts on life sciences and biomedical topics.
In addition to MEDLINE, PubMed provides access to:
- older references from the print version of Index Medicus, back to 1951 and earlier
- references to some journals before they were indexed in Index Medicus and MEDLINE, for instance Science, BMJ, and Annals of Surgery

@@ -69,31 +69,42 @@ In addition to MEDLINE, PubMed provides access to:
 class PubMed(ToolBase, ABC):
     component_name = "PubMed"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("PubMed processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("PubMed processing"):
+                return
+
             try:
                 Entrez.email = self._param.email
                 pubmedids = Entrez.read(Entrez.esearch(db='pubmed', retmax=self._param.top_n, term=kwargs["query"]))['IdList']
+
+                if self.check_if_canceled("PubMed processing"):
+                    return
+
                 pubmedcnt = ET.fromstring(re.sub(r'<(/?)b>|<(/?)i>', '', Entrez.efetch(db='pubmed', id=",".join(pubmedids),
                                                                                        retmode="xml").read().decode("utf-8")))
+
+                if self.check_if_canceled("PubMed processing"):
+                    return
+
                 self._retrieve_chunks(pubmedcnt.findall("PubmedArticle"),
                                       get_title=lambda child: child.find("MedlineCitation").find("Article").find("ArticleTitle").text,
                                       get_url=lambda child: "https://pubmed.ncbi.nlm.nih.gov/" + child.find("MedlineCitation").find("PMID").text,
-                                      get_content=lambda child: child.find("MedlineCitation") \
-                                                                    .find("Article") \
-                                                                    .find("Abstract") \
-                                                                    .find("AbstractText").text \
-                                          if child.find("MedlineCitation")\
-                                              .find("Article").find("Abstract") \
-                                          else "No abstract available")
+                                      get_content=lambda child: self._format_pubmed_content(child),)
                 return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("PubMed processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"PubMed error: {e}")
                 time.sleep(self._param.delay_after_error)

@@ -104,5 +115,50 @@ class PubMed(ToolBase, ABC):
 
         assert False, self.output()
 
+    def _format_pubmed_content(self, child):
+        """Extract structured reference info from PubMed XML"""
+        def safe_find(path):
+            node = child
+            for p in path.split("/"):
+                if node is None:
+                    return None
+                node = node.find(p)
+            return node.text if node is not None and node.text else None
+
+        title = safe_find("MedlineCitation/Article/ArticleTitle") or "No title"
+        abstract = safe_find("MedlineCitation/Article/Abstract/AbstractText") or "No abstract available"
+        journal = safe_find("MedlineCitation/Article/Journal/Title") or "Unknown Journal"
+        volume = safe_find("MedlineCitation/Article/Journal/JournalIssue/Volume") or "-"
+        issue = safe_find("MedlineCitation/Article/Journal/JournalIssue/Issue") or "-"
+        pages = safe_find("MedlineCitation/Article/Pagination/MedlinePgn") or "-"
+
+        # Authors
+        authors = []
+        for author in child.findall(".//AuthorList/Author"):
+            lastname = safe_find("LastName") or ""
+            forename = safe_find("ForeName") or ""
+            fullname = f"{forename} {lastname}".strip()
+            if fullname:
+                authors.append(fullname)
+        authors_str = ", ".join(authors) if authors else "Unknown Authors"
+
+        # DOI
+        doi = None
+        for eid in child.findall(".//ArticleId"):
+            if eid.attrib.get("IdType") == "doi":
+                doi = eid.text
+                break
+
+        return (
+            f"Title: {title}\n"
+            f"Authors: {authors_str}\n"
+            f"Journal: {journal}\n"
+            f"Volume: {volume}\n"
+            f"Issue: {issue}\n"
+            f"Pages: {pages}\n"
+            f"DOI: {doi or '-'}\n"
+            f"Abstract: {abstract.strip()}"
+        )
+
     def thoughts(self) -> str:
         return "Looking for scholarly papers on `{}`,” prioritising reputable sources.".format(self.get_input().get("query", "-_-!"))
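Worth flagging in the new _format_pubmed_content: safe_find closes over child, so the calls inside the author loop (safe_find("LastName"), safe_find("ForeName")) appear to search the whole article element rather than the current author node, which would leave the names empty. A hedged sketch of a node-parameterized variant (the helper names here are hypothetical, not part of the commit):

import xml.etree.ElementTree as ET

def safe_find(node, path):
    # Walk the slash-separated path from an explicit start node.
    for p in path.split("/"):
        if node is None:
            return None
        node = node.find(p)
    return node.text if node is not None and node.text else None

def author_names(child: ET.Element) -> str:
    authors = []
    for author in child.findall(".//AuthorList/Author"):
        # Start from `author`, not from the whole article element.
        lastname = safe_find(author, "LastName") or ""
        forename = safe_find(author, "ForeName") or ""
        fullname = f"{forename} {lastname}".strip()
        if fullname:
            authors.append(fullname)
    return ", ".join(authors) if authors else "Unknown Authors"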
@@ -58,12 +58,18 @@ class QWeather(ComponentBase, ABC):
     component_name = "QWeather"
 
     def _run(self, history, **kwargs):
+        if self.check_if_canceled("Qweather processing"):
+            return
+
         ans = self.get_input()
         ans = "".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return QWeather.be_output("")
 
         try:
+            if self.check_if_canceled("Qweather processing"):
+                return
+
             response = requests.get(
                 url="https://geoapi.qweather.com/v2/city/lookup?location=" + ans + "&key=" + self._param.web_apikey).json()
             if response["code"] == "200":

@@ -71,16 +77,23 @@ class QWeather(ComponentBase, ABC):
             else:
                 return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])
 
+            if self.check_if_canceled("Qweather processing"):
+                return
+
             base_url = "https://api.qweather.com/v7/" if self._param.user_type == 'paid' else "https://devapi.qweather.com/v7/"
 
             if self._param.type == "weather":
                 url = base_url + "weather/" + self._param.time_period + "?location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                 response = requests.get(url=url).json()
+                if self.check_if_canceled("Qweather processing"):
+                    return
                 if response["code"] == "200":
                     if self._param.time_period == "now":
                         return QWeather.be_output(str(response["now"]))
                     else:
                         qweather_res = [{"content": str(i) + "\n"} for i in response["daily"]]
+                        if self.check_if_canceled("Qweather processing"):
+                            return
                         if not qweather_res:
                             return QWeather.be_output("")
 

@@ -92,6 +105,8 @@ class QWeather(ComponentBase, ABC):
             elif self._param.type == "indices":
                 url = base_url + "indices/1d?type=0&location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                 response = requests.get(url=url).json()
+                if self.check_if_canceled("Qweather processing"):
+                    return
                 if response["code"] == "200":
                     indices_res = response["daily"][0]["date"] + "\n" + "\n".join(
                         [i["name"] + ": " + i["category"] + ", " + i["text"] for i in response["daily"]])

@@ -103,9 +118,13 @@ class QWeather(ComponentBase, ABC):
             elif self._param.type == "airquality":
                 url = base_url + "air/now?location=" + location_id + "&key=" + self._param.web_apikey + "&lang=" + self._param.lang
                 response = requests.get(url=url).json()
+                if self.check_if_canceled("Qweather processing"):
+                    return
                 if response["code"] == "200":
                     return QWeather.be_output(str(response["now"]))
                 else:
                     return QWeather.be_output("**Error**" + self._param.error_code[response["code"]])
         except Exception as e:
+            if self.check_if_canceled("Qweather processing"):
+                return
             return QWeather.be_output("**Error**" + str(e))
@@ -13,18 +13,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from functools import partial
+import json
 import os
 import re
 from abc import ABC
 from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
-from api.db import LLMType
+from common.constants import LLMType
+from api.db.services.document_service import DocumentService
+from api.db.services.dialog_service import meta_filter
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle
-from api import settings
-from api.utils.api_utils import timeout
+from common import settings
+from common.connection_utils import timeout
 from rag.app.tag import label_question
-from rag.prompts import kb_prompt
-from rag.prompts.prompts import cross_languages
+from rag.prompts.generator import cross_languages, kb_prompt, gen_meta_filter
 
 
 class RetrievalParam(ToolParamBase):

@@ -58,6 +61,8 @@ class RetrievalParam(ToolParamBase):
         self.empty_response = ""
         self.use_kg = False
         self.cross_languages = []
+        self.toc_enhance = False
+        self.meta_data_filter={}
 
     def check(self):
         self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")

@@ -75,10 +80,14 @@ class RetrievalParam(ToolParamBase):
 class Retrieval(ToolBase, ABC):
     component_name = "Retrieval"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Retrieval processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", self._param.empty_response)
+            return
 
         kb_ids: list[str] = []
         for id in self._param.kb_ids:

@@ -86,10 +95,16 @@ class Retrieval(ToolBase, ABC):
                 kb_ids.append(id)
                 continue
             kb_nm = self._canvas.get_variable_value(id)
-            e, kb = KnowledgebaseService.get_by_name(kb_nm, self._canvas._tenant_id)
-            if not e:
-                raise Exception(f"Dataset({kb_nm}) does not exist.")
-            kb_ids.append(kb.id)
+            # if kb_nm is a list
+            kb_nm_list = kb_nm if isinstance(kb_nm, list) else [kb_nm]
+            for nm_or_id in kb_nm_list:
+                e, kb = KnowledgebaseService.get_by_name(nm_or_id,
+                                                         self._canvas._tenant_id)
+                if not e:
+                    e, kb = KnowledgebaseService.get_by_id(nm_or_id)
+                    if not e:
+                        raise Exception(f"Dataset({nm_or_id}) does not exist.")
+                kb_ids.append(kb.id)
 
         filtered_kb_ids: list[str] = list(set([kb_id for kb_id in kb_ids if kb_id]))
 

@@ -108,13 +123,58 @@ class Retrieval(ToolBase, ABC):
         if self._param.rerank_id:
             rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)
 
-        query = kwargs["query"]
+        vars = self.get_input_elements_from_text(kwargs["query"])
+        vars = {k:o["value"] for k,o in vars.items()}
+        query = self.string_format(kwargs["query"], vars)
+
+        doc_ids=[]
+        if self._param.meta_data_filter!={}:
+            metas = DocumentService.get_meta_by_kbs(kb_ids)
+            if self._param.meta_data_filter.get("method") == "auto":
+                chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT)
+                filters: dict = gen_meta_filter(chat_mdl, metas, query)
+                doc_ids.extend(meta_filter(metas, filters["conditions"], filters.get("logic", "and")))
+                if not doc_ids:
+                    doc_ids = None
+            elif self._param.meta_data_filter.get("method") == "manual":
+                filters = self._param.meta_data_filter["manual"]
+                for flt in filters:
+                    pat = re.compile(self.variable_ref_patt)
+                    s = flt["value"]
+                    out_parts = []
+                    last = 0
+
+                    for m in pat.finditer(s):
+                        out_parts.append(s[last:m.start()])
+                        key = m.group(1)
+                        v = self._canvas.get_variable_value(key)
+                        if v is None:
+                            rep = ""
+                        elif isinstance(v, partial):
+                            buf = []
+                            for chunk in v():
+                                buf.append(chunk)
+                            rep = "".join(buf)
+                        elif isinstance(v, str):
+                            rep = v
+                        else:
+                            rep = json.dumps(v, ensure_ascii=False)
+
+                        out_parts.append(rep)
+                        last = m.end()
+
+                    out_parts.append(s[last:])
+                    flt["value"] = "".join(out_parts)
+                doc_ids.extend(meta_filter(metas, filters, self._param.meta_data_filter.get("logic", "and")))
+                if filters and not doc_ids:
+                    doc_ids = ["-999"]
+
         if self._param.cross_languages:
             query = cross_languages(kbs[0].tenant_id, None, query, self._param.cross_languages)
 
         if kbs:
             query = re.sub(r"^user[::\s]*", "", query, flags=re.IGNORECASE)
-            kbinfos = settings.retrievaler.retrieval(
+            kbinfos = settings.retriever.retrieval(
                 query,
                 embd_mdl,
                 [kb.tenant_id for kb in kbs],

@@ -123,23 +183,39 @@ class Retrieval(ToolBase, ABC):
                 self._param.top_n,
                 self._param.similarity_threshold,
                 1 - self._param.keywords_similarity_weight,
+                doc_ids=doc_ids,
                 aggs=False,
                 rerank_mdl=rerank_mdl,
                 rank_feature=label_question(query, kbs),
             )
+            if self.check_if_canceled("Retrieval processing"):
+                return
+
+            if self._param.toc_enhance:
+                chat_mdl = LLMBundle(self._canvas._tenant_id, LLMType.CHAT)
+                cks = settings.retriever.retrieval_by_toc(query, kbinfos["chunks"], [kb.tenant_id for kb in kbs], chat_mdl, self._param.top_n)
+                if self.check_if_canceled("Retrieval processing"):
+                    return
+                if cks:
+                    kbinfos["chunks"] = cks
+            kbinfos["chunks"] = settings.retriever.retrieval_by_children(kbinfos["chunks"], [kb.tenant_id for kb in kbs])
             if self._param.use_kg:
-                ck = settings.kg_retrievaler.retrieval(query,
+                ck = settings.kg_retriever.retrieval(query,
                                                        [kb.tenant_id for kb in kbs],
                                                        kb_ids,
                                                        embd_mdl,
                                                        LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT))
+                if self.check_if_canceled("Retrieval processing"):
+                    return
                 if ck["content_with_weight"]:
                     kbinfos["chunks"].insert(0, ck)
         else:
             kbinfos = {"chunks": [], "doc_aggs": []}
 
         if self._param.use_kg and kbs:
-            ck = settings.kg_retrievaler.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl, LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
+            ck = settings.kg_retriever.retrieval(query, [kb.tenant_id for kb in kbs], filtered_kb_ids, embd_mdl, LLMBundle(kbs[0].tenant_id, LLMType.CHAT))
+            if self.check_if_canceled("Retrieval processing"):
+                return
             if ck["content_with_weight"]:
                 ck["content"] = ck["content_with_weight"]
                 del ck["content_with_weight"]

@@ -155,13 +231,20 @@ class Retrieval(ToolBase, ABC):
             self.set_output("formalized_content", self._param.empty_response)
             return
 
-        self._canvas.add_refernce(kbinfos["chunks"], kbinfos["doc_aggs"])
+        # Format the chunks for JSON output (similar to how other tools do it)
+        json_output = kbinfos["chunks"].copy()
+
+        self._canvas.add_reference(kbinfos["chunks"], kbinfos["doc_aggs"])
         form_cnt = "\n".join(kb_prompt(kbinfos, 200000, True))
+
+        # Set both formalized content and JSON output
        self.set_output("formalized_content", form_cnt)
+        self.set_output("json", json_output)
+
         return form_cnt
 
     def thoughts(self) -> str:
         return """
Keywords: {}
Looking for the most relevant articles.
""".format(self.get_input().get("query", "-_-!"))
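The new metadata-filter block in Retrieval supports two modes: "auto" asks a chat model (via gen_meta_filter) to derive filter conditions from the query, while "manual" first substitutes canvas-variable references embedded in each filter value and then applies meta_filter; if a manual filter matches nothing, doc_ids is pinned to ["-999"] so retrieval returns nothing rather than everything. A hedged sketch of the manual substitution step in isolation (the regex and the resolver below are assumptions standing in for self.variable_ref_patt and self._canvas.get_variable_value):

import json
import re

VARIABLE_REF_PATT = r"\{([a-zA-Z0-9_.@-]+)\}"   # hypothetical pattern

def substitute(value: str, resolve) -> str:
    # Replace each {component@field}-style reference with its resolved value,
    # serializing non-string values as JSON, exactly in the spirit of the diff.
    out, last = [], 0
    for m in re.finditer(VARIABLE_REF_PATT, value):
        out.append(value[last:m.start()])
        v = resolve(m.group(1))
        if v is None:
            out.append("")
        elif isinstance(v, str):
            out.append(v)
        else:
            out.append(json.dumps(v, ensure_ascii=False))
        last = m.end()
    out.append(value[last:])
    return "".join(out)

# Example with a dict standing in for the canvas variable store:
print(substitute("author is {begin@user_name}", {"begin@user_name": "Alice"}.get))
# -> "author is Alice"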
agent/tools/searxng.py (new file, 169 lines)
@@ -0,0 +1,169 @@
+#
+# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import logging
+import os
+import time
+from abc import ABC
+import requests
+from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
+from common.connection_utils import timeout
+
+
+class SearXNGParam(ToolParamBase):
+    """
+    Define the SearXNG component parameters.
+    """
+
+    def __init__(self):
+        self.meta: ToolMeta = {
+            "name": "searxng_search",
+            "description": "SearXNG is a privacy-focused metasearch engine that aggregates results from multiple search engines without tracking users. It provides comprehensive web search capabilities.",
+            "parameters": {
+                "query": {
+                    "type": "string",
+                    "description": "The search keywords to execute with SearXNG. The keywords should be the most important words/terms(includes synonyms) from the original request.",
+                    "default": "{sys.query}",
+                    "required": True
+                },
+                "searxng_url": {
+                    "type": "string",
+                    "description": "The base URL of your SearXNG instance (e.g., http://localhost:4000). This is required to connect to your SearXNG server.",
+                    "required": False,
+                    "default": ""
+                }
+            }
+        }
+        super().__init__()
+        self.top_n = 10
+        self.searxng_url = ""
+
+    def check(self):
+        # Keep validation lenient so opening try-run panel won't fail without URL.
+        # Coerce top_n to int if it comes as string from UI.
+        try:
+            if isinstance(self.top_n, str):
+                self.top_n = int(self.top_n.strip())
+        except Exception:
+            pass
+        self.check_positive_integer(self.top_n, "Top N")
+
+    def get_input_form(self) -> dict[str, dict]:
+        return {
+            "query": {
+                "name": "Query",
+                "type": "line"
+            },
+            "searxng_url": {
+                "name": "SearXNG URL",
+                "type": "line",
+                "placeholder": "http://localhost:4000"
+            }
+        }
+
+
+class SearXNG(ToolBase, ABC):
+    component_name = "SearXNG"
+
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
+    def _invoke(self, **kwargs):
+        if self.check_if_canceled("SearXNG processing"):
+            return
+
+        # Gracefully handle try-run without inputs
+        query = kwargs.get("query")
+        if not query or not isinstance(query, str) or not query.strip():
+            self.set_output("formalized_content", "")
+            return ""
+
+        searxng_url = (getattr(self._param, "searxng_url", "") or kwargs.get("searxng_url") or "").strip()
+        # In try-run, if no URL configured, just return empty instead of raising
+        if not searxng_url:
+            self.set_output("formalized_content", "")
+            return ""
+
+        last_e = ""
+        for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("SearXNG processing"):
+                return
+
+            try:
+                search_params = {
+                    'q': query,
+                    'format': 'json',
+                    'categories': 'general',
+                    'language': 'auto',
+                    'safesearch': 1,
+                    'pageno': 1
+                }
+
+                response = requests.get(
+                    f"{searxng_url}/search",
+                    params=search_params,
+                    timeout=10
+                )
+                response.raise_for_status()
+
+                if self.check_if_canceled("SearXNG processing"):
+                    return
+
+                data = response.json()
+
+                if not data or not isinstance(data, dict):
+                    raise ValueError("Invalid response from SearXNG")
+
+                results = data.get("results", [])
+                if not isinstance(results, list):
+                    raise ValueError("Invalid results format from SearXNG")
+
+                results = results[:self._param.top_n]
+
+                if self.check_if_canceled("SearXNG processing"):
+                    return
+
+                self._retrieve_chunks(results,
+                                      get_title=lambda r: r.get("title", ""),
+                                      get_url=lambda r: r.get("url", ""),
+                                      get_content=lambda r: r.get("content", ""))
+
+                self.set_output("json", results)
+                return self.output("formalized_content")
+
+            except requests.RequestException as e:
+                if self.check_if_canceled("SearXNG processing"):
+                    return
+
+                last_e = f"Network error: {e}"
+                logging.exception(f"SearXNG network error: {e}")
+                time.sleep(self._param.delay_after_error)
+            except Exception as e:
+                if self.check_if_canceled("SearXNG processing"):
+                    return
+
+                last_e = str(e)
+                logging.exception(f"SearXNG error: {e}")
+                time.sleep(self._param.delay_after_error)
+
+        if last_e:
+            self.set_output("_ERROR", last_e)
+            return f"SearXNG error: {last_e}"
+
+        assert False, self.output()
+
+    def thoughts(self) -> str:
+        return """
+Keywords: {}
+Searching with SearXNG for relevant results...
+""".format(self.get_input().get("query", "-_-!"))
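The new SearXNG tool only needs the instance to expose the JSON API; note that a stock SearXNG deployment typically has to list json under search.formats in its settings.yml, otherwise the /search?format=json request is rejected. A minimal standalone query mirroring the tool's request (the URL below is an assumption; point it at your own instance):

import requests

SEARXNG_URL = "http://localhost:4000"   # assumed local instance

# Same parameters the tool sends in its search_params dict.
params = {
    "q": "retrieval augmented generation",
    "format": "json",
    "categories": "general",
    "language": "auto",
    "safesearch": 1,
    "pageno": 1,
}

resp = requests.get(f"{SEARXNG_URL}/search", params=params, timeout=10)
resp.raise_for_status()
for r in resp.json().get("results", [])[:5]:
    print(r.get("title", ""), "->", r.get("url", ""))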
@@ -19,7 +19,7 @@ import time
 from abc import ABC
 from tavily import TavilyClient
 from agent.tools.base import ToolParamBase, ToolBase, ToolMeta
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class TavilySearchParam(ToolParamBase):

@@ -31,7 +31,7 @@ class TavilySearchParam(ToolParamBase):
         self.meta:ToolMeta = {
             "name": "tavily_search",
             "description": """
Tavily is a search engine optimized for LLMs, aimed at efficient, quick and persistent search results.
When searching:
- Start with specific query which should focus on just a single aspect.
- Number of keywords in query should be less than 5.

@@ -101,8 +101,11 @@ When searching:
 class TavilySearch(ToolBase, ABC):
     component_name = "TavilySearch"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("TavilySearch processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""

@@ -113,10 +116,16 @@ class TavilySearch(ToolBase, ABC):
             if fld not in kwargs:
                 kwargs[fld] = getattr(self._param, fld)
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("TavilySearch processing"):
+                return
+
             try:
                 kwargs["include_images"] = False
                 kwargs["include_raw_content"] = False
                 res = self.tavily_client.search(**kwargs)
+                if self.check_if_canceled("TavilySearch processing"):
+                    return
+
                 self._retrieve_chunks(res["results"],
                                       get_title=lambda r: r["title"],
                                       get_url=lambda r: r["url"],

@@ -125,6 +134,9 @@ class TavilySearch(ToolBase, ABC):
                 self.set_output("json", res["results"])
                 return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("TavilySearch processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"Tavily error: {e}")
                 time.sleep(self._param.delay_after_error)

@@ -136,7 +148,7 @@ class TavilySearch(ToolBase, ABC):
 
     def thoughts(self) -> str:
         return """
Keywords: {}
Looking for the most relevant articles.
""".format(self.get_input().get("query", "-_-!"))
 

@@ -199,8 +211,11 @@ class TavilyExtractParam(ToolParamBase):
 class TavilyExtract(ToolBase, ABC):
     component_name = "TavilyExtract"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("TavilyExtract processing"):
+            return
+
         self.tavily_client = TavilyClient(api_key=self._param.api_key)
         last_e = None
         for fld in ["urls", "extract_depth", "format"]:

@@ -209,12 +224,21 @@ class TavilyExtract(ToolBase, ABC):
         if kwargs.get("urls") and isinstance(kwargs["urls"], str):
             kwargs["urls"] = kwargs["urls"].split(",")
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("TavilyExtract processing"):
+                return
+
             try:
                 kwargs["include_images"] = False
                 res = self.tavily_client.extract(**kwargs)
+                if self.check_if_canceled("TavilyExtract processing"):
+                    return
+
                 self.set_output("json", res["results"])
                 return self.output("json")
             except Exception as e:
+                if self.check_if_canceled("TavilyExtract processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"Tavily error: {e}")
         if last_e:

@@ -224,4 +248,4 @@ class TavilyExtract(ToolBase, ABC):
         assert False, self.output()
 
     def thoughts(self) -> str:
         return "Opened {}—pulling out the main text…".format(self.get_input().get("urls", "-_-!"))
@@ -43,12 +43,18 @@ class TuShare(ComponentBase, ABC):
     component_name = "TuShare"
 
     def _run(self, history, **kwargs):
+        if self.check_if_canceled("TuShare processing"):
+            return
+
         ans = self.get_input()
         ans = ",".join(ans["content"]) if "content" in ans else ""
         if not ans:
             return TuShare.be_output("")
 
         try:
+            if self.check_if_canceled("TuShare processing"):
+                return
+
             tus_res = []
             params = {
                 "api_name": "news",

@@ -58,12 +64,18 @@ class TuShare(ComponentBase, ABC):
             }
             response = requests.post(url="http://api.tushare.pro", data=json.dumps(params).encode('utf-8'))
             response = response.json()
+            if self.check_if_canceled("TuShare processing"):
+                return
             if response['code'] != 0:
                 return TuShare.be_output(response['msg'])
             df = pd.DataFrame(response['data']['items'])
             df.columns = response['data']['fields']
+            if self.check_if_canceled("TuShare processing"):
+                return
             tus_res.append({"content": (df[df['content'].str.contains(self._param.keyword, case=False)]).to_markdown()})
         except Exception as e:
+            if self.check_if_canceled("TuShare processing"):
+                return
             return TuShare.be_output("**ERROR**: " + str(e))
 
         if not tus_res:
@@ -21,7 +21,7 @@ import pandas as pd
 import pywencai
 
 from agent.tools.base import ToolParamBase, ToolMeta, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class WenCaiParam(ToolParamBase):

@@ -68,21 +68,33 @@ fund selection platform: through AI technology, is committed to providing excell
 class WenCai(ToolBase, ABC):
     component_name = "WenCai"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 12)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("WenCai processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("report", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("WenCai processing"):
+                return
+
             try:
                 wencai_res = []
                 res = pywencai.get(query=kwargs["query"], query_type=self._param.query_type, perpage=self._param.top_n)
+                if self.check_if_canceled("WenCai processing"):
+                    return
+
                 if isinstance(res, pd.DataFrame):
                     wencai_res.append(res.to_markdown())
                 elif isinstance(res, dict):
                     for item in res.items():
+                        if self.check_if_canceled("WenCai processing"):
+                            return
+
                         if isinstance(item[1], list):
                             wencai_res.append(item[0] + "\n" + pd.DataFrame(item[1]).to_markdown())
                         elif isinstance(item[1], str):

@@ -100,6 +112,9 @@ class WenCai(ToolBase, ABC):
                 self.set_output("report", "\n\n".join(wencai_res))
                 return self.output("report")
             except Exception as e:
+                if self.check_if_canceled("WenCai processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"WenCai error: {e}")
                 time.sleep(self._param.delay_after_error)

@@ -111,4 +126,4 @@ class WenCai(ToolBase, ABC):
         assert False, self.output()
 
     def thoughts(self) -> str:
         return "Pulling live financial data for `{}`.".format(self.get_input().get("query", "-_-!"))
@@ -19,7 +19,7 @@ import time
 from abc import ABC
 import wikipedia
 from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class WikipediaParam(ToolParamBase):

@@ -64,19 +64,28 @@ class WikipediaParam(ToolParamBase):
 class Wikipedia(ToolBase, ABC):
     component_name = "Wikipedia"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("Wikipedia processing"):
+            return
+
         if not kwargs.get("query"):
             self.set_output("formalized_content", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
+            if self.check_if_canceled("Wikipedia processing"):
+                return
+
             try:
                 wikipedia.set_lang(self._param.language)
                 wiki_engine = wikipedia
                 pages = []
                 for p in wiki_engine.search(kwargs["query"], results=self._param.top_n):
+                    if self.check_if_canceled("Wikipedia processing"):
+                        return
+
                     try:
                         pages.append(wikipedia.page(p))
                     except Exception:

@@ -87,6 +96,9 @@ class Wikipedia(ToolBase, ABC):
                                       get_content=lambda r: r.summary)
                 return self.output("formalized_content")
             except Exception as e:
+                if self.check_if_canceled("Wikipedia processing"):
+                    return
+
                 last_e = e
                 logging.exception(f"Wikipedia error: {e}")
                 time.sleep(self._param.delay_after_error)

@@ -99,6 +111,6 @@ class Wikipedia(ToolBase, ABC):
 
     def thoughts(self) -> str:
         return """
Keywords: {}
Looking for the most relevant articles.
""".format(self.get_input().get("query", "-_-!"))
@@ -20,7 +20,7 @@ from abc import ABC
 import pandas as pd
 import yfinance as yf
 from agent.tools.base import ToolMeta, ToolParamBase, ToolBase
-from api.utils.api_utils import timeout
+from common.connection_utils import timeout
 
 
 class YahooFinanceParam(ToolParamBase):

@@ -72,34 +72,46 @@ class YahooFinanceParam(ToolParamBase):
 class YahooFinance(ToolBase, ABC):
     component_name = "YahooFinance"
 
-    @timeout(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60))
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 60)))
     def _invoke(self, **kwargs):
+        if self.check_if_canceled("YahooFinance processing"):
+            return None
+
         if not kwargs.get("stock_code"):
             self.set_output("report", "")
             return ""
 
         last_e = ""
         for _ in range(self._param.max_retries+1):
-            yohoo_res = []
+            if self.check_if_canceled("YahooFinance processing"):
+                return None
+
+            yahoo_res = []
             try:
                 msft = yf.Ticker(kwargs["stock_code"])
+                if self.check_if_canceled("YahooFinance processing"):
+                    return None
+
                 if self._param.info:
-                    yohoo_res.append("# Information:\n" + pd.Series(msft.info).to_markdown() + "\n")
+                    yahoo_res.append("# Information:\n" + pd.Series(msft.info).to_markdown() + "\n")
                 if self._param.history:
-                    yohoo_res.append("# History:\n" + msft.history().to_markdown() + "\n")
+                    yahoo_res.append("# History:\n" + msft.history().to_markdown() + "\n")
                 if self._param.financials:
-                    yohoo_res.append("# Calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n")
+                    yahoo_res.append("# Calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n")
                 if self._param.balance_sheet:
-                    yohoo_res.append("# Balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n")
-                    yohoo_res.append("# Quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n")
+                    yahoo_res.append("# Balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n")
+                    yahoo_res.append("# Quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n")
                 if self._param.cash_flow_statement:
-                    yohoo_res.append("# Cash flow statement:\n" + msft.cashflow.to_markdown() + "\n")
-                    yohoo_res.append("# Quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n")
+                    yahoo_res.append("# Cash flow statement:\n" + msft.cashflow.to_markdown() + "\n")
+                    yahoo_res.append("# Quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n")
                 if self._param.news:
-                    yohoo_res.append("# News:\n" + pd.DataFrame(msft.news).to_markdown() + "\n")
-                self.set_output("report", "\n\n".join(yohoo_res))
+                    yahoo_res.append("# News:\n" + pd.DataFrame(msft.news).to_markdown() + "\n")
+                self.set_output("report", "\n\n".join(yahoo_res))
                 return self.output("report")
             except Exception as e:
+                if self.check_if_canceled("YahooFinance processing"):
+                    return None
+
                 last_e = e
                 logging.exception(f"YahooFinance error: {e}")
                 time.sleep(self._param.delay_after_error)

@@ -111,4 +123,4 @@ class YahooFinance(ToolBase, ABC):
         assert False, self.output()
 
     def thoughts(self) -> str:
         return "Pulling live financial data for `{}`.".format(self.get_input().get("stock_code", "-_-!"))
@@ -14,5 +14,5 @@
 # limitations under the License.
 #
 
-from beartype.claw import beartype_this_package
-beartype_this_package()
+# from beartype.claw import beartype_this_package
+# beartype_this_package()
Some files were not shown because too many files have changed in this diff.