Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00.

Compare commits: 1821 commits.
.gitattributes (vendored) · 3

```diff
@@ -1 +1,2 @@
 *.sh text eol=lf
+docker/entrypoint.sh text eol=lf executable
```
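The added line pins LF endings for the entrypoint script on every platform and tags it with a custom `executable` attribute. Attributes can be checked locally with `git check-attr`, for example from a clone of the repository:

```bash
# Ask Git which attributes apply to the entrypoint script; the output should
# list text, eol: lf, and the (custom, tooling-defined) executable attribute.
git check-attr text eol executable -- docker/entrypoint.sh
```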
.github/ISSUE_TEMPLATE/agent_scenario_request.yml (new file) · 46

```yaml
name: "❤️🔥ᴬᴳᴱᴺᵀ Agent scenario request"
description: Propose a agent scenario request for RAGFlow.
title: "[Agent Scenario Request]: "
labels: ["❤️🔥ᴬᴳᴱᴺᵀ agent scenario"]
body:
  - type: checkboxes
    attributes:
      label: Self Checks
      description: "Please check the following in order to be responded in time :)"
      options:
        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
          required: true
        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
          required: true
        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
          required: true
        - label: "Please do not modify this template :) and fill in all the required fields."
          required: true
  - type: textarea
    attributes:
      label: Is your feature request related to a scenario?
      description: |
        A clear and concise description of what the scenario is. Ex. I'm always frustrated when [...]
      render: Markdown
    validations:
      required: false
  - type: textarea
    attributes:
      label: Describe the feature you'd like
      description: A clear and concise description of what you want to happen.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Documentation, adoption, use case
      description: If you can, explain some scenarios how users might use this, situations it would be helpful in. Any API designs, mockups, or diagrams are also helpful.
      render: Markdown
    validations:
      required: false
  - type: textarea
    attributes:
      label: Additional information
      description: |
        Add any other context or screenshots about the feature request here.
    validations:
      required: false
```
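Issue forms only take effect once they reach the default branch, so a local syntax check before pushing is cheap insurance. A minimal sketch, assuming `yamllint` is installed:

```bash
# Catch YAML syntax errors in the new issue form before it ships.
yamllint .github/ISSUE_TEMPLATE/agent_scenario_request.yml
```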
.github/ISSUE_TEMPLATE/bug_report.yml (vendored) · 18

```diff
@@ -1,15 +1,21 @@
-name: Bug Report
+name: "🐞 Bug Report"
 description: Create a bug issue for RAGFlow
 title: "[Bug]: "
-labels: [bug]
+labels: ["🐞 bug"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same bug?
-      description: Please check if an issue already exists for the bug you encountered.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
-          required: true
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: "Please provide the following information to help us understand the issue."
```
.github/ISSUE_TEMPLATE/feature_request.md (deleted) · 10

```diff
@@ -1,10 +0,0 @@
----
-name: Feature request
-title: '[Feature Request]: '
-about: Suggest an idea for RAGFlow
-labels: ''
----
-
-**Summary**
-
-Description for this feature.
```
.github/ISSUE_TEMPLATE/feature_request.yml (vendored) · 16

```diff
@@ -1,14 +1,20 @@
-name: Feature request
+name: "💞 Feature request"
 description: Propose a feature request for RAGFlow.
 title: "[Feature Request]: "
-labels: [feature request]
+labels: ["💞 feature"]
 body:
   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same feature request?
-      description: Please check if an issue already exists for the feature you request.
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
       options:
-        - label: I have checked the existing issues.
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
           required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: textarea
     attributes:
```
.github/ISSUE_TEMPLATE/question.yml (vendored) · 17

```diff
@@ -1,8 +1,21 @@
-name: Question
+name: "🙋♀️ Question"
 description: Ask questions on RAGFlow
 title: "[Question]: "
-labels: [question]
+labels: ["🙋♀️ question"]
 body:
+  - type: checkboxes
+    attributes:
+      label: Self Checks
+      description: "Please check the following in order to be responded in time :)"
+      options:
+        - label: I have searched for existing issues [search for existing issues](https://github.com/infiniflow/ragflow/issues), including closed ones.
+          required: true
+        - label: I confirm that I am using English to submit this report ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: Non-english title submitions will be closed directly ( 非英文标题的提交将会被直接关闭 ) ([Language Policy](https://github.com/infiniflow/ragflow/issues/5910)).
+          required: true
+        - label: "Please do not modify this template :) and fill in all the required fields."
+          required: true
   - type: markdown
     attributes:
       value: |
```
.github/workflows/release.yml (vendored) · 26

```diff
@@ -25,7 +25,7 @@ jobs:
       - name: Check out code
         uses: actions/checkout@v4
         with:
-          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
+          token: ${{ secrets.GITHUB_TOKEN }}  # Use the secret as an environment variable
           fetch-depth: 0
           fetch-tags: true

@@ -69,7 +69,7 @@ jobs:
         # https://github.com/actions/upload-release-asset has been replaced by https://github.com/softprops/action-gh-release
         uses: softprops/action-gh-release@v2
         with:
-          token: ${{ secrets.MY_GITHUB_TOKEN }}  # Use the secret as an environment variable
+          token: ${{ secrets.GITHUB_TOKEN }}  # Use the secret as an environment variable
          prerelease: ${{ env.PRERELEASE }}
           tag_name: ${{ env.RELEASE_TAG }}
           # The body field does not support environment variable substitution directly.

@@ -88,7 +88,9 @@ jobs:
         with:
           context: .
           push: true
-          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}
+          tags: |
+            infiniflow/ragflow:${{ env.RELEASE_TAG }}
+            infiniflow/ragflow:latest-full
           file: Dockerfile
           platforms: linux/amd64

@@ -98,7 +100,9 @@ jobs:
         with:
           context: .
           push: true
-          tags: infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
+          tags: |
+            infiniflow/ragflow:${{ env.RELEASE_TAG }}-slim
+            infiniflow/ragflow:latest-slim
           file: Dockerfile
           build-args: LIGHTEN=1
           platforms: linux/amd64

@@ -116,3 +120,17 @@ jobs:
           packages-dir: sdk/python/dist/
           password: ${{ secrets.PYPI_API_TOKEN }}
           verbose: true
+
+      - name: Build ragflow-cli
+        if: startsWith(github.ref, 'refs/tags/v')
+        run: |
+          cd admin/client && \
+          uv build
+
+      - name: Publish client package distributions to PyPI
+        if: startsWith(github.ref, 'refs/tags/v')
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          packages-dir: admin/client/dist/
+          password: ${{ secrets.PYPI_API_TOKEN }}
+          verbose: true
```
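Each release now also pushes a moving alias next to the versioned tag, so the newest image of either flavor can be pulled without knowing the current release number, assuming the images are public on Docker Hub:

```bash
# The versioned tags pin a release; the latest-* aliases track the newest build.
docker pull infiniflow/ragflow:latest-full
docker pull infiniflow/ragflow:latest-slim
```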
.github/workflows/tests.yml (vendored) · 104

```diff
@@ -15,6 +15,8 @@ on:
       - 'docs/**'
       - '*.md'
       - '*.mdx'
+  schedule:
+    - cron: '0 16 * * *'  # This schedule runs every 16:00:00Z(00:00:00+08:00)

 # https://docs.github.com/en/actions/using-jobs/using-concurrency
 concurrency:

@@ -32,12 +34,10 @@ jobs:
       # https://github.com/hmarr/debug-action
       #- uses: hmarr/debug-action@v2

-      - name: Show who triggered this workflow
+      - name: Ensure workspace ownership
         run: |
-          echo "Workflow triggered by ${{ github.event_name }}"
-
-      - name: Ensure workspace ownership
-        run: echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE
+          echo "chown -R $USER $GITHUB_WORKSPACE" && sudo chown -R $USER $GITHUB_WORKSPACE

       # https://github.com/actions/checkout/issues/1781
       - name: Check out code

@@ -46,25 +46,64 @@ jobs:
           fetch-depth: 0
           fetch-tags: true

+      - name: Check workflow duplication
+        if: ${{ !cancelled() && !failure() && (github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'ci')) }}
+        run: |
+          if [[ ${{ github.event_name }} != 'pull_request' ]]; then
+            HEAD=$(git rev-parse HEAD)
+            # Find a PR that introduced a given commit
+            gh auth login --with-token <<< "${{ secrets.GITHUB_TOKEN }}"
+            PR_NUMBER=$(gh pr list --search ${HEAD} --state merged --json number --jq .[0].number)
+            echo "HEAD=${HEAD}"
+            echo "PR_NUMBER=${PR_NUMBER}"
+            if [[ -n ${PR_NUMBER} ]]; then
+              PR_SHA_FP=${RUNNER_WORKSPACE_PREFIX}/artifacts/${GITHUB_REPOSITORY}/PR_${PR_NUMBER}
+              if [[ -f ${PR_SHA_FP} ]]; then
+                read -r PR_SHA PR_RUN_ID < "${PR_SHA_FP}"
+                # Calculate the hash of the current workspace content
+                HEAD_SHA=$(git rev-parse HEAD^{tree})
+                if [[ ${HEAD_SHA} == ${PR_SHA} ]]; then
+                  echo "Cancel myself since the workspace content hash is the same with PR #${PR_NUMBER} merged. See ${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${PR_RUN_ID} for details."
+                  gh run cancel ${GITHUB_RUN_ID}
+                  while true; do
+                    status=$(gh run view ${GITHUB_RUN_ID} --json status -q .status)
+                    [ "$status" = "completed" ] && break
+                    sleep 5
+                  done
+                  exit 1
+                fi
+              fi
+            fi
+          else
+            PR_NUMBER=${{ github.event.pull_request.number }}
+            PR_SHA_FP=${RUNNER_WORKSPACE_PREFIX}/artifacts/${GITHUB_REPOSITORY}/PR_${PR_NUMBER}
+            # Calculate the hash of the current workspace content
+            PR_SHA=$(git rev-parse HEAD^{tree})
+            echo "PR #${PR_NUMBER} workspace content hash: ${PR_SHA}"
+            mkdir -p ${RUNNER_WORKSPACE_PREFIX}/artifacts/${GITHUB_REPOSITORY}
+            echo "${PR_SHA} ${GITHUB_RUN_ID}" > ${PR_SHA_FP}
+          fi

       # https://github.com/astral-sh/ruff-action
       - name: Static check with Ruff
-        uses: astral-sh/ruff-action@v2
+        uses: astral-sh/ruff-action@v3
         with:
-          version: ">=0.8.2"
-          args: "check --ignore E402"
+          version: ">=0.11.x"
+          args: "check"

       - name: Build ragflow:nightly-slim
         run: |
+          RUNNER_WORKSPACE_PREFIX=${RUNNER_WORKSPACE_PREFIX:-$HOME}
           sudo docker pull ubuntu:22.04
-          sudo docker build --progress=plain --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
+          sudo DOCKER_BUILDKIT=1 docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .

       - name: Build ragflow:nightly
         run: |
-          sudo docker build --progress=plain --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
+          sudo DOCKER_BUILDKIT=1 docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .

       - name: Start ragflow:nightly-slim
         run: |
           sudo docker compose -f docker/docker-compose.yml down --volumes --remove-orphans
           echo -e "\nRAGFLOW_IMAGE=infiniflow/ragflow:nightly-slim" >> docker/.env
           sudo docker compose -f docker/docker-compose.yml up -d

@@ -86,7 +125,12 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python && uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api

       - name: Run frontend api tests against Elasticsearch
         run: |

@@ -96,8 +140,22 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          cd sdk/python && UV_LINK_MODE=copy uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+      - name: Run http api tests against Elasticsearch
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api

       - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed

@@ -116,7 +174,12 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_sdk_api && pytest -s --tb=short get_email.py t_dataset.py t_chat.py t_session.py t_document.py t_chunk.py
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && uv pip install sdk/python && DOC_ENGINE=infinity uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_sdk_api

       - name: Run frontend api tests against Infinity
         run: |

@@ -126,7 +189,22 @@ jobs:
             echo "Waiting for service to be available..."
             sleep 5
           done
-          cd sdk/python && uv sync --python 3.10 --frozen && uv pip install . && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+          cd sdk/python && UV_LINK_MODE=copy uv sync --python 3.10 --group test --frozen && source .venv/bin/activate && cd test/test_frontend_api && pytest -s --tb=short get_email.py test_dataset.py
+
+      - name: Run http api tests against Infinity
+        run: |
+          export http_proxy=""; export https_proxy=""; export no_proxy=""; export HTTP_PROXY=""; export HTTPS_PROXY=""; export NO_PROXY=""
+          export HOST_ADDRESS=http://host.docker.internal:9380
+          until sudo docker exec ragflow-server curl -s --connect-timeout 5 ${HOST_ADDRESS} > /dev/null; do
+            echo "Waiting for service to be available..."
+            sleep 5
+          done
+          if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
+            export HTTP_API_TEST_LEVEL=p3
+          else
+            export HTTP_API_TEST_LEVEL=p2
+          fi
+          UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen && DOC_ENGINE=infinity uv run --only-group test --no-default-groups pytest -s --tb=short --level=${HTTP_API_TEST_LEVEL} test/testcases/test_http_api

       - name: Stop ragflow:nightly
         if: always()  # always run this step even if previous steps failed
```
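The duplication check keys on the tree hash rather than the commit hash: `git rev-parse 'HEAD^{tree}'` identifies the working-tree content alone, so the push of a merged PR to the default branch yields the same value as the PR's own run even though the commit SHAs differ, letting the later run cancel itself. A quick illustration in any Git checkout:

```bash
# Two commits with identical content share a tree hash even though their
# commit hashes differ (author, date, and parents are not part of the tree).
git rev-parse HEAD            # commit hash: changes on every amend or rebase
git rev-parse 'HEAD^{tree}'   # tree hash: stable while the content is unchanged
```

The `--level` option handed to pytest is not a pytest built-in; presumably it is registered by the test suite's own conftest, with p2/p3 as coverage tiers, which is why scheduled nightly runs export the wider p3 level. Under that assumption, a local run mirroring the CI step might look like:

```bash
# Hypothetical local invocation of the HTTP API suite at PR-level coverage;
# --level is assumed to be the repository's custom pytest option.
UV_LINK_MODE=copy uv sync --python 3.10 --only-group test --no-default-groups --frozen
uv run --only-group test --no-default-groups \
  pytest -s --tb=short --level=p2 test/testcases/test_http_api
```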
.gitignore (vendored) · 154

All 154 changed lines are additions (@@ -36,8 +36,162 @@ sdk/python/ragflow.egg-info/); the resulting section of the file reads:

```
sdk/python/build/
sdk/python/dist/
sdk/python/ragflow_sdk.egg-info/

# Exclude dep files
libssl*.deb
tika-server*.jar*
cl100k_base.tiktoken
chrome*
huggingface.co/
nltk_data/

# Exclude hash-like temporary files like 9b5ad71b2ce5302211f9c61530b329a4922fc6a4
*[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]*
.lh/
.venv
docker/data

#--------------------------------------------------#
# The following was generated with gitignore.nvim: #
#--------------------------------------------------#
# Gitignore for the following technologies: Node

# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist
ragflow_cli.egg-info
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

# Serverless Webpack directories
.webpack/

# SvelteKit build / generate output
.svelte-kit

# Default backup dir
backup
```
19
.pre-commit-config.yaml
Normal file
19
.pre-commit-config.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: check-yaml
      - id: check-json
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: mixed-line-ending
      - id: check-symlinks

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.11.6
    hooks:
      - id: ruff
        args: [ --fix ]
      - id: ruff-format
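A minimal contributor workflow against this configuration might look like the sketch below (assuming `pre-commit` is installed and on `PATH`; the hook revisions are pinned by the `rev` fields above):

```bash
pipx install pre-commit       # or: pip install pre-commit
pre-commit install            # wire the hooks into .git/hooks/pre-commit
pre-commit run --all-files    # run check-yaml, ruff --fix, ruff-format, etc. once over the tree
```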
15  .trivyignore  Normal file
@@ -0,0 +1,15 @@
**/*.md
**/*.min.js
**/*.min.css
**/*.svg
**/*.png
**/*.jpg
**/*.jpeg
**/*.gif
**/*.woff
**/*.woff2
**/*.map
**/*.webp
**/*.ico
**/*.ttf
**/*.eot
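Unlike the classic CVE-ID form of `.trivyignore`, this one lists path globs, which suggests it is meant to suppress findings (e.g. from secret scanning) in documentation and static assets. A hedged sketch of a scan that honors it — Trivy reads `.trivyignore` from the working directory by default, so `--ignorefile` below only makes that explicit:

```bash
# Scan the repository filesystem; findings under the globbed paths are suppressed.
trivy fs --ignorefile .trivyignore .
```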
14  Dockerfile
@@ -21,9 +21,7 @@ RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/huggingface.co
if [ "$LIGHTEN" != "1" ]; then \
|
||||
(tar -cf - \
|
||||
/huggingface.co/BAAI/bge-large-zh-v1.5 \
|
||||
/huggingface.co/BAAI/bge-reranker-v2-m3 \
|
||||
/huggingface.co/maidalun1020/bce-embedding-base_v1 \
|
||||
/huggingface.co/maidalun1020/bce-reranker-base_v1 \
|
||||
| tar -xf - --strip-components=2 -C /root/.ragflow) \
|
||||
fi
|
||||
|
||||
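The `tar -cf - ... | tar -xf -` pair above is a copy idiom: the first `tar` streams the selected model directories to stdout, and the second unpacks the stream while `--strip-components=2` drops the leading `huggingface.co/<org>/` path segments. A standalone sketch of the same idiom, with illustrative paths:

```bash
# Copy /data/models/bge-large into /dest/bge-large: the archive stores
# data/models/bge-large/..., and --strip-components=2 removes "data/models".
tar -cf - /data/models/bge-large | tar -xf - --strip-components=2 -C /dest
```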
@@ -46,7 +44,8 @@ ENV DEBIAN_FRONTEND=noninteractive
# Building C extensions: libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev
RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
    if [ "$NEED_MIRROR" == "1" ]; then \
        sed -i 's|http://archive.ubuntu.com|https://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
        sed -i 's|http://ports.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
        sed -i 's|http://archive.ubuntu.com|http://mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list; \
    fi; \
    rm -f /etc/apt/apt.conf.d/docker-clean && \
    echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache && \
@@ -59,7 +58,9 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
    apt install -y default-jdk && \
    apt install -y libatk-bridge2.0-0 && \
    apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
    apt install -y python3-pip pipx nginx unzip curl wget git vim less
    apt install -y libjemalloc-dev && \
    apt install -y python3-pip pipx nginx unzip curl wget git vim less && \
    apt install -y ghostscript

RUN if [ "$NEED_MIRROR" == "1" ]; then \
    pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
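Note how each `apt install` line in the new version ends in `&& \`: the whole sequence stays a single `RUN` layer, and a failing install aborts the chain instead of being silently skipped, which the old unchained final line did not guarantee. In miniature (package names illustrative):

```bash
# One logical command: if libjemalloc-dev fails to install, ghostscript is
# never attempted and the build step fails loudly instead of proceeding.
apt update && \
    apt install -y libjemalloc-dev && \
    apt install -y ghostscript
```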
@@ -190,6 +191,7 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
ENV PYTHONPATH=/ragflow/

COPY web web
COPY admin admin
COPY api api
COPY conf conf
COPY deepdoc deepdoc
@@ -198,9 +200,11 @@ COPY agent agent
COPY graphrag graphrag
COPY agentic_reasoning agentic_reasoning
COPY pyproject.toml uv.lock ./
COPY mcp mcp
COPY plugin plugin

COPY docker/service_conf.yaml.template ./conf/service_conf.yaml.template
COPY docker/entrypoint.sh docker/entrypoint-parser.sh ./
COPY docker/entrypoint.sh ./
RUN chmod +x ./entrypoint*.sh

# Copy compiled web pages
@@ -33,6 +33,7 @@ ADD ./rag ./rag
ADD ./requirements.txt ./requirements.txt
ADD ./agent ./agent
ADD ./graphrag ./graphrag
ADD ./plugin ./plugin

RUN dnf install -y openmpi openmpi-devel python3-openmpi
ENV C_INCLUDE_PATH /usr/include/openmpi-x86_64:$C_INCLUDE_PATH
125  README.md
@@ -1,17 +1,17 @@
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
<img src="web/src/assets/logo-with-text.svg" width="520" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_tzh.md">繁体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a> |
<a href="/README_pt_br.md">Português (Brasil)</a>
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DBEDFA"></a>
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
<a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
</p>

<p align="center">
@@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,16 +30,25 @@
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
<a href="https://deepwiki.com/infiniflow/ragflow">
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

#

<div align="center">
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</div>

<details open>
<summary><b>📕 Table of Contents</b></summary>
@@ -62,28 +71,27 @@

## 💡 What is RAGFlow?

[RAGFlow](https://ragflow.io/) is an open-source RAG (Retrieval-Augmented Generation) engine based on deep document
understanding. It offers a streamlined RAG workflow for businesses of any scale, combining LLM (Large Language Models)
to provide truthful question-answering capabilities, backed by well-founded citations from various complex formatted
data.
[RAGFlow](https://ragflow.io/) is a leading open-source Retrieval-Augmented Generation (RAG) engine that fuses cutting-edge RAG with Agent capabilities to create a superior context layer for LLMs. It offers a streamlined RAG workflow adaptable to enterprises of any scale. Powered by a converged context engine and pre-built agent templates, RAGFlow enables developers to transform complex data into high-fidelity, production-ready AI systems with exceptional efficiency and precision.

## 🎮 Demo

Try our demo at [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
</div>

## 🔥 Latest Updates

- 2025-02-05 Updates the model list of 'SILICONFLOW' and adds support for Deepseek-R1/DeepSeek-V3.
- 2025-01-26 Optimizes knowledge graph extraction and application, offering various configuration options.
- 2025-10-15 Supports orchestrable ingestion pipeline.
- 2025-08-08 Supports OpenAI's latest GPT-5 series models.
- 2025-08-01 Supports agentic workflow and MCP.
- 2025-05-23 Adds a Python/JavaScript code executor component to Agent.
- 2025-05-05 Supports cross-language query.
- 2025-03-19 Supports using a multi-modal model to make sense of images within PDF or DOCX files.
- 2025-02-28 Combined with Internet search (Tavily), supports reasoning like Deep Research for any LLMs.
- 2024-12-18 Upgrades Document Layout Analysis model in DeepDoc.
- 2024-12-04 Adds support for pagerank score in knowledge base.
- 2024-11-22 Adds more variables to Agent.
- 2024-11-01 Adds keyword extraction and related question generation to the parsed chunks to improve the accuracy of retrieval.
- 2024-08-22 Support text to SQL statements through RAG.

## 🎉 Stay Tuned
@@ -127,7 +135,7 @@ releases! 🌟
## 🔎 System Architecture

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 Get Started
@@ -138,8 +146,10 @@ releases! 🌟
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > If you have not installed Docker on your local machine (Windows, Mac, or Linux),
  > see [Install Docker Engine](https://docs.docker.com/engine/install/).
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Required only if you intend to use the code executor (sandbox) feature of RAGFlow.

> [!TIP]
> If you have not installed Docker on your local machine (Windows, Mac, or Linux), see [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Start up the server

@@ -173,19 +183,27 @@ releases! 🌟

3. Start up the server using the pre-built Docker images:

   > The command below downloads the `v0.17.0-slim` edition of the RAGFlow Docker image. Refer to the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.17.0-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` for the full edition `v0.17.0`.
   > [!CAUTION]
   > All Docker images are built for x86 platforms. We don't currently offer Docker images for ARM64.
   > If you are on an ARM64 platform, follow [this guide](https://ragflow.io/docs/dev/build_docker_image) to build a Docker image compatible with your system.

   > The command below downloads the `v0.21.1-slim` edition of the RAGFlow Docker image. See the following table for descriptions of different RAGFlow editions. To download a RAGFlow edition different from `v0.21.1-slim`, update the `RAGFLOW_IMAGE` variable accordingly in **docker/.env** before using `docker compose` to start the server. For example: set `RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1` for the full edition `v0.21.1`.

   ```bash
   $ cd ragflow/docker
   # Use CPU for embedding and DeepDoc tasks:
   $ docker compose -f docker-compose.yml up -d

   # To use GPU to accelerate embedding and DeepDoc tasks:
   # docker compose -f docker-compose-gpu.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   |-------------------|-----------------|-----------------------|--------------------------|
   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
   | v0.21.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |
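Per the note above, switching editions is a one-line change to **docker/.env** before running `docker compose`; for example, with the tag values from the table:

```bash
# docker/.env — pull the full edition instead of the slim default:
RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1
```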
4. Check the server status after having the server up and running:

@@ -271,7 +289,7 @@ This image is approximately 2 GB in size and relies on external LLM and embeddin

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Build a Docker image including embedding models
@@ -281,15 +299,15 @@ This image is approximately 9 GB in size. As it includes embedding models, it re

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Launch service from source for development

1. Install uv, or skip this step if it is already installed:
1. Install `uv` and `pre-commit`, or skip this step if they are already installed:

   ```bash
   pipx install uv
   pipx install uv pre-commit
   ```

2. Clone the source code and install Python dependencies:
@@ -298,6 +316,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Launch the dependent services (MinIO, Elasticsearch, Redis, and MySQL) using Docker Compose:
@@ -309,7 +329,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   Add the following line to `/etc/hosts` to resolve all hosts specified in **docker/.env** to `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
   ```

4. If you cannot access HuggingFace, set the `HF_ENDPOINT` environment variable to use a mirror site:
@@ -318,7 +338,20 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Launch backend service:
5. If your operating system does not have jemalloc, please install it as follows:

   ```bash
   # Ubuntu
   sudo apt-get install libjemalloc-dev
   # CentOS
   sudo yum install jemalloc
   # OpenSUSE
   sudo zypper install jemalloc
   # macOS
   sudo brew install jemalloc
   ```

6. Launch backend service:

   ```bash
   source .venv/bin/activate
@@ -326,12 +359,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   bash docker/launch_backend_service.sh
   ```

6. Install frontend dependencies:
7. Install frontend dependencies:

   ```bash
   cd web
   npm install
   ```
7. Launch frontend service:

8. Launch frontend service:

   ```bash
   npm run dev
@@ -341,12 +376,22 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .



9. Stop RAGFlow front-end and back-end service after development is complete:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 Documentation

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

@@ -354,11 +399,11 @@ See the [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214

## 🏄 Community

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Contributing

RAGFlow flourishes via open-source collaboration. In this spirit, we embrace diverse contributions from the community.
If you would like to be a part, review our [Contribution Guidelines](./CONTRIBUTING.md) first.
If you would like to be a part, review our [Contribution Guidelines](https://ragflow.io/docs/dev/contributing) first.
137  README_id.md
@@ -1,17 +1,17 @@
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="520" alt="Logo ragflow">
<img src="web/src/assets/logo-with-text.svg" width="520" alt="Logo ragflow">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_tzh.md">繁体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a> |
<a href="/README_pt_br.md">Português (Brasil)</a>
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
<a href="./README_tzh.md"><img alt="繁體中文版自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DBEDFA"></a>
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
</p>

<p align="center">
@@ -22,7 +22,7 @@
<img alt="Lencana Daring" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Rilis%20Terbaru" alt="Rilis Terbaru">
@@ -30,16 +30,21 @@
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/Lisensi-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="Lisensi">
</a>
<a href="https://deepwiki.com/infiniflow/ragflow">
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Dokumentasi</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Peta Jalan</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

#

<details open>
<summary><b>📕 Daftar Isi </b> </summary>

@@ -62,25 +67,27 @@

## 💡 Apa Itu RAGFlow?

[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source berbasis pemahaman dokumen yang mendalam. Platform ini menyediakan alur kerja RAG yang efisien untuk bisnis dengan berbagai skala, menggabungkan LLM (Large Language Models) untuk menyediakan kemampuan tanya-jawab yang benar dan didukung oleh referensi dari data terstruktur kompleks.
[RAGFlow](https://ragflow.io/) adalah mesin RAG (Retrieval-Augmented Generation) open-source terkemuka yang mengintegrasikan teknologi RAG mutakhir dengan kemampuan Agent untuk menciptakan lapisan kontekstual superior bagi LLM. Menyediakan alur kerja RAG yang efisien dan dapat diadaptasi untuk perusahaan segala skala. Didukung oleh mesin konteks terkonvergensi dan template Agent yang telah dipra-bangun, RAGFlow memungkinkan pengembang mengubah data kompleks menjadi sistem AI kesetiaan-tinggi dan siap-produksi dengan efisiensi dan presisi yang luar biasa.

## 🎮 Demo

Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
</div>

## 🔥 Pembaruan Terbaru

- 2025-02-05 Memperbarui daftar model 'SILICONFLOW' dan menambahkan dukungan untuk Deepseek-R1/DeepSeek-V3.
- 2025-01-26 Optimalkan ekstraksi dan penerapan grafik pengetahuan dan sediakan berbagai opsi konfigurasi.
- 2025-10-15 Dukungan untuk jalur data yang terorkestrasi.
- 2025-08-08 Mendukung model seri GPT-5 terbaru dari OpenAI.
- 2025-08-01 Mendukung alur kerja agen dan MCP.
- 2025-05-23 Menambahkan komponen pelaksana kode Python/JS ke Agen.
- 2025-05-05 Mendukung kueri lintas bahasa.
- 2025-03-19 Mendukung penggunaan model multi-modal untuk memahami gambar di dalam file PDF atau DOCX.
- 2025-02-28 dikombinasikan dengan pencarian Internet (TAVILY), mendukung penelitian mendalam untuk LLM apa pun.
- 2024-12-18 Meningkatkan model Analisis Tata Letak Dokumen di DeepDoc.
- 2024-12-04 Mendukung skor pagerank ke basis pengetahuan.
- 2024-11-22 Peningkatan definisi dan penggunaan variabel di Agen.
- 2024-11-01 Penambahan ekstraksi kata kunci dan pembuatan pertanyaan terkait untuk meningkatkan akurasi pengambilan.
- 2024-08-22 Dukungan untuk teks ke pernyataan SQL melalui RAG.

## 🎉 Tetap Terkini
@@ -122,7 +129,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
## 🔎 Arsitektur Sistem

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 Mulai
@@ -133,6 +140,10 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Hanya diperlukan jika Anda ingin menggunakan fitur eksekutor kode (sandbox) dari RAGFlow.

> [!TIP]
> Jika Anda belum menginstal Docker di komputer lokal Anda (Windows, Mac, atau Linux), lihat [Install Docker Engine](https://docs.docker.com/engine/install/).

### 🚀 Menjalankan Server

@@ -166,21 +177,29 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).

3. Bangun image Docker pre-built dan jalankan server:

   > Perintah di bawah ini mengunduh edisi v0.17.0-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.17.0-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0 untuk edisi lengkap v0.17.0.
   > [!CAUTION]
   > Semua gambar Docker dibangun untuk platform x86. Saat ini, kami tidak menawarkan gambar Docker untuk ARM64.
   > Jika Anda menggunakan platform ARM64, [silakan gunakan panduan ini untuk membangun gambar Docker yang kompatibel dengan sistem Anda](https://ragflow.io/docs/dev/build_docker_image).

   ```bash
   $ cd ragflow/docker
   $ docker compose -f docker-compose.yml up -d
   ```
   > Perintah di bawah ini mengunduh edisi v0.21.1-slim dari gambar Docker RAGFlow. Silakan merujuk ke tabel berikut untuk deskripsi berbagai edisi RAGFlow. Untuk mengunduh edisi RAGFlow yang berbeda dari v0.21.1-slim, perbarui variabel RAGFLOW_IMAGE di docker/.env sebelum menggunakan docker compose untuk memulai server. Misalnya, atur RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1 untuk edisi lengkap v0.21.1.

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

   ```bash
   $ cd ragflow/docker
   # Use CPU for embedding and DeepDoc tasks:
   $ docker compose -f docker-compose.yml up -d

   # To use GPU to accelerate embedding and DeepDoc tasks:
   # docker compose -f docker-compose-gpu.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.21.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

4. Periksa status server setelah server aktif dan berjalan:
1. Periksa status server setelah server aktif dan berjalan:

   ```bash
   $ docker logs -f ragflow-server
@@ -202,10 +221,10 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
   > Jika Anda melewatkan langkah ini dan langsung login ke RAGFlow, browser Anda mungkin menampilkan error `network anormal`
   > karena RAGFlow mungkin belum sepenuhnya siap.

5. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
2. Buka browser web Anda, masukkan alamat IP server Anda, dan login ke RAGFlow.
   > Dengan pengaturan default, Anda hanya perlu memasukkan `http://IP_DEVICE_ANDA` (**tanpa** nomor port) karena
   > port HTTP default `80` bisa dihilangkan saat menggunakan konfigurasi default.
6. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
3. Dalam [service_conf.yaml.template](./docker/service_conf.yaml.template), pilih LLM factory yang diinginkan di `user_default_llm` dan perbarui
   bidang `API_KEY` dengan kunci API yang sesuai.

   > Lihat [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) untuk informasi lebih lanjut.
@@ -237,7 +256,7 @@ Image ini berukuran sekitar 2 GB dan bergantung pada aplikasi LLM eksternal dan

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 Membangun Docker Image Termasuk Model Embedding
@@ -247,15 +266,15 @@ Image ini berukuran sekitar 9 GB. Karena sudah termasuk model embedding, ia hany

```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 Menjalankan Aplikasi dari untuk Pengembangan

1. Instal uv, atau lewati langkah ini jika sudah terinstal:
1. Instal `uv` dan `pre-commit`, atau lewati langkah ini jika sudah terinstal:

   ```bash
   pipx install uv
   pipx install uv pre-commit
   ```

2. Clone kode sumber dan instal dependensi Python:
@@ -264,6 +283,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Jalankan aplikasi yang diperlukan (MinIO, Elasticsearch, Redis, dan MySQL) menggunakan Docker Compose:
@@ -275,7 +296,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   Tambahkan baris berikut ke `/etc/hosts` untuk memetakan semua host yang ditentukan di **conf/service_conf.yaml** ke `127.0.0.1`:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
   ```

4. Jika Anda tidak dapat mengakses HuggingFace, atur variabel lingkungan `HF_ENDPOINT` untuk menggunakan situs mirror:
@@ -284,7 +305,18 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. Jalankan aplikasi backend:
5. Jika sistem operasi Anda tidak memiliki jemalloc, instal sebagai berikut:

   ```bash
   # ubuntu
   sudo apt-get install libjemalloc-dev
   # centos
   sudo yum install jemalloc
   # mac
   sudo brew install jemalloc
   ```

6. Jalankan aplikasi backend:

   ```bash
   source .venv/bin/activate
@@ -292,12 +324,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   bash docker/launch_backend_service.sh
   ```

6. Instal dependensi frontend:
7. Instal dependensi frontend:

   ```bash
   cd web
   npm install
   ```
7. Jalankan aplikasi frontend:

8. Jalankan aplikasi frontend:

   ```bash
   npm run dev
@@ -307,12 +341,23 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .



9. Hentikan layanan front-end dan back-end RAGFlow setelah pengembangan selesai:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 Dokumentasi

- [Quickstart](https://ragflow.io/docs/dev/)
- [Panduan Pengguna](https://ragflow.io/docs/dev/category/guides)
- [Referensi](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 Roadmap

@@ -320,11 +365,11 @@ Lihat [Roadmap RAGFlow 2025](https://github.com/infiniflow/ragflow/issues/4214)

## 🏄 Komunitas

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 Kontribusi

RAGFlow berkembang melalui kolaborasi open-source. Dalam semangat ini, kami menerima kontribusi dari komunitas.
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](./CONTRIBUTING.md).
Jika Anda ingin berpartisipasi, tinjau terlebih dahulu [Panduan Kontribusi](https://ragflow.io/docs/dev/contributing).
121  README_ja.md
@@ -1,17 +1,17 @@
<div align="center">
<a href="https://demo.ragflow.io/">
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
<img src="web/src/assets/logo-with-text.svg" width="350" alt="ragflow logo">
</a>
</div>

<p align="center">
<a href="./README.md">English</a> |
<a href="./README_zh.md">简体中文</a> |
<a href="./README_tzh.md">繁体中文</a> |
<a href="./README_ja.md">日本語</a> |
<a href="./README_ko.md">한국어</a> |
<a href="./README_id.md">Bahasa Indonesia</a> |
<a href="/README_pt_br.md">Português (Brasil)</a>
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
<a href="./README_tzh.md"><img alt="繁體中文版自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DBEDFA"></a>
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
</p>

<p align="center">
@@ -22,7 +22,7 @@
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
</a>
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
</a>
<a href="https://github.com/infiniflow/ragflow/releases/latest">
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
@@ -30,37 +30,44 @@
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
</a>
<a href="https://deepwiki.com/infiniflow/ragflow">
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
</a>
</p>

<h4 align="center">
<a href="https://ragflow.io/docs/dev/">Document</a> |
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
<a href="https://twitter.com/infiniflowai">Twitter</a> |
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
<a href="https://demo.ragflow.io">Demo</a>
</h4>

#

## 💡 RAGFlow とは?

[RAGFlow](https://ragflow.io/) は、深い文書理解に基づいたオープンソースの RAG (Retrieval-Augmented Generation) エンジンである。LLM(大規模言語モデル)を組み合わせることで、様々な複雑なフォーマットのデータから根拠のある引用に裏打ちされた、信頼できる質問応答機能を実現し、あらゆる規模のビジネスに適した RAG ワークフローを提供します。
[RAGFlow](https://ragflow.io/) は、先進的なRAG(Retrieval-Augmented Generation)技術と Agent 機能を融合し、大規模言語モデル(LLM)に優れたコンテキスト層を構築する最先端のオープンソース RAG エンジンです。あらゆる規模の企業に対応可能な合理化された RAG ワークフローを提供し、統合型コンテキストエンジンと事前構築されたAgentテンプレートにより、開発者が複雑なデータを驚異的な効率性と精度で高精細なプロダクションレディAIシステムへ変換することを可能にします。

## 🎮 Demo

デモをお試しください:[https://demo.ragflow.io](https://demo.ragflow.io)。

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
</div>

## 🔥 最新情報

- 2025-02-05 シリコン フローの St およびモデル リストを更新し、Deep Seek-R1/Deep Seek-V3 のサポートを追加しました。
- 2025-01-26 ナレッジ グラフの抽出と適用を最適化し、さまざまな構成オプションを提供します。
- 2025-10-15 オーケストレーションされたデータパイプラインのサポート。
- 2025-08-08 OpenAI の最新 GPT-5 シリーズモデルをサポートします。
- 2025-08-01 エージェントワークフローとMCPをサポート。
- 2025-05-23 エージェントに Python/JS コードエグゼキュータコンポーネントを追加しました。
- 2025-05-05 言語間クエリをサポートしました。
- 2025-03-19 PDFまたはDOCXファイル内の画像を理解するために、多モーダルモデルを使用することをサポートします。
- 2025-02-28 インターネット検索 (TAVILY) と組み合わせて、あらゆる LLM の詳細な調査をサポートします。
- 2024-12-18 DeepDoc のドキュメント レイアウト分析モデルをアップグレードします。
- 2024-12-04 ナレッジ ベースへのページランク スコアをサポートしました。
- 2024-11-22 エージェントでの変数の定義と使用法を改善しました。
- 2024-11-01 再現の精度を向上させるために、解析されたチャンクにキーワード抽出と関連質問の生成を追加しました。
- 2024-08-22 RAG を介して SQL ステートメントへのテキストをサポートします。

## 🎉 続きを楽しみに
@@ -102,7 +109,7 @@
## 🔎 システム構成

<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
</div>

## 🎬 初期設定
@@ -113,7 +120,10 @@
- RAM >= 16 GB
- Disk >= 50 GB
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
  > ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。
- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlowのコード実行(サンドボックス)機能を利用する場合のみ必要です。

> [!TIP]
> ローカルマシン(Windows、Mac、または Linux)に Docker をインストールしていない場合は、[Docker Engine のインストール](https://docs.docker.com/engine/install/) を参照してください。

### 🚀 サーバーを起動

@@ -146,21 +156,29 @@

3. ビルド済みの Docker イメージをビルドし、サーバーを起動する:

   > 以下のコマンドは、RAGFlow Docker イメージの v0.17.0-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.17.0-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.17.0 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0 と設定します。
   > [!CAUTION]
   > 現在、公式に提供されているすべての Docker イメージは x86 アーキテクチャ向けにビルドされており、ARM64 用の Docker イメージは提供されていません。
   > ARM64 アーキテクチャのオペレーティングシステムを使用している場合は、[このドキュメント](https://ragflow.io/docs/dev/build_docker_image)を参照して Docker イメージを自分でビルドしてください。

   > 以下のコマンドは、RAGFlow Docker イメージの v0.21.1-slim エディションをダウンロードします。異なる RAGFlow エディションの説明については、以下の表を参照してください。v0.21.1-slim とは異なるエディションをダウンロードするには、docker/.env ファイルの RAGFLOW_IMAGE 変数を適宜更新し、docker compose を使用してサーバーを起動してください。例えば、完全版 v0.21.1 をダウンロードするには、RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1 と設定します。

   ```bash
   $ cd ragflow/docker
   # Use CPU for embedding and DeepDoc tasks:
   $ docker compose -f docker-compose.yml up -d

   # To use GPU to accelerate embedding and DeepDoc tasks:
   # docker compose -f docker-compose-gpu.yml up -d
   ```

   | RAGFlow image tag | Image size (GB) | Has embedding models? | Stable?                  |
   | ----------------- | --------------- | --------------------- | ------------------------ |
   | v0.17.0           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.17.0-slim      | ≈2              | ❌                    | Stable release           |
   | v0.21.1           | ≈9              | :heavy_check_mark:    | Stable release           |
   | v0.21.1-slim      | ≈2              | ❌                    | Stable release           |
   | nightly           | ≈9              | :heavy_check_mark:    | _Unstable_ nightly build |
   | nightly-slim      | ≈2              | ❌                    | _Unstable_ nightly build |

4. サーバーを立ち上げた後、サーバーの状態を確認する:
1. サーバーを立ち上げた後、サーバーの状態を確認する:

   ```bash
   $ docker logs -f ragflow-server
@@ -180,9 +198,9 @@

   > もし確認ステップをスキップして直接 RAGFlow にログインした場合、その時点で RAGFlow が完全に初期化されていない可能性があるため、ブラウザーがネットワーク異常エラーを表示するかもしれません。

5. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
2. ウェブブラウザで、プロンプトに従ってサーバーの IP アドレスを入力し、RAGFlow にログインします。
   > デフォルトの設定を使用する場合、デフォルトの HTTP サービングポート `80` は省略できるので、与えられたシナリオでは、`http://IP_OF_YOUR_MACHINE`(ポート番号は省略)だけを入力すればよい。
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) で、`user_default_llm` で希望の LLM ファクトリを選択し、`API_KEY` フィールドを対応する API キーで更新する。

   > 詳しくは [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup) を参照してください。

@@ -223,7 +241,7 @@ RAGFlow はデフォルトで Elasticsearch を使用して全文とベクトル
   ```bash
   $ docker compose -f docker-compose.yml up -d
   ```
   > [!WARNING]
   > [!WARNING]
   > Linux/arm64 マシンでの Infinity への切り替えは正式にサポートされていません。

## 🔧 ソースコードで Docker イメージを作成(埋め込みモデルなし)
@@ -233,7 +251,7 @@
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
```

## 🔧 ソースコードをコンパイルした Docker イメージ(埋め込みモデルを含む)
@@ -243,15 +261,15 @@ docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-s
```bash
git clone https://github.com/infiniflow/ragflow.git
cd ragflow/
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
```

## 🔨 ソースコードからサービスを起動する方法

1. uv をインストールする。すでにインストールされている場合は、このステップをスキップしてください:
1. `uv` と `pre-commit` をインストールする。すでにインストールされている場合は、このステップをスキップしてください:

   ```bash
   pipx install uv
   pipx install uv pre-commit
   ```

2. ソースコードをクローンし、Python の依存関係をインストールする:
@@ -260,6 +278,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   git clone https://github.com/infiniflow/ragflow.git
   cd ragflow/
   uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
   uv run download_deps.py
   pre-commit install
   ```

3. Docker Compose を使用して依存サービス(MinIO、Elasticsearch、Redis、MySQL)を起動する:
@@ -271,7 +291,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   `/etc/hosts` に以下の行を追加して、**conf/service_conf.yaml** に指定されたすべてのホストを `127.0.0.1` に解決します:

   ```
   127.0.0.1 es01 infinity mysql minio redis
   127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
   ```

4. HuggingFace にアクセスできない場合は、`HF_ENDPOINT` 環境変数を設定してミラーサイトを使用してください:
@@ -280,7 +300,18 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   export HF_ENDPOINT=https://hf-mirror.com
   ```

5. バックエンドサービスを起動する:
5. オペレーティングシステムにjemallocがない場合は、次のようにインストールします:

   ```bash
   # ubuntu
   sudo apt-get install libjemalloc-dev
   # centos
   sudo yum install jemalloc
   # mac
   sudo brew install jemalloc
   ```

6. バックエンドサービスを起動する:

   ```bash
   source .venv/bin/activate
@@ -288,12 +319,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
   bash docker/launch_backend_service.sh
   ```

6. フロントエンドの依存関係をインストールする:
7. フロントエンドの依存関係をインストールする:

   ```bash
   cd web
   npm install
   ```
7. フロントエンドサービスを起動する:

8. フロントエンドサービスを起動する:

   ```bash
   npm run dev
@@ -303,12 +336,22 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .



9. 開発が完了したら、RAGFlow のフロントエンド サービスとバックエンド サービスを停止します:

   ```bash
   pkill -f "ragflow_server.py|task_executor.py"
   ```

## 📚 ドキュメンテーション

- [Quickstart](https://ragflow.io/docs/dev/)
- [User guide](https://ragflow.io/docs/dev/category/guides)
- [Configuration](https://ragflow.io/docs/dev/configurations)
- [Release notes](https://ragflow.io/docs/dev/release_notes)
- [User guides](https://ragflow.io/docs/dev/category/guides)
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
- [References](https://ragflow.io/docs/dev/category/references)
- [FAQ](https://ragflow.io/docs/dev/faq)
- [FAQs](https://ragflow.io/docs/dev/faq)

## 📜 ロードマップ

@@ -316,10 +359,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .

## 🏄 コミュニティ

- [Discord](https://discord.gg/4XxujFgUN7)
- [Discord](https://discord.gg/NjYzJD3GM3)
- [Twitter](https://twitter.com/infiniflowai)
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)

## 🙌 コントリビュート

RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](./CONTRIBUTING.md)をご覧ください。
RAGFlow はオープンソースのコラボレーションによって発展してきました。この精神に基づき、私たちはコミュニティからの多様なコントリビュートを受け入れています。 参加を希望される方は、まず [コントリビューションガイド](https://ragflow.io/docs/dev/contributing)をご覧ください。
121  README_ko.md
@@ -1,17 +1,17 @@
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
|
||||
<img src="web/src/assets/logo-with-text.svg" width="520" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
|
||||
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
|
||||
<a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
|
||||
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
|
||||
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DBEDFA"></a>
|
||||
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
|
||||
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -30,38 +30,44 @@
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||
</a>
|
||||
<a href="https://deepwiki.com/infiniflow/ragflow">
|
||||
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
#
|
||||
|
||||
## 💡 RAGFlow란?
|
||||
|
||||
[RAGFlow](https://ragflow.io/)는 심층 문서 이해에 기반한 오픈소스 RAG (Retrieval-Augmented Generation) 엔진입니다. 이 엔진은 대규모 언어 모델(LLM)과 결합하여 정확한 질문 응답 기능을 제공하며, 다양한 복잡한 형식의 데이터에서 신뢰할 수 있는 출처를 바탕으로 한 인용을 통해 이를 뒷받침합니다. RAGFlow는 규모에 상관없이 모든 기업에 최적화된 RAG 워크플로우를 제공합니다.
|
||||
[RAGFlow](https://ragflow.io/) 는 최첨단 RAG(Retrieval-Augmented Generation)와 Agent 기능을 융합하여 대규모 언어 모델(LLM)을 위한 우수한 컨텍스트 계층을 생성하는 선도적인 오픈소스 RAG 엔진입니다. 모든 규모의 기업에 적용 가능한 효율적인 RAG 워크플로를 제공하며, 통합 컨텍스트 엔진과 사전 구축된 Agent 템플릿을 통해 개발자들이 복잡한 데이터를 예외적인 효율성과 정밀도로 고급 구현도의 프로덕션 준비 완료 AI 시스템으로 변환할 수 있도록 지원합니다.
|
||||
|
||||
## 🎮 데모
|
||||
|
||||
데모를 [https://demo.ragflow.io](https://demo.ragflow.io)에서 실행해 보세요.
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🔥 업데이트
|
||||
|
||||
- 2025-02-05 'SILICONFLOW' 모델 목록을 업데이트하고 Deepseek-R1/DeepSeek-V3에 대한 지원을 추가합니다.
|
||||
- 2025-01-26 지식 그래프 추출 및 적용을 최적화하고 다양한 구성 옵션을 제공합니다.
|
||||
- 2025-10-15 조정된 데이터 파이프라인 지원.
|
||||
- 2025-08-08 OpenAI의 최신 GPT-5 시리즈 모델을 지원합니다.
|
||||
- 2025-08-01 에이전트 워크플로우와 MCP를 지원합니다.
|
||||
- 2025-05-23 Agent에 Python/JS 코드 실행기 구성 요소를 추가합니다.
|
||||
- 2025-05-05 언어 간 쿼리를 지원합니다.
|
||||
- 2025-03-19 PDF 또는 DOCX 파일 내의 이미지를 이해하기 위해 다중 모드 모델을 사용하는 것을 지원합니다.
|
||||
- 2025-02-28 인터넷 검색(TAVILY)과 결합되어 모든 LLM에 대한 심층 연구를 지원합니다.
|
||||
- 2024-12-18 DeepDoc의 문서 레이아웃 분석 모델 업그레이드.
|
||||
- 2024-12-04 지식베이스에 대한 페이지랭크 점수를 지원합니다.
|
||||
|
||||
- 2024-11-22 에이전트의 변수 정의 및 사용을 개선했습니다.
|
||||
- 2024-11-01 파싱된 청크에 키워드 추출 및 관련 질문 생성을 추가하여 재현율을 향상시킵니다.
|
||||
- 2024-08-22 RAG를 통해 SQL 문에 텍스트를 지원합니다.
|
||||
|
||||
## 🎉 계속 지켜봐 주세요
|
||||
@ -103,7 +109,7 @@
|
||||
## 🔎 시스템 아키텍처
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
||||
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
|
||||
</div>
|
||||
|
||||
## 🎬 시작하기
|
||||
@ -114,7 +120,10 @@
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](<(https://docs.docker.com/engine/install/)>)를 참조하세요.
|
||||
- [gVisor](https://gvisor.dev/docs/user_guide/install/): RAGFlow의 코드 실행기(샌드박스) 기능을 사용하려는 경우에만 필요합니다.
|
||||
|
||||
> [!TIP]
|
||||
> 로컬 머신(Windows, Mac, Linux)에 Docker가 설치되지 않은 경우, [Docker 엔진 설치](<(https://docs.docker.com/engine/install/)>)를 참조하세요.
|
||||
|
||||
### 🚀 서버 시작하기
|
||||
|
||||
@ -147,21 +156,29 @@
|
||||
|
||||
3. 미리 빌드된 Docker 이미지를 생성하고 서버를 시작하세요:
|
||||
|
||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.17.0-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.17.0-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.17.0을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0로 설정합니다.
|
||||
> [!CAUTION]
|
||||
> 모든 Docker 이미지는 x86 플랫폼을 위해 빌드되었습니다. 우리는 현재 ARM64 플랫폼을 위한 Docker 이미지를 제공하지 않습니다.
|
||||
> ARM64 플랫폼을 사용 중이라면, [시스템과 호환되는 Docker 이미지를 빌드하려면 이 가이드를 사용해 주세요](https://ragflow.io/docs/dev/build_docker_image).
|
||||
|
||||
> 아래 명령어는 RAGFlow Docker 이미지의 v0.21.1-slim 버전을 다운로드합니다. 다양한 RAGFlow 버전에 대한 설명은 다음 표를 참조하십시오. v0.21.1-slim과 다른 RAGFlow 버전을 다운로드하려면, docker/.env 파일에서 RAGFLOW_IMAGE 변수를 적절히 업데이트한 후 docker compose를 사용하여 서버를 시작하십시오. 예를 들어, 전체 버전인 v0.21.1을 다운로드하려면 RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1로 설정합니다.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
# Use CPU for embedding and DeepDoc tasks:
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
|
||||
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||
# docker compose -f docker-compose-gpu.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.17.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.0-slim | ≈2 | ❌ | Stable release |
|
||||
| v0.21.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.21.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
4. 서버가 시작된 후 서버 상태를 확인하세요:
|
||||
1. 서버가 시작된 후 서버 상태를 확인하세요:
|
||||
|
||||
```bash
|
||||
$ docker logs -f ragflow-server
|
||||
@ -181,9 +198,9 @@
|
||||
|
||||
> 만약 확인 단계를 건너뛰고 바로 RAGFlow에 로그인하면, RAGFlow가 완전히 초기화되지 않았기 때문에 브라우저에서 `network anormal` 오류가 발생할 수 있습니다.
|
||||
|
||||
5. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
|
||||
2. 웹 브라우저에 서버의 IP 주소를 입력하고 RAGFlow에 로그인하세요.
|
||||
> 기본 설정을 사용할 경우, `http://IP_OF_YOUR_MACHINE`만 입력하면 됩니다 (포트 번호는 제외). 기본 HTTP 서비스 포트 `80`은 기본 구성으로 사용할 때 생략할 수 있습니다.
|
||||
6. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
|
||||
3. [service_conf.yaml.template](./docker/service_conf.yaml.template) 파일에서 원하는 LLM 팩토리를 `user_default_llm`에 선택하고, `API_KEY` 필드를 해당 API 키로 업데이트하세요.
|
||||
|
||||
> 자세한 내용은 [llm_api_key_setup](https://ragflow.io/docs/dev/llm_api_key_setup)를 참조하세요.
|
||||
|
||||
@ -233,7 +250,7 @@ RAGFlow 는 기본적으로 Elasticsearch 를 사용하여 전체 텍스트 및
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 소스 코드로 Docker 이미지를 컴파일합니다(임베딩 모델 포함)
|
||||
@ -243,15 +260,15 @@ docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-s
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 소스 코드로 서비스를 시작합니다.
|
||||
|
||||
1. uv를 설치하거나 이미 설치된 경우 이 단계를 건너뜁니다:
|
||||
1. `uv` 와 `pre-commit` 을 설치하거나, 이미 설치된 경우 이 단계를 건너뜁니다:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
pipx install uv pre-commit
|
||||
```
|
||||
|
||||
2. 소스 코드를 클론하고 Python 의존성을 설치합니다:
|
||||
@ -260,6 +277,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
uv run download_deps.py
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
3. Docker Compose를 사용하여 의존 서비스(MinIO, Elasticsearch, Redis 및 MySQL)를 시작합니다:
|
||||
@ -271,7 +290,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
`/etc/hosts` 에 다음 줄을 추가하여 **conf/service_conf.yaml** 에 지정된 모든 호스트를 `127.0.0.1` 로 해결합니다:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
|
||||
```
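추가한 항목이 적용되었는지는 다음과 같이 확인해 볼 수 있습니다(Linux 의 getent 기준이며, macOS 에서는 `dscacheutil -q host -a name es01` 을 사용할 수 있습니다):

```bash
# /etc/hosts 매핑 확인 예시 (Linux)
getent hosts es01 mysql minio redis
```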
|
||||
|
||||
4. HuggingFace에 접근할 수 없는 경우, `HF_ENDPOINT` 환경 변수를 설정하여 미러 사이트를 사용하세요:
|
||||
@ -280,7 +299,18 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. 백엔드 서비스를 시작합니다:
|
||||
5. 만약 운영 체제에 jemalloc이 없으면 다음 방식으로 설치하세요:
|
||||
|
||||
```bash
|
||||
# ubuntu
|
||||
sudo apt-get install libjemalloc-dev
|
||||
# centos
|
||||
sudo yum install jemalloc
|
||||
# mac
|
||||
sudo brew install jemalloc
|
||||
```
|
||||
|
||||
6. 백엔드 서비스를 시작합니다:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
@ -288,12 +318,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. 프론트엔드 의존성을 설치합니다:
|
||||
7. 프론트엔드 의존성을 설치합니다:
|
||||
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
7. 프론트엔드 서비스를 시작합니다:
|
||||
|
||||
8. 프론트엔드 서비스를 시작합니다:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
@ -303,12 +335,23 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||

|
||||
|
||||
|
||||
9. 개발이 완료된 후 RAGFlow 프론트엔드 및 백엔드 서비스를 중지합니다.
|
||||
|
||||
```bash
|
||||
pkill -f "ragflow_server.py|task_executor.py"
|
||||
```
|
||||
|
||||
|
||||
## 📚 문서
|
||||
|
||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||
- [References](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||
|
||||
## 📜 로드맵
|
||||
|
||||
@ -316,10 +359,10 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||
## 🏄 커뮤니티
|
||||
|
||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||
- [Discord](https://discord.gg/NjYzJD3GM3)
|
||||
- [Twitter](https://twitter.com/infiniflowai)
|
||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||
|
||||
## 🙌 컨트리뷰션
|
||||
|
||||
RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](./CONTRIBUTING.md)을 검토해 주세요.
|
||||
RAGFlow는 오픈소스 협업을 통해 발전합니다. 이러한 정신을 바탕으로, 우리는 커뮤니티의 다양한 기여를 환영합니다. 참여하고 싶으시다면, 먼저 [가이드라인](https://ragflow.io/docs/dev/contributing)을 검토해 주세요.
|
||||
|
||||
115
README_pt_br.md
@ -1,17 +1,17 @@
|
||||
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="520" alt="ragflow logo">
|
||||
<img src="web/src/assets/logo-with-text.svg" width="520" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
|
||||
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
|
||||
<a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
|
||||
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
|
||||
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
|
||||
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
|
||||
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DBEDFA"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -22,7 +22,7 @@
|
||||
<img alt="Badge Estático" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Última%20Relese" alt="Última Versão">
|
||||
@ -30,16 +30,21 @@
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="licença">
|
||||
</a>
|
||||
<a href="https://deepwiki.com/infiniflow/ragflow">
|
||||
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Documentação</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
#
|
||||
|
||||
<details open>
|
||||
<summary><b>📕 Índice</b></summary>
|
||||
|
||||
@ -62,25 +67,27 @@
|
||||
|
||||
## 💡 O que é o RAGFlow?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) é um mecanismo RAG (Geração Aumentada por Recuperação) de código aberto baseado em entendimento profundo de documentos. Ele oferece um fluxo de trabalho RAG simplificado para empresas de qualquer porte, combinando LLMs (Modelos de Linguagem de Grande Escala) para fornecer capacidades de perguntas e respostas verídicas, respaldadas por citações bem fundamentadas de diversos dados complexos formatados.
|
||||
[RAGFlow](https://ragflow.io/) é um mecanismo de RAG (Retrieval-Augmented Generation) open-source líder que funde tecnologias RAG de ponta com capacidades de Agent para criar uma camada de contexto superior para LLMs. Oferece um fluxo de trabalho RAG otimizado, adaptável a empresas de qualquer escala. Impulsionado por um motor de contexto convergente e modelos de Agent pré-construídos, o RAGFlow permite que desenvolvedores transformem dados complexos em sistemas de IA de alta fidelidade, prontos para produção, com eficiência e precisão excepcionais.
|
||||
|
||||
## 🎮 Demo
|
||||
|
||||
Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🔥 Últimas Atualizações
|
||||
|
||||
- 05-02-2025 Atualiza a lista de modelos de 'SILICONFLOW' e adiciona suporte para Deepseek-R1/DeepSeek-V3.
|
||||
- 26-01-2025 Otimize a extração e aplicação de gráficos de conhecimento e forneça uma variedade de opções de configuração.
|
||||
- 15-10-2025 Suporte para pipelines de dados orquestrados.
|
||||
- 08-08-2025 Suporta a mais recente série GPT-5 da OpenAI.
|
||||
- 01-08-2025 Suporta fluxo de trabalho agente e MCP.
|
||||
- 23-05-2025 Adicione o componente executor de código Python/JS ao Agente.
|
||||
- 05-05-2025 Suporte a consultas entre idiomas.
|
||||
- 19-03-2025 Suporta o uso de um modelo multi-modal para entender imagens dentro de arquivos PDF ou DOCX.
|
||||
- 28-02-2025 Combinado com a pesquisa na Internet (Tavily), suporta pesquisas profundas para qualquer LLM.
|
||||
- 18-12-2024 Atualiza o modelo de Análise de Layout de Documentos no DeepDoc.
|
||||
- 04-12-2024 Adiciona suporte para pontuação de pagerank na base de conhecimento.
|
||||
- 22-11-2024 Adiciona mais variáveis para o Agente.
|
||||
- 01-11-2024 Adiciona extração de palavras-chave e geração de perguntas relacionadas aos blocos analisados para melhorar a precisão da recuperação.
|
||||
- 22-08-2024 Suporta conversão de texto para comandos SQL via RAG.
|
||||
|
||||
## 🎉 Fique Ligado
|
||||
@ -122,7 +129,7 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
## 🔎 Arquitetura do Sistema
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
||||
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
|
||||
</div>
|
||||
|
||||
## 🎬 Primeiros Passos
|
||||
@ -133,7 +140,10 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
- RAM >= 16 GB
|
||||
- Disco >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).
|
||||
- [gVisor](https://gvisor.dev/docs/user_guide/install/): Necessário apenas se você pretende usar o recurso de executor de código (sandbox) do RAGFlow.
|
||||
|
||||
> [!TIP]
|
||||
> Se você não instalou o Docker na sua máquina local (Windows, Mac ou Linux), veja [Instalar Docker Engine](https://docs.docker.com/engine/install/).
|
||||
|
||||
### 🚀 Iniciar o servidor
|
||||
|
||||
@ -166,17 +176,25 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
|
||||
|
||||
3. Inicie o servidor usando as imagens Docker pré-compiladas:
|
||||
|
||||
> O comando abaixo baixa a edição `v0.17.0-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.17.0-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` para a edição completa `v0.17.0`.
|
||||
> [!CAUTION]
|
||||
> Todas as imagens Docker são construídas para plataformas x86. Atualmente, não oferecemos imagens Docker para ARM64.
|
||||
> Se você estiver usando uma plataforma ARM64, por favor, utilize [este guia](https://ragflow.io/docs/dev/build_docker_image) para construir uma imagem Docker compatível com o seu sistema.
|
||||
|
||||
> O comando abaixo baixa a edição `v0.21.1-slim` da imagem Docker do RAGFlow. Consulte a tabela a seguir para descrições de diferentes edições do RAGFlow. Para baixar uma edição do RAGFlow diferente da `v0.21.1-slim`, atualize a variável `RAGFLOW_IMAGE` conforme necessário no **docker/.env** antes de usar `docker compose` para iniciar o servidor. Por exemplo: defina `RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1` para a edição completa `v0.21.1`.
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
# Use CPU for embedding and DeepDoc tasks:
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
|
||||
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||
# docker compose -f docker-compose-gpu.yml up -d
|
||||
```
|
||||
|
||||
| Tag da imagem RAGFlow | Tamanho da imagem (GB) | Possui modelos de incorporação? | Estável? |
|
||||
| --------------------- | ---------------------- | ------------------------------- | ------------------------ |
|
||||
| v0.17.0 | ~9 | :heavy_check_mark: | Lançamento estável |
|
||||
| v0.17.0-slim | ~2 | ❌ | Lançamento estável |
|
||||
| v0.21.1 | ~9 | :heavy_check_mark: | Lançamento estável |
|
||||
| v0.21.1-slim | ~2 | ❌ | Lançamento estável |
|
||||
| nightly | ~9 | :heavy_check_mark: | _Instável_ build noturno |
|
||||
| nightly-slim | ~2 | ❌ | _Instável_ build noturno |
|
||||
|
||||
@ -256,7 +274,7 @@ Esta imagem tem cerca de 2 GB de tamanho e depende de serviços externos de LLM
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 Criar uma imagem Docker incluindo modelos de incorporação
|
||||
@ -266,15 +284,15 @@ Esta imagem tem cerca de 9 GB de tamanho. Como inclui modelos de incorporação,
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
docker build --platform linux/amd64 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 Lançar o serviço a partir do código-fonte para desenvolvimento
|
||||
|
||||
1. Instale o `uv`, ou pule esta etapa se ele já estiver instalado:
|
||||
1. Instale o `uv` e o `pre-commit`, ou pule esta etapa se eles já estiverem instalados:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
pipx install uv pre-commit
|
||||
```
|
||||
|
||||
2. Clone o código-fonte e instale as dependências Python:
|
||||
@ -283,6 +301,8 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # instala os módulos Python dependentes do RAGFlow
|
||||
uv run download_deps.py
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
3. Inicie os serviços dependentes (MinIO, Elasticsearch, Redis e MySQL) usando Docker Compose:
|
||||
@ -294,7 +314,7 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
Adicione a seguinte linha ao arquivo `/etc/hosts` para resolver todos os hosts especificados em **docker/.env** para `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
|
||||
```
|
||||
|
||||
4. Se não conseguir acessar o HuggingFace, defina a variável de ambiente `HF_ENDPOINT` para usar um site espelho:
|
||||
@ -303,7 +323,18 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. Lance o serviço de back-end:
|
||||
5. Se o seu sistema operacional não tiver jemalloc, instale-o da seguinte maneira:
|
||||
|
||||
```bash
|
||||
# ubuntu
|
||||
sudo apt-get install libjemalloc-dev
|
||||
# centos
|
||||
sudo yum install jemalloc
|
||||
# mac
|
||||
sudo brew install jemalloc
|
||||
```
|
||||
|
||||
6. Lance o serviço de back-end:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
@ -311,14 +342,14 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. Instale as dependências do front-end:
|
||||
7. Instale as dependências do front-end:
|
||||
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
|
||||
7. Lance o serviço de front-end:
|
||||
8. Lance o serviço de front-end:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
@ -328,12 +359,22 @@ docker build -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
|
||||

|
||||
|
||||
9. Pare os serviços de front-end e back-end do RAGFlow após a conclusão do desenvolvimento:
|
||||
|
||||
```bash
|
||||
pkill -f "ragflow_server.py|task_executor.py"
|
||||
```
|
||||
|
||||
|
||||
## 📚 Documentação
|
||||
|
||||
- [Início rápido](https://ragflow.io/docs/dev/)
|
||||
- [Guia do usuário](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Referências](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||
- [References](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||
|
||||
## 📜 Roadmap
|
||||
|
||||
@ -341,11 +382,11 @@ Veja o [RAGFlow Roadmap 2025](https://github.com/infiniflow/ragflow/issues/4214)
|
||||
|
||||
## 🏄 Comunidade
|
||||
|
||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||
- [Discord](https://discord.gg/NjYzJD3GM3)
|
||||
- [Twitter](https://twitter.com/infiniflowai)
|
||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||
|
||||
## 🙌 Contribuindo
|
||||
|
||||
O RAGFlow prospera por meio da colaboração de código aberto. Com esse espírito, abraçamos contribuições diversas da comunidade.
|
||||
Se você deseja fazer parte, primeiro revise nossas [Diretrizes de Contribuição](./CONTRIBUTING.md).
|
||||
Se você deseja fazer parte, primeiro revise nossas [Diretrizes de Contribuição](https://ragflow.io/docs/dev/contributing).
|
||||
|
||||
156
README_tzh.md
@ -1,16 +1,17 @@
|
||||
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
|
||||
<img src="web/src/assets/logo-with-text.svg" width="350" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
|
||||
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DFE0E5"></a>
|
||||
<a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DBEDFA"></a>
|
||||
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
|
||||
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
|
||||
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
|
||||
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -21,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -29,37 +30,67 @@
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||
</a>
|
||||
<a href="https://deepwiki.com/infiniflow/ragflow">
|
||||
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
#
|
||||
|
||||
<div align="center">
|
||||
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
</div>
|
||||
|
||||
<details open>
|
||||
<summary><b>📕 目錄</b></summary>
|
||||
|
||||
- 💡 [RAGFlow 是什麼?](#-RAGFlow-是什麼)
|
||||
- 🎮 [Demo-試用](#-demo-試用)
|
||||
- 📌 [近期更新](#-近期更新)
|
||||
- 🌟 [主要功能](#-主要功能)
|
||||
- 🔎 [系統架構](#-系統架構)
|
||||
- 🎬 [快速開始](#-快速開始)
|
||||
- 🔧 [系統配置](#-系統配置)
|
||||
- 🔨 [以原始碼啟動服務](#-以原始碼啟動服務)
|
||||
- 📚 [技術文檔](#-技術文檔)
|
||||
- 📜 [路線圖](#-路線圖)
|
||||
- 🏄 [貢獻指南](#-貢獻指南)
|
||||
- 🙌 [加入社區](#-加入社區)
|
||||
- 🤝 [商務合作](#-商務合作)
|
||||
|
||||
</details>
|
||||
|
||||
## 💡 RAGFlow 是什麼?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) 是一款基於深度文件理解所建構的開源 RAG(Retrieval-Augmented Generation)引擎。 RAGFlow 可以為各種規模的企業及個人提供一套精簡的 RAG 工作流程,結合大語言模型(LLM)針對用戶各類不同的複雜格式數據提供可靠的問答以及有理有據的引用。
|
||||
[RAGFlow](https://ragflow.io/) 是一款領先的開源 RAG(Retrieval-Augmented Generation)引擎,通過融合前沿的 RAG 技術與 Agent 能力,為大型語言模型提供卓越的上下文層。它提供可適配任意規模企業的端到端 RAG 工作流,憑藉融合式上下文引擎與預置的 Agent 模板,助力開發者以極致效率與精度將複雜數據轉化為高可信、生產級的人工智能系統。
|
||||
|
||||
## 🎮 Demo 試用
|
||||
|
||||
請登入網址 [https://demo.ragflow.io](https://demo.ragflow.io) 試用 demo。
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🔥 近期更新
|
||||
|
||||
- 2025-02-05 更新「SILICONFLOW」的型號清單並新增 Deepseek-R1/DeepSeek-V3 的支援。
|
||||
- 2025-01-26 最佳化知識圖譜的擷取與應用,提供了多種配置選擇。
|
||||
- 2025-10-15 支援可編排的資料管道。
|
||||
- 2025-08-08 支援 OpenAI 最新的 GPT-5 系列模型。
|
||||
- 2025-08-01 支援 agentic workflow 和 MCP
|
||||
- 2025-05-23 為 Agent 新增 Python/JS 程式碼執行器元件。
|
||||
- 2025-05-05 支援跨語言查詢。
|
||||
- 2025-03-19 PDF和DOCX中的圖支持用多模態大模型去解析得到描述.
|
||||
- 2025-02-28 結合網路搜尋(Tavily),對於任意大模型實現類似 Deep Research 的推理功能.
|
||||
- 2024-12-18 升級了 DeepDoc 的文檔佈局分析模型。
|
||||
- 2024-12-04 支援知識庫的 Pagerank 分數。
|
||||
- 2024-11-22 完善了 Agent 中的變數定義和使用。
|
||||
- 2024-11-01 對解析後的 chunk 加入關鍵字抽取和相關問題產生以提高回想的準確度。
|
||||
- 2024-08-22 支援用 RAG 技術實現從自然語言到 SQL 語句的轉換。
|
||||
|
||||
## 🎉 關注項目
|
||||
@ -101,7 +132,7 @@
|
||||
## 🔎 系統架構
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
||||
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
|
||||
</div>
|
||||
|
||||
## 🎬 快速開始
|
||||
@ -112,7 +143,10 @@
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> 如果你並沒有在本機安裝 Docker(Windows、Mac,或 Linux), 可以參考文件 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安裝。
|
||||
- [gVisor](https://gvisor.dev/docs/user_guide/install/): 僅在您打算使用 RAGFlow 的代碼執行器(沙箱)功能時才需要安裝。
|
||||
|
||||
> [!TIP]
|
||||
> 如果你並沒有在本機安裝 Docker(Windows、Mac,或 Linux), 可以參考文件 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安裝。
|
||||
|
||||
### 🚀 啟動伺服器
|
||||
|
||||
@ -145,17 +179,25 @@
|
||||
|
||||
3. 進入 **docker** 資料夾,利用事先編譯好的 Docker 映像啟動伺服器:
|
||||
|
||||
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.17.0-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.17.0-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` 來下載 RAGFlow 鏡像的 `v0.17.0` 完整發行版。
|
||||
> [!CAUTION]
|
||||
> 所有 Docker 映像檔都是為 x86 平台建置的。目前,我們不提供 ARM64 平台的 Docker 映像檔。
|
||||
> 如果您使用的是 ARM64 平台,請使用 [這份指南](https://ragflow.io/docs/dev/build_docker_image) 來建置適合您系統的 Docker 映像檔。
|
||||
|
||||
> 執行以下指令會自動下載 RAGFlow slim Docker 映像 `v0.21.1-slim`。請參考下表查看不同 Docker 發行版的說明。如需下載不同於 `v0.21.1-slim` 的 Docker 映像,請在執行 `docker compose` 啟動服務之前先更新 **docker/.env** 檔案內的 `RAGFLOW_IMAGE` 變數。例如,你可以透過設定 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1` 來下載 RAGFlow 鏡像的 `v0.21.1` 完整發行版。
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
# Use CPU for embedding and DeepDoc tasks:
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
|
||||
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||
# docker compose -f docker-compose-gpu.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.17.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.0-slim | ≈2 | ❌ | Stable release |
|
||||
| v0.21.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.21.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
@ -244,7 +286,7 @@ RAGFlow 預設使用 Elasticsearch 儲存文字和向量資料. 如果要切換
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 原始碼編譯 Docker 映像(包含 embedding 模型)
|
||||
@ -254,15 +296,15 @@ docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t in
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 以原始碼啟動服務
|
||||
|
||||
1. 安裝 uv。如已安裝,可跳過此步驟:
|
||||
1. 安裝 `uv` 和 `pre-commit`。如已安裝,可跳過此步驟:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
pipx install uv pre-commit
|
||||
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
|
||||
```
|
||||
|
||||
@ -272,6 +314,8 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
uv run download_deps.py
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
3. 透過 Docker Compose 啟動依賴的服務(MinIO, Elasticsearch, Redis, and MySQL):
|
||||
@ -283,7 +327,7 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
在 `/etc/hosts` 中加入以下程式碼,將 **conf/service_conf.yaml** 檔案中的所有 host 位址都解析為 `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
|
||||
```
|
||||
|
||||
4. 如果無法存取 HuggingFace,可以把環境變數 `HF_ENDPOINT` 設為對應的鏡像網站:
|
||||
@ -292,24 +336,36 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5.啟動後端服務:
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
5. 如果你的作業系統沒有 jemalloc,請按照如下方式安裝:
|
||||
|
||||
```
|
||||
```bash
|
||||
# ubuntu
|
||||
sudo apt-get install libjemalloc-dev
|
||||
# centos
|
||||
sudo yum install jemalloc
|
||||
# mac
|
||||
sudo brew install jemalloc
|
||||
```
|
||||
|
||||
6. 安裝前端依賴:
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
6. 啟動後端服務:
|
||||
|
||||
7. 啟動前端服務:
|
||||
```bash
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
export PYTHONPATH=$(pwd)
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
7. 安裝前端依賴:
|
||||
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
|
||||
8. 啟動前端服務:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
|
||||
```
|
||||
|
||||
_以下界面說明系統已成功啟動:_
|
||||
@ -317,12 +373,22 @@ npm install
|
||||

|
||||
|
||||
|
||||
9. 開發完成後停止 RAGFlow 前端和後端服務:
|
||||
|
||||
```bash
|
||||
pkill -f "ragflow_server.py|task_executor.py"
|
||||
```
|
||||
|
||||
|
||||
## 📚 技術文檔
|
||||
|
||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||
- [References](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||
|
||||
## 📜 路線圖
|
||||
|
||||
@ -330,13 +396,13 @@ npm install
|
||||
|
||||
## 🏄 開源社群
|
||||
|
||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||
- [Discord](https://discord.gg/zd4qPW6t)
|
||||
- [Twitter](https://twitter.com/infiniflowai)
|
||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||
|
||||
## 🙌 貢獻指南
|
||||
|
||||
RAGFlow 只有透過開源協作才能蓬勃發展。秉持這項精神,我們歡迎來自社區的各種貢獻。如果您有意參與其中,請查閱我們的 [貢獻者指南](./CONTRIBUTING.md) 。
|
||||
RAGFlow 只有透過開源協作才能蓬勃發展。秉持這項精神,我們歡迎來自社區的各種貢獻。如果您有意參與其中,請查閱我們的 [貢獻者指南](https://ragflow.io/docs/dev/contributing) 。
|
||||
|
||||
## 🤝 商務合作
|
||||
|
||||
|
||||
139
README_zh.md
@ -1,17 +1,17 @@
|
||||
<div align="center">
|
||||
<a href="https://demo.ragflow.io/">
|
||||
<img src="web/src/assets/logo-with-text.png" width="350" alt="ragflow logo">
|
||||
<img src="web/src/assets/logo-with-text.svg" width="350" alt="ragflow logo">
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="./README.md">English</a> |
|
||||
<a href="./README_zh.md">简体中文</a> |
|
||||
<a href="./README_tzh.md">繁体中文</a> |
|
||||
<a href="./README_ja.md">日本語</a> |
|
||||
<a href="./README_ko.md">한국어</a> |
|
||||
<a href="./README_id.md">Bahasa Indonesia</a> |
|
||||
<a href="/README_pt_br.md">Português (Brasil)</a>
|
||||
<a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-DFE0E5"></a>
|
||||
<a href="./README_zh.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-DBEDFA"></a>
|
||||
<a href="./README_tzh.md"><img alt="繁體版中文自述文件" src="https://img.shields.io/badge/繁體中文-DFE0E5"></a>
|
||||
<a href="./README_ja.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-DFE0E5"></a>
|
||||
<a href="./README_ko.md"><img alt="한국어" src="https://img.shields.io/badge/한국어-DFE0E5"></a>
|
||||
<a href="./README_id.md"><img alt="Bahasa Indonesia" src="https://img.shields.io/badge/Bahasa Indonesia-DFE0E5"></a>
|
||||
<a href="./README_pt_br.md"><img alt="Português(Brasil)" src="https://img.shields.io/badge/Português(Brasil)-DFE0E5"></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@ -22,7 +22,7 @@
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/Online-Demo-4e6b99">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/infiniflow/ragflow" target="_blank">
|
||||
<img src="https://img.shields.io/badge/docker_pull-ragflow:v0.17.0-brightgreen" alt="docker pull infiniflow/ragflow:v0.17.0">
|
||||
<img src="https://img.shields.io/docker/pulls/infiniflow/ragflow?label=Docker%20Pulls&color=0db7ed&logo=docker&logoColor=white&style=flat-square" alt="docker pull infiniflow/ragflow:v0.21.1">
|
||||
</a>
|
||||
<a href="https://github.com/infiniflow/ragflow/releases/latest">
|
||||
<img src="https://img.shields.io/github/v/release/infiniflow/ragflow?color=blue&label=Latest%20Release" alt="Latest Release">
|
||||
@ -30,37 +30,67 @@
|
||||
<a href="https://github.com/infiniflow/ragflow/blob/main/LICENSE">
|
||||
<img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?labelColor=d4eaf7&color=2e6cc4" alt="license">
|
||||
</a>
|
||||
<a href="https://deepwiki.com/infiniflow/ragflow">
|
||||
<img alt="Ask DeepWiki" src="https://deepwiki.com/badge.svg">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<a href="https://ragflow.io/docs/dev/">Document</a> |
|
||||
<a href="https://github.com/infiniflow/ragflow/issues/4214">Roadmap</a> |
|
||||
<a href="https://twitter.com/infiniflowai">Twitter</a> |
|
||||
<a href="https://discord.gg/4XxujFgUN7">Discord</a> |
|
||||
<a href="https://discord.gg/NjYzJD3GM3">Discord</a> |
|
||||
<a href="https://demo.ragflow.io">Demo</a>
|
||||
</h4>
|
||||
|
||||
#
|
||||
|
||||
<div align="center">
|
||||
<a href="https://trendshift.io/repositories/9064" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9064" alt="infiniflow%2Fragflow | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
</div>
|
||||
|
||||
<details open>
|
||||
<summary><b>📕 目录</b></summary>
|
||||
|
||||
- 💡 [RAGFlow 是什么?](#-RAGFlow-是什么)
|
||||
- 🎮 [Demo](#-demo)
|
||||
- 📌 [近期更新](#-近期更新)
|
||||
- 🌟 [主要功能](#-主要功能)
|
||||
- 🔎 [系统架构](#-系统架构)
|
||||
- 🎬 [快速开始](#-快速开始)
|
||||
- 🔧 [系统配置](#-系统配置)
|
||||
- 🔨 [以源代码启动服务](#-以源代码启动服务)
|
||||
- 📚 [技术文档](#-技术文档)
|
||||
- 📜 [路线图](#-路线图)
|
||||
- 🏄 [贡献指南](#-贡献指南)
|
||||
- 🙌 [加入社区](#-加入社区)
|
||||
- 🤝 [商务合作](#-商务合作)
|
||||
|
||||
</details>
|
||||
|
||||
## 💡 RAGFlow 是什么?
|
||||
|
||||
[RAGFlow](https://ragflow.io/) 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。
|
||||
[RAGFlow](https://ragflow.io/) 是一款领先的开源检索增强生成(RAG)引擎,通过融合前沿的 RAG 技术与 Agent 能力,为大型语言模型提供卓越的上下文层。它提供可适配任意规模企业的端到端 RAG 工作流,凭借融合式上下文引擎与预置的 Agent 模板,助力开发者以极致效率与精度将复杂数据转化为高可信、生产级的人工智能系统。
|
||||
|
||||
## 🎮 Demo 试用
|
||||
|
||||
请登录网址 [https://demo.ragflow.io](https://demo.ragflow.io) 试用 demo。
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/7248/2f6baa3e-1092-4f11-866d-36f6a9d075e5" width="1200"/>
|
||||
<img src="https://github.com/user-attachments/assets/504bbbf1-c9f7-4d83-8cc5-e9cb63c26db6" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/chunking.gif" width="1200"/>
|
||||
<img src="https://raw.githubusercontent.com/infiniflow/ragflow-docs/refs/heads/image/image/agentic-dark.gif" width="1200"/>
|
||||
</div>
|
||||
|
||||
## 🔥 近期更新
|
||||
|
||||
- 2025-02-05 更新硅基流动的模型列表,增加了对 Deepseek-R1/DeepSeek-V3 的支持。
|
||||
- 2025-01-26 优化知识图谱的提取和应用,提供了多种配置选择。
|
||||
- 2025-10-15 支持可编排的数据管道。
|
||||
- 2025-08-08 支持 OpenAI 最新的 GPT-5 系列模型。
|
||||
- 2025-08-01 支持 agentic workflow 和 MCP。
|
||||
- 2025-05-23 Agent 新增 Python/JS 代码执行器组件。
|
||||
- 2025-05-05 支持跨语言查询。
|
||||
- 2025-03-19 PDF 和 DOCX 中的图支持用多模态大模型去解析得到描述.
|
||||
- 2025-02-28 结合互联网搜索(Tavily),对于任意大模型实现类似 Deep Research 的推理功能.
|
||||
- 2024-12-18 升级了 DeepDoc 的文档布局分析模型。
|
||||
- 2024-12-04 支持知识库的 Pagerank 分数。
|
||||
- 2024-11-22 完善了 Agent 中的变量定义和使用。
|
||||
- 2024-11-01 对解析后的 chunk 加入关键词抽取和相关问题生成以提高召回的准确度。
|
||||
- 2024-08-22 支持用 RAG 技术实现从自然语言到 SQL 语句的转换。
|
||||
|
||||
## 🎉 关注项目
|
||||
@ -102,7 +132,7 @@
|
||||
## 🔎 系统架构
|
||||
|
||||
<div align="center" style="margin-top:20px;margin-bottom:20px;">
|
||||
<img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
|
||||
<img src="https://github.com/user-attachments/assets/31b0dd6f-ca4f-445a-9457-70cb44a381b2" width="1000"/>
|
||||
</div>
|
||||
|
||||
## 🎬 快速开始
|
||||
@ -113,7 +143,10 @@
|
||||
- RAM >= 16 GB
|
||||
- Disk >= 50 GB
|
||||
- Docker >= 24.0.0 & Docker Compose >= v2.26.1
|
||||
> 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。
|
||||
- [gVisor](https://gvisor.dev/docs/user_guide/install/): 仅在你打算使用 RAGFlow 的代码执行器(沙箱)功能时才需要安装。
|
||||
|
||||
> [!TIP]
|
||||
> 如果你并没有在本机安装 Docker(Windows、Mac,或者 Linux), 可以参考文档 [Install Docker Engine](https://docs.docker.com/engine/install/) 自行安装。
|
||||
|
||||
### 🚀 启动服务器
|
||||
|
||||
@ -146,17 +179,25 @@
|
||||
|
||||
3. 进入 **docker** 文件夹,利用提前编译好的 Docker 镜像启动服务器:
|
||||
|
||||
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.17.0-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.17.0-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.0` 来下载 RAGFlow 镜像的 `v0.17.0` 完整发行版。
|
||||
> [!CAUTION]
|
||||
> 请注意,目前官方提供的所有 Docker 镜像均基于 x86 架构构建,并不提供基于 ARM64 的 Docker 镜像。
|
||||
> 如果你的操作系统是 ARM64 架构,请参考[这篇文档](https://ragflow.io/docs/dev/build_docker_image)自行构建 Docker 镜像。
|
||||
|
||||
> 运行以下命令会自动下载 RAGFlow slim Docker 镜像 `v0.21.1-slim`。请参考下表查看不同 Docker 发行版的描述。如需下载不同于 `v0.21.1-slim` 的 Docker 镜像,请在运行 `docker compose` 启动服务之前先更新 **docker/.env** 文件内的 `RAGFLOW_IMAGE` 变量。比如,你可以通过设置 `RAGFLOW_IMAGE=infiniflow/ragflow:v0.21.1` 来下载 RAGFlow 镜像的 `v0.21.1` 完整发行版。
|
||||
|
||||
```bash
|
||||
$ cd ragflow/docker
|
||||
# Use CPU for embedding and DeepDoc tasks:
|
||||
$ docker compose -f docker-compose.yml up -d
|
||||
|
||||
# To use GPU to accelerate embedding and DeepDoc tasks:
|
||||
# docker compose -f docker-compose-gpu.yml up -d
|
||||
```
|
||||
|
||||
| RAGFlow image tag | Image size (GB) | Has embedding models? | Stable? |
|
||||
| ----------------- | --------------- | --------------------- | ------------------------ |
|
||||
| v0.17.0 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.17.0-slim | ≈2 | ❌ | Stable release |
|
||||
| v0.21.1 | ≈9 | :heavy_check_mark: | Stable release |
|
||||
| v0.21.1-slim | ≈2 | ❌ | Stable release |
|
||||
| nightly | ≈9 | :heavy_check_mark: | _Unstable_ nightly build |
|
||||
| nightly-slim | ≈2 | ❌ | _Unstable_ nightly build |
|
||||
|
||||
@ -245,7 +286,7 @@ RAGFlow 默认使用 Elasticsearch 存储文本和向量数据. 如果要切换
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
docker build --platform linux/amd64 --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly-slim .
|
||||
```
|
||||
|
||||
## 🔧 源码编译 Docker 镜像(包含 embedding 模型)
|
||||
@ -255,15 +296,15 @@ docker build --build-arg LIGHTEN=1 --build-arg NEED_MIRROR=1 -f Dockerfile -t in
|
||||
```bash
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
docker build --platform linux/amd64 --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:nightly .
|
||||
```
|
||||
|
||||
## 🔨 以源代码启动服务
|
||||
|
||||
1. 安装 uv。如已经安装,可跳过本步骤:
|
||||
1. 安装 `uv` 和 `pre-commit`。如已经安装,可跳过本步骤:
|
||||
|
||||
```bash
|
||||
pipx install uv
|
||||
pipx install uv pre-commit
|
||||
export UV_INDEX=https://mirrors.aliyun.com/pypi/simple
|
||||
```
|
||||
|
||||
@ -273,6 +314,8 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
git clone https://github.com/infiniflow/ragflow.git
|
||||
cd ragflow/
|
||||
uv sync --python 3.10 --all-extras # install RAGFlow dependent python modules
|
||||
uv run download_deps.py
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
3. 通过 Docker Compose 启动依赖的服务(MinIO, Elasticsearch, Redis, and MySQL):
|
||||
@ -281,19 +324,29 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
docker compose -f docker/docker-compose-base.yml up -d
|
||||
```
|
||||
|
||||
在 `/etc/hosts` 中添加以下代码,将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
||||
在 `/etc/hosts` 中添加以下代码,目的是将 **conf/service_conf.yaml** 文件中的所有 host 地址都解析为 `127.0.0.1`:
|
||||
|
||||
```
|
||||
127.0.0.1 es01 infinity mysql minio redis
|
||||
127.0.0.1 es01 infinity mysql minio redis sandbox-executor-manager
|
||||
```
|
||||
|
||||
4. 如果无法访问 HuggingFace,可以把环境变量 `HF_ENDPOINT` 设成相应的镜像站点:
|
||||
|
||||
```bash
|
||||
export HF_ENDPOINT=https://hf-mirror.com
|
||||
```
|
||||
|
||||
5. 启动后端服务:
|
||||
5. 如果你的操作系统没有 jemalloc,请按照如下方式安装:
|
||||
|
||||
```bash
|
||||
# ubuntu
|
||||
sudo apt-get install libjemalloc-dev
|
||||
# centos
|
||||
sudo yum install jemalloc
|
||||
# mac
|
||||
sudo brew install jemalloc
|
||||
```
|
||||
|
||||
6. 启动后端服务:
|
||||
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
@ -301,12 +354,14 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
bash docker/launch_backend_service.sh
|
||||
```
|
||||
|
||||
6. 安装前端依赖:
|
||||
7. 安装前端依赖:
|
||||
|
||||
```bash
|
||||
cd web
|
||||
npm install
|
||||
```
|
||||
7. 启动前端服务:
|
||||
|
||||
8. 启动前端服务:
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
@ -316,12 +371,22 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
|
||||

|
||||
|
||||
9. 开发完成后停止 RAGFlow 前端和后端服务:
|
||||
|
||||
```bash
|
||||
pkill -f "ragflow_server.py|task_executor.py"
|
||||
```
|
||||
|
||||
|
||||
## 📚 技术文档
|
||||
|
||||
- [Quickstart](https://ragflow.io/docs/dev/)
|
||||
- [User guide](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Configuration](https://ragflow.io/docs/dev/configurations)
|
||||
- [Release notes](https://ragflow.io/docs/dev/release_notes)
|
||||
- [User guides](https://ragflow.io/docs/dev/category/guides)
|
||||
- [Developer guides](https://ragflow.io/docs/dev/category/developers)
|
||||
- [References](https://ragflow.io/docs/dev/category/references)
|
||||
- [FAQ](https://ragflow.io/docs/dev/faq)
|
||||
- [FAQs](https://ragflow.io/docs/dev/faq)
|
||||
|
||||
## 📜 路线图
|
||||
|
||||
@ -329,13 +394,13 @@ docker build --build-arg NEED_MIRROR=1 -f Dockerfile -t infiniflow/ragflow:night
|
||||
|
||||
## 🏄 开源社区
|
||||
|
||||
- [Discord](https://discord.gg/4XxujFgUN7)
|
||||
- [Discord](https://discord.gg/zd4qPW6t)
|
||||
- [Twitter](https://twitter.com/infiniflowai)
|
||||
- [GitHub Discussions](https://github.com/orgs/infiniflow/discussions)
|
||||
|
||||
## 🙌 贡献指南
|
||||
|
||||
RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](./CONTRIBUTING.md) 。
|
||||
RAGFlow 只有通过开源协作才能蓬勃发展。秉持这一精神,我们欢迎来自社区的各种贡献。如果您有意参与其中,请查阅我们的 [贡献者指南](https://ragflow.io/docs/dev/contributing) 。
|
||||
|
||||
## 🤝 商务合作
|
||||
|
||||
|
||||
47
admin/build_cli_release.sh
Executable file
@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
echo "🚀 Start building..."
|
||||
echo "================================"
|
||||
|
||||
PROJECT_NAME="ragflow-cli"
|
||||
|
||||
RELEASE_DIR="release"
|
||||
BUILD_DIR="dist"
|
||||
SOURCE_DIR="src"
|
||||
PACKAGE_DIR="ragflow_cli"
|
||||
|
||||
echo "🧹 Clean old build folder..."
|
||||
rm -rf release/
|
||||
|
||||
echo "📁 Prepare source code..."
|
||||
mkdir release/$PROJECT_NAME/$SOURCE_DIR -p
|
||||
cp pyproject.toml release/$PROJECT_NAME/pyproject.toml
|
||||
cp README.md release/$PROJECT_NAME/README.md
|
||||
|
||||
mkdir release/$PROJECT_NAME/$SOURCE_DIR/$PACKAGE_DIR -p
|
||||
cp admin_client.py release/$PROJECT_NAME/$SOURCE_DIR/$PACKAGE_DIR/admin_client.py
|
||||
|
||||
if [ -d "release/$PROJECT_NAME/$SOURCE_DIR" ]; then
|
||||
echo "✅ source dir: release/$PROJECT_NAME/$SOURCE_DIR"
|
||||
else
|
||||
echo "❌ source dir not exist: release/$PROJECT_NAME/$SOURCE_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🔨 Make build file..."
|
||||
cd release/$PROJECT_NAME
|
||||
export PYTHONPATH=$(pwd)
|
||||
python -m build
|
||||
|
||||
echo "✅ check build result..."
|
||||
if [ -d "$BUILD_DIR" ]; then
|
||||
echo "📦 Package generated:"
|
||||
ls -la $BUILD_DIR/
|
||||
else
|
||||
echo "❌ Build Failed: $BUILD_DIR not exist."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "🎉 Build finished successfully!"
|
||||
136
admin/client/README.md
Normal file
@ -0,0 +1,136 @@
|
||||
# RAGFlow Admin Service & CLI
|
||||
|
||||
### Introduction
|
||||
|
||||
Admin Service is a dedicated management component designed to monitor, maintain, and administrate the RAGFlow system. It provides comprehensive tools for ensuring system stability, performing operational tasks, and managing users and permissions efficiently.
|
||||
|
||||
The service offers real-time monitoring of critical components, including the RAGFlow server, Task Executor processes, and dependent services such as MySQL, Elasticsearch, Redis, and MinIO. It automatically checks their health status, resource usage, and uptime, and performs restarts in case of failures to minimize downtime.
|
||||
|
||||
For user and system management, it supports listing, creating, modifying, and deleting users and their associated resources like knowledge bases and Agents.
|
||||
|
||||
Built with scalability and reliability in mind, the Admin Service ensures smooth system operation and simplifies maintenance workflows.
|
||||
|
||||
It consists of a server-side service and a command-line client (CLI), both implemented in Python. User commands are parsed using the Lark parsing toolkit.
|
||||
|
||||
- **Admin Service**: A backend service that interfaces with the RAGFlow system to execute administrative operations and monitor its status.
|
||||
- **Admin CLI**: A command-line interface that allows users to connect to the Admin Service and issue commands for system management.
|
||||
|
||||
|
||||
|
||||
### Starting the Admin Service
|
||||
|
||||
#### Launching from source code
|
||||
|
||||
1. Before starting the Admin Service, make sure the RAGFlow system is already running.
|
||||
|
||||
2. Launch from source code:
|
||||
|
||||
```bash
|
||||
python admin/server/admin_server.py
|
||||
```
|
||||
The service will start and listen for incoming connections from the CLI on the configured port.
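To confirm the service is accepting connections, a quick check from the same host (port 9381 is an assumption, matching the CLI example below; use your configured port):

```bash
# verify the admin service is listening (9381 assumed; adjust to your configuration)
nc -z 127.0.0.1 9381 && echo "admin service is up"
```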
|
||||
|
||||
#### Using docker image
|
||||
|
||||
1. Before startup, configure the `docker-compose.yml` file to enable the admin server:
|
||||
|
||||
```yaml
|
||||
command:
|
||||
- --enable-adminserver
|
||||
```
|
||||
|
||||
2. Start the containers; the service will then listen for incoming connections from the CLI on the configured port.
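A minimal sketch, assuming the stock compose files under `docker/`:

```bash
# start the stack with the admin server enabled (paths assume the repo defaults)
cd docker
docker compose -f docker-compose.yml up -d
```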
|
||||
|
||||
|
||||
|
||||
### Using the Admin CLI
|
||||
|
||||
1. Ensure the Admin Service is running.
|
||||
2. Install ragflow-cli.
|
||||
```bash
|
||||
pip install ragflow-cli==0.21.1
|
||||
```
|
||||
3. Launch the CLI client:
|
||||
```bash
|
||||
ragflow-cli -h 127.0.0.1 -p 9381
|
||||
```
|
||||
You will be prompted to enter the superuser's password to log in.
|
||||
The default password is `admin`.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- -h: RAGFlow admin server host address
|
||||
|
||||
- -p: RAGFlow admin server port
|
||||
|
||||
|
||||
|
||||
## Supported Commands
|
||||
|
||||
Commands are case-insensitive and must be terminated with a semicolon (`;`).
|
||||
|
||||
### Service Management Commands
|
||||
|
||||
- `LIST SERVICES;`
|
||||
- Lists all available services within the RAGFlow system.
|
||||
- `SHOW SERVICE <id>;`
|
||||
- Shows detailed status information for the service identified by `<id>`.
|
||||
|
||||
|
||||
### User Management Commands
|
||||
|
||||
- `LIST USERS;`
|
||||
- Lists all users known to the system.
|
||||
- `SHOW USER '<username>';`
|
||||
- Shows details and permissions for the specified user. The username must be enclosed in single or double quotes.
|
||||
|
||||
- `CREATE USER '<username>' '<password>';`
|
||||
- Creates a user with the specified username and password. Both must be enclosed in single or double quotes.
|
||||
|
||||
- `DROP USER '<username>';`
|
||||
- Removes the specified user from the system. Use with caution.
|
||||
- `ALTER USER PASSWORD '<username>' '<new_password>';`
|
||||
- Changes the password for the specified user.
|
||||
- `ALTER USER ACTIVE '<username>' <on/off>;`
|
||||
- Activates (`on`) or deactivates (`off`) the specified user; see the example session after this list.
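A hypothetical session illustrating these commands (the username and passwords are examples only):

```commandline
admin> create user 'alice@example.com' 'S3cret!';
admin> alter user password 'alice@example.com' 'N3w-S3cret!';
admin> alter user active 'alice@example.com' off;
```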
|
||||
|
||||
|
||||
### Data and Agent Commands
|
||||
|
||||
- `LIST DATASETS OF '<username>';`
|
||||
- Lists the datasets associated with the specified user.
|
||||
- `LIST AGENTS OF '<username>';`
|
||||
- Lists the agents associated with the specified user.
|
||||
|
||||
### Meta-Commands
|
||||
|
||||
Meta-commands are prefixed with a backslash (`\`).
|
||||
|
||||
- `\?` or `\help`
|
||||
- Shows help information for the available commands.
|
||||
- `\q` or `\quit`
|
||||
- Exits the CLI application.
|
||||
|
||||
## Examples
|
||||
|
||||
```commandline
|
||||
admin> list users;
|
||||
+-------------------------------+------------------------+-----------+-------------+
|
||||
| create_date | email | is_active | nickname |
|
||||
+-------------------------------+------------------------+-----------+-------------+
|
||||
| Fri, 22 Nov 2024 16:03:41 GMT | jeffery@infiniflow.org | 1 | Jeffery |
|
||||
| Fri, 22 Nov 2024 16:10:55 GMT | aya@infiniflow.org | 1 | Waterdancer |
|
||||
+-------------------------------+------------------------+-----------+-------------+
|
||||
|
||||
admin> list services;
|
||||
+-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+
|
||||
| extra | host | id | name | port | service_type |
|
||||
+-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+
|
||||
| {} | 0.0.0.0 | 0 | ragflow_0 | 9380 | ragflow_server |
|
||||
| {'meta_type': 'mysql', 'password': 'infini_rag_flow', 'username': 'root'} | localhost | 1 | mysql | 5455 | meta_data |
|
||||
| {'password': 'infini_rag_flow', 'store_type': 'minio', 'user': 'rag_flow'} | localhost | 2 | minio | 9000 | file_store |
|
||||
| {'password': 'infini_rag_flow', 'retrieval_type': 'elasticsearch', 'username': 'elastic'} | localhost | 3 | elasticsearch | 1200 | retrieval |
|
||||
| {'db_name': 'default_db', 'retrieval_type': 'infinity'} | localhost | 4 | infinity | 23817 | retrieval |
|
||||
| {'database': 1, 'mq_type': 'redis', 'password': 'infini_rag_flow'} | localhost | 5 | redis | 6379 | message_queue |
|
||||
+-------------------------------------------------------------------------------------------+-----------+----+---------------+-------+----------------+
|
||||
```
|
||||
931
admin/client/admin_client.py
Normal file
@ -0,0 +1,931 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import argparse
|
||||
import base64
|
||||
from cmd import Cmd
|
||||
|
||||
from Cryptodome.PublicKey import RSA
|
||||
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
|
||||
from typing import Dict, List, Any
|
||||
from lark import Lark, Transformer, Tree
|
||||
import requests
|
||||
|
||||
GRAMMAR = r"""
|
||||
start: command
|
||||
|
||||
command: sql_command | meta_command
|
||||
|
||||
sql_command: list_services
|
||||
| show_service
|
||||
| startup_service
|
||||
| shutdown_service
|
||||
| restart_service
|
||||
| list_users
|
||||
| show_user
|
||||
| drop_user
|
||||
| alter_user
|
||||
| create_user
|
||||
| activate_user
|
||||
| list_datasets
|
||||
| list_agents
|
||||
| create_role
|
||||
| drop_role
|
||||
| alter_role
|
||||
| list_roles
|
||||
| show_role
|
||||
| grant_permission
|
||||
| revoke_permission
|
||||
| alter_user_role
|
||||
| show_user_permission
|
||||
|
||||
// meta command definition
|
||||
meta_command: "\\" meta_command_name [meta_args]
|
||||
|
||||
meta_command_name: /[a-zA-Z?]+/
|
||||
meta_args: (meta_arg)+
|
||||
|
||||
meta_arg: /[^\\s"']+/ | quoted_string
|
||||
|
||||
// command definition
|
||||
|
||||
LIST: "LIST"i
|
||||
SERVICES: "SERVICES"i
|
||||
SHOW: "SHOW"i
|
||||
CREATE: "CREATE"i
|
||||
SERVICE: "SERVICE"i
|
||||
SHUTDOWN: "SHUTDOWN"i
|
||||
STARTUP: "STARTUP"i
|
||||
RESTART: "RESTART"i
|
||||
USERS: "USERS"i
|
||||
DROP: "DROP"i
|
||||
USER: "USER"i
|
||||
ALTER: "ALTER"i
|
||||
ACTIVE: "ACTIVE"i
|
||||
PASSWORD: "PASSWORD"i
|
||||
DATASETS: "DATASETS"i
|
||||
OF: "OF"i
|
||||
AGENTS: "AGENTS"i
|
||||
ROLE: "ROLE"i
|
||||
ROLES: "ROLES"i
|
||||
DESCRIPTION: "DESCRIPTION"i
|
||||
GRANT: "GRANT"i
|
||||
REVOKE: "REVOKE"i
|
||||
ALL: "ALL"i
|
||||
PERMISSION: "PERMISSION"i
|
||||
TO: "TO"i
|
||||
FROM: "FROM"i
|
||||
FOR: "FOR"i
|
||||
RESOURCES: "RESOURCES"i
|
||||
ON: "ON"i
|
||||
SET: "SET"i
|
||||
|
||||
list_services: LIST SERVICES ";"
|
||||
show_service: SHOW SERVICE NUMBER ";"
|
||||
startup_service: STARTUP SERVICE NUMBER ";"
|
||||
shutdown_service: SHUTDOWN SERVICE NUMBER ";"
|
||||
restart_service: RESTART SERVICE NUMBER ";"
|
||||
|
||||
list_users: LIST USERS ";"
|
||||
drop_user: DROP USER quoted_string ";"
|
||||
alter_user: ALTER USER PASSWORD quoted_string quoted_string ";"
|
||||
show_user: SHOW USER quoted_string ";"
|
||||
create_user: CREATE USER quoted_string quoted_string ";"
|
||||
activate_user: ALTER USER ACTIVE quoted_string status ";"
|
||||
|
||||
list_datasets: LIST DATASETS OF quoted_string ";"
|
||||
list_agents: LIST AGENTS OF quoted_string ";"
|
||||
|
||||
create_role: CREATE ROLE identifier [DESCRIPTION quoted_string] ";"
|
||||
drop_role: DROP ROLE identifier ";"
|
||||
alter_role: ALTER ROLE identifier SET DESCRIPTION quoted_string ";"
|
||||
list_roles: LIST ROLES ";"
|
||||
show_role: SHOW ROLE identifier ";"
|
||||
|
||||
grant_permission: GRANT action_list ON identifier TO ROLE identifier ";"
|
||||
revoke_permission: REVOKE action_list ON identifier FROM ROLE identifier ";"
|
||||
alter_user_role: ALTER USER quoted_string SET ROLE identifier ";"
|
||||
show_user_permission: SHOW USER PERMISSION quoted_string ";"
|
||||
|
||||
action_list: identifier ("," identifier)*
|
||||
|
||||
identifier: WORD
|
||||
quoted_string: QUOTED_STRING
|
||||
status: WORD
|
||||
|
||||
QUOTED_STRING: /'[^']+'/ | /"[^"]+"/
|
||||
WORD: /[a-zA-Z0-9_\-\.]+/
|
||||
NUMBER: /[0-9]+/
|
||||
|
||||
%import common.WS
|
||||
%ignore WS
|
||||
"""


class AdminTransformer(Transformer):

    def start(self, items):
        return items[0]

    def command(self, items):
        return items[0]

    def list_services(self, items):
        result = {'type': 'list_services'}
        return result

    def show_service(self, items):
        service_id = int(items[2])
        return {"type": "show_service", "number": service_id}

    def startup_service(self, items):
        service_id = int(items[2])
        return {"type": "startup_service", "number": service_id}

    def shutdown_service(self, items):
        service_id = int(items[2])
        return {"type": "shutdown_service", "number": service_id}

    def restart_service(self, items):
        service_id = int(items[2])
        return {"type": "restart_service", "number": service_id}

    def list_users(self, items):
        return {"type": "list_users"}

    def show_user(self, items):
        user_name = items[2]
        return {"type": "show_user", "user_name": user_name}

    def drop_user(self, items):
        user_name = items[2]
        return {"type": "drop_user", "user_name": user_name}

    def alter_user(self, items):
        user_name = items[3]
        new_password = items[4]
        return {"type": "alter_user", "user_name": user_name, "password": new_password}

    def create_user(self, items):
        user_name = items[2]
        password = items[3]
        return {"type": "create_user", "user_name": user_name, "password": password, "role": "user"}

    def activate_user(self, items):
        user_name = items[3]
        activate_status = items[4]
        return {"type": "activate_user", "activate_status": activate_status, "user_name": user_name}

    def list_datasets(self, items):
        user_name = items[3]
        return {"type": "list_datasets", "user_name": user_name}

    def list_agents(self, items):
        user_name = items[3]
        return {"type": "list_agents", "user_name": user_name}

    def create_role(self, items):
        role_name = items[2]
        if len(items) > 4:
            description = items[4]
            return {"type": "create_role", "role_name": role_name, "description": description}
        else:
            return {"type": "create_role", "role_name": role_name}

    def drop_role(self, items):
        role_name = items[2]
        return {"type": "drop_role", "role_name": role_name}

    def alter_role(self, items):
        role_name = items[2]
        description = items[5]
        return {"type": "alter_role", "role_name": role_name, "description": description}

    def list_roles(self, items):
        return {"type": "list_roles"}

    def show_role(self, items):
        role_name = items[2]
        return {"type": "show_role", "role_name": role_name}

    def grant_permission(self, items):
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {"type": "grant_permission", "role_name": role_name, "resource": resource, "actions": action_list}

    def revoke_permission(self, items):
        action_list = items[1]
        resource = items[3]
        role_name = items[6]
        return {
            "type": "revoke_permission",
            "role_name": role_name,
            "resource": resource,
            "actions": action_list,
        }

    def alter_user_role(self, items):
        user_name = items[2]
        role_name = items[5]
        return {"type": "alter_user_role", "user_name": user_name, "role_name": role_name}

    def show_user_permission(self, items):
        user_name = items[3]
        return {"type": "show_user_permission", "user_name": user_name}

    def action_list(self, items):
        return items

    def meta_command(self, items):
        command_name = str(items[0]).lower()
        args = items[1:] if len(items) > 1 else []

        # handle quoted parameter
        parsed_args = []
        for arg in args:
            if hasattr(arg, 'value'):
                parsed_args.append(arg.value)
            else:
                parsed_args.append(str(arg))

        return {'type': 'meta', 'command': command_name, 'args': parsed_args}

    def meta_command_name(self, items):
        return items[0]

    def meta_args(self, items):
        return items


def encrypt(input_string):
    pub = '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArq9XTUSeYr2+N1h3Afl/z8Dse/2yD0ZGrKwx+EEEcdsBLca9Ynmx3nIB5obmLlSfmskLpBo0UACBmB5rEjBp2Q2f3AG3Hjd4B+gNCG6BDaawuDlgANIhGnaTLrIqWrrcm4EMzJOnAOI1fgzJRsOOUEfaS318Eq9OVO3apEyCCt0lOQK6PuksduOjVxtltDav+guVAA068NrPYmRNabVKRNLJpL8w4D44sfth5RvZ3q9t+6RTArpEtc5sh5ChzvqPOzKGMXW83C95TxmXqpbK6olN4RevSfVjEAgCydH6HN6OhtOQEcnrU97r9H0iZOWwbw3pVrZiUkuRD1R56Wzs2wIDAQAB\n-----END PUBLIC KEY-----'
    pub_key = RSA.importKey(pub)
    cipher = Cipher_pkcs1_v1_5.new(pub_key)
    cipher_text = cipher.encrypt(base64.b64encode(input_string.encode('utf-8')))
    return base64.b64encode(cipher_text).decode("utf-8")
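# The plaintext is base64-encoded before RSA (PKCS#1 v1.5) encryption, and the
# ciphertext is base64-encoded again for transport; the server side is expected
# to reverse both steps (see api.utils.crypt.decrypt used in auth.py).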


def encode_to_base64(input_string):
    base64_encoded = base64.b64encode(input_string.encode('utf-8'))
    return base64_encoded.decode('utf-8')


class AdminCLI(Cmd):
    def __init__(self):
        super().__init__()
        self.parser = Lark(GRAMMAR, start='start', parser='lalr', transformer=AdminTransformer())
        self.command_history = []
        self.is_interactive = False
        self.admin_account = "admin@ragflow.io"
        self.admin_password: str = "admin"
        self.session = requests.Session()
        self.access_token: str = ""
        self.host: str = ""
        self.port: int = 0

    intro = r"""Type "\h" for help."""
    prompt = "admin> "

    def onecmd(self, command: str) -> bool:
        try:
            result = self.parse_command(command)

            if isinstance(result, dict):
                if 'type' in result and result.get('type') == 'empty':
                    return False

            self.execute_command(result)

            if isinstance(result, Tree):
                return False

            if result.get('type') == 'meta' and result.get('command') in ['q', 'quit', 'exit']:
                return True

        except KeyboardInterrupt:
            print("\nUse '\\q' to quit")
        except EOFError:
            print("\nGoodbye!")
            return True
        return False

    def emptyline(self) -> bool:
        return False

    def default(self, line: str) -> bool:
        return self.onecmd(line)

    def parse_command(self, command_str: str) -> Any:
        if not command_str.strip():
            return {'type': 'empty'}

        self.command_history.append(command_str)

        try:
            result = self.parser.parse(command_str)
            return result
        except Exception as e:
            return {'type': 'error', 'message': f'Parse error: {str(e)}'}
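    # parse_command returns a lark Tree for SQL-style commands (the transformed
    # dict sits in tree.children[0], since rule `sql_command` has no callback),
    # a dict for meta commands such as "\h", and {'type': 'error', ...} when
    # parsing fails.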

    def verify_admin(self, arguments: dict, single_command: bool):
        self.host = arguments['host']
        self.port = arguments['port']
        print(f"Attempt to access ip: {self.host}, port: {self.port}")
        url = f"http://{self.host}:{self.port}/api/v1/admin/login"

        attempt_count = 3
        if single_command:
            attempt_count = 1

        try_count = 0
        while True:
            try_count += 1
            if try_count > attempt_count:
                return False

            if single_command:
                admin_passwd = arguments['password']
            else:
                admin_passwd = input(f"password for {self.admin_account}: ").strip()
            try:
                self.admin_password = encrypt(admin_passwd)
                response = self.session.post(url, json={'email': self.admin_account, 'password': self.admin_password})
                if response.status_code == 200:
                    res_json = response.json()
                    error_code = res_json.get('code', -1)
                    if error_code == 0:
                        self.session.headers.update({
                            'Content-Type': 'application/json',
                            'Authorization': response.headers['Authorization'],
                            'User-Agent': 'RAGFlow-CLI/0.21.1'
                        })
                        print("Authentication successful.")
                        return True
                    else:
                        error_message = res_json.get('message', 'Unknown error')
                        print(f"Authentication failed: {error_message}, try again")
                        continue
                else:
                    print(f"Bad response, status: {response.status_code}, password is wrong")
            except Exception as e:
                print(str(e))
                print(f"Can't access {self.host}, port: {self.port}")

    def _print_table_simple(self, data):
        if not data:
            print("No data to print")
            return
        if isinstance(data, dict):
            # handle single row data
            data = [data]

        columns = list(data[0].keys())
        col_widths = {}

        def get_string_width(text):
            half_width_chars = (
                " !\"#$%&'()*+,-./0123456789:;<=>?@"
                "ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`"
                "abcdefghijklmnopqrstuvwxyz{|}~"
                "\t\n\r"
            )
            width = 0
            for char in text:
                if char in half_width_chars:
                    width += 1
                else:
                    width += 2
            return width

        for col in columns:
            max_width = get_string_width(str(col))
            for item in data:
                value_len = get_string_width(str(item.get(col, '')))
                if value_len > max_width:
                    max_width = value_len
            col_widths[col] = max(2, max_width)

        # Generate delimiter
        separator = "+" + "+".join(["-" * (col_widths[col] + 2) for col in columns]) + "+"

        # Print header
        print(separator)
        header = "|" + "|".join([f" {col:<{col_widths[col]}} " for col in columns]) + "|"
        print(header)
        print(separator)

        # Print data
        for item in data:
            row = "|"
            for col in columns:
                value = str(item.get(col, ''))
                if get_string_width(value) > col_widths[col]:
                    value = value[:col_widths[col] - 3] + "..."
                row += f" {value:<{col_widths[col] - (get_string_width(value) - len(value))}} |"
            print(row)

        print(separator)
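    # Example output for [{"name": "es", "status": "alive"}]:
    #   +------+--------+
    #   | name | status |
    #   +------+--------+
    #   | es   | alive  |
    #   +------+--------+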

    def run_interactive(self):

        self.is_interactive = True
        print("RAGFlow Admin command line interface - Type '\\?' for help, '\\q' to quit")

        while True:
            try:
                command = input("admin> ").strip()
                if not command:
                    continue

                print(f"command: {command}")
                result = self.parse_command(command)
                self.execute_command(result)

                if isinstance(result, Tree):
                    continue

                if result.get('type') == 'meta' and result.get('command') in ['q', 'quit', 'exit']:
                    break

            except KeyboardInterrupt:
                print("\nUse '\\q' to quit")
            except EOFError:
                print("\nGoodbye!")
                break

    def run_single_command(self, command: str):
        result = self.parse_command(command)
        self.execute_command(result)

    def parse_connection_args(self, args: List[str]) -> Dict[str, Any]:
        parser = argparse.ArgumentParser(description='Admin CLI Client', add_help=False)
        parser.add_argument('-h', '--host', default='localhost', help='Admin service host')
        parser.add_argument('-p', '--port', type=int, default=8080, help='Admin service port')
        parser.add_argument('-w', '--password', default='admin', type=str, help='Superuser password')
        parser.add_argument('command', nargs='?', help='Single command')
        try:
            parsed_args, remaining_args = parser.parse_known_args(args)
            if remaining_args:
                command = remaining_args[0]
                return {
                    'host': parsed_args.host,
                    'port': parsed_args.port,
                    'password': parsed_args.password,
                    'command': command
                }
            else:
                return {
                    'host': parsed_args.host,
                    'port': parsed_args.port,
                }
        except SystemExit:
            return {'error': 'Invalid connection arguments'}
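    # main() passes sys.argv unchanged, so argv[0] (the script path) fills the
    # 'command' positional and a real trailing command ends up in
    # remaining_args, e.g.:
    #   ragflow-cli -h 127.0.0.1 -p 9381 -w admin "LIST USERS;"
    # With no trailing command the CLI drops into the interactive loop.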

    def execute_command(self, parsed_command: Dict[str, Any]):

        command_dict: dict
        if isinstance(parsed_command, Tree):
            command_dict = parsed_command.children[0]
        else:
            if parsed_command['type'] == 'error':
                print(f"Error: {parsed_command['message']}")
                return
            else:
                command_dict = parsed_command

        # print(f"Parsed command: {command_dict}")

        command_type = command_dict['type']

        match command_type:
            case 'list_services':
                self._handle_list_services(command_dict)
            case 'show_service':
                self._handle_show_service(command_dict)
            case 'restart_service':
                self._handle_restart_service(command_dict)
            case 'shutdown_service':
                self._handle_shutdown_service(command_dict)
            case 'startup_service':
                self._handle_startup_service(command_dict)
            case 'list_users':
                self._handle_list_users(command_dict)
            case 'show_user':
                self._handle_show_user(command_dict)
            case 'drop_user':
                self._handle_drop_user(command_dict)
            case 'alter_user':
                self._handle_alter_user(command_dict)
            case 'create_user':
                self._handle_create_user(command_dict)
            case 'activate_user':
                self._handle_activate_user(command_dict)
            case 'list_datasets':
                self._handle_list_datasets(command_dict)
            case 'list_agents':
                self._handle_list_agents(command_dict)
            case 'create_role':
                self._create_role(command_dict)
            case 'drop_role':
                self._drop_role(command_dict)
            case 'alter_role':
                self._alter_role(command_dict)
            case 'list_roles':
                self._list_roles(command_dict)
            case 'show_role':
                self._show_role(command_dict)
            case 'grant_permission':
                self._grant_permission(command_dict)
            case 'revoke_permission':
                self._revoke_permission(command_dict)
            case 'alter_user_role':
                self._alter_user_role(command_dict)
            case 'show_user_permission':
                self._show_user_permission(command_dict)
            case 'meta':
                self._handle_meta_command(command_dict)
            case _:
                print(f"Command '{command_type}' would be executed with API")

    def _handle_list_services(self, command):
        print("Listing all services")

        url = f'http://{self.host}:{self.port}/api/v1/admin/services'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get all services, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_show_service(self, command):
        service_id: int = command['number']
        print(f"Showing service: {service_id}")

        url = f'http://{self.host}:{self.port}/api/v1/admin/services/{service_id}'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            res_data = res_json['data']
            if 'status' in res_data and res_data['status'] == 'alive':
                print(f"Service {res_data['service_name']} is alive.")
                if isinstance(res_data['message'], str):
                    print(res_data['message'])
                else:
                    self._print_table_simple(res_data['message'])
            else:
                print(f"Service {res_data['service_name']} is down, {res_data['message']}")
        else:
            print(f"Fail to show service, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_restart_service(self, command):
        service_id: int = command['number']
        print(f"Restart service {service_id}")

    def _handle_shutdown_service(self, command):
        service_id: int = command['number']
        print(f"Shutdown service {service_id}")

    def _handle_startup_service(self, command):
        service_id: int = command['number']
        print(f"Startup service {service_id}")

    def _handle_list_users(self, command):
        print("Listing all users")

        url = f'http://{self.host}:{self.port}/api/v1/admin/users'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get all users, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_show_user(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Showing user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get user {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_drop_user(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Drop user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}'
        response = self.session.delete(url)
        res_json = response.json()
        if response.status_code == 200:
            print(res_json["message"])
        else:
            print(f"Fail to drop user, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_alter_user(self, command):
        user_name_tree: Tree = command['user_name']
        user_name: str = user_name_tree.children[0].strip("'\"")
        password_tree: Tree = command['password']
        password: str = password_tree.children[0].strip("'\"")
        print(f"Alter user: {user_name}, password: {password}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/password'
        response = self.session.put(url, json={'new_password': encrypt(password)})
        res_json = response.json()
        if response.status_code == 200:
            print(res_json["message"])
        else:
            print(f"Fail to alter password, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_create_user(self, command):
        user_name_tree: Tree = command['user_name']
        user_name: str = user_name_tree.children[0].strip("'\"")
        password_tree: Tree = command['password']
        password: str = password_tree.children[0].strip("'\"")
        role: str = command['role']
        print(f"Create user: {user_name}, password: {password}, role: {role}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users'
        response = self.session.post(
            url,
            json={'username': user_name, 'password': encrypt(password), 'role': role}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to create user {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_activate_user(self, command):
        user_name_tree: Tree = command['user_name']
        user_name: str = user_name_tree.children[0].strip("'\"")
        activate_tree: Tree = command['activate_status']
        activate_status: str = activate_tree.children[0].strip("'\"")
        if activate_status.lower() in ['on', 'off']:
            print(f"Alter user {user_name} activate status, turn {activate_status.lower()}.")
            url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/activate'
            response = self.session.put(url, json={'activate_status': activate_status})
            res_json = response.json()
            if response.status_code == 200:
                print(res_json["message"])
            else:
                print(f"Fail to alter activate status, code: {res_json['code']}, message: {res_json['message']}")
        else:
            print(f"Unknown activate status: {activate_status}.")

    def _handle_list_datasets(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Listing all datasets of user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/datasets'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get all datasets of {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_list_agents(self, command):
        username_tree: Tree = command['user_name']
        user_name: str = username_tree.children[0].strip("'\"")
        print(f"Listing all agents of user: {user_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name}/agents'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to get all agents of {user_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _create_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        desc_str: str = ''
        if 'description' in command:
            desc_tree: Tree = command['description']
            desc_str = desc_tree.children[0].strip("'\"")

        print(f"create role name: {role_name}, description: {desc_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles'
        response = self.session.post(
            url,
            json={'role_name': role_name, 'description': desc_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to create role {role_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _drop_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        print(f"drop role name: {role_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name}'
        response = self.session.delete(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to drop role {role_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _alter_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        desc_tree: Tree = command['description']
        desc_str: str = desc_tree.children[0].strip("'\"")

        print(f"alter role name: {role_name}, description: {desc_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name}'
        response = self.session.put(
            url,
            json={'description': desc_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to update role {role_name} with description: {desc_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _list_roles(self, command):
        print("Listing all roles")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to list roles, code: {res_json['code']}, message: {res_json['message']}")

    def _show_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name: str = role_name_tree.children[0].strip("'\"")
        print(f"show role: {role_name}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name}/permission'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(f"Fail to show role {role_name}, code: {res_json['code']}, message: {res_json['message']}")

    def _grant_permission(self, command):
        role_name_tree: Tree = command['role_name']
        role_name_str: str = role_name_tree.children[0].strip("'\"")
        resource_tree: Tree = command['resource']
        resource_str: str = resource_tree.children[0].strip("'\"")
        action_tree_list: list = command['actions']
        actions: list = []
        for action_tree in action_tree_list:
            action_str: str = action_tree.children[0].strip("'\"")
            actions.append(action_str)
        print(f"grant role_name: {role_name_str}, resource: {resource_str}, actions: {actions}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name_str}/permission'
        response = self.session.post(
            url,
            json={'actions': actions, 'resource': resource_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to grant role {role_name_str} with {actions} on {resource_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _revoke_permission(self, command):
        role_name_tree: Tree = command['role_name']
        role_name_str: str = role_name_tree.children[0].strip("'\"")
        resource_tree: Tree = command['resource']
        resource_str: str = resource_tree.children[0].strip("'\"")
        action_tree_list: list = command['actions']
        actions: list = []
        for action_tree in action_tree_list:
            action_str: str = action_tree.children[0].strip("'\"")
            actions.append(action_str)
        print(f"revoke role_name: {role_name_str}, resource: {resource_str}, actions: {actions}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/roles/{role_name_str}/permission'
        response = self.session.delete(
            url,
            json={'actions': actions, 'resource': resource_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to revoke role {role_name_str} with {actions} on {resource_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _alter_user_role(self, command):
        role_name_tree: Tree = command['role_name']
        role_name_str: str = role_name_tree.children[0].strip("'\"")
        user_name_tree: Tree = command['user_name']
        user_name_str: str = user_name_tree.children[0].strip("'\"")
        print(f"alter_user_role user_name: {user_name_str}, role_name: {role_name_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name_str}/role'
        response = self.session.put(
            url,
            json={'role_name': role_name_str}
        )
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to alter user: {user_name_str} to role {role_name_str}, code: {res_json['code']}, message: {res_json['message']}")

    def _show_user_permission(self, command):
        user_name_tree: Tree = command['user_name']
        user_name_str: str = user_name_tree.children[0].strip("'\"")
        print(f"show_user_permission user_name: {user_name_str}")
        url = f'http://{self.host}:{self.port}/api/v1/admin/users/{user_name_str}/permission'
        response = self.session.get(url)
        res_json = response.json()
        if response.status_code == 200:
            self._print_table_simple(res_json['data'])
        else:
            print(
                f"Fail to show user: {user_name_str} permission, code: {res_json['code']}, message: {res_json['message']}")

    def _handle_meta_command(self, command):
        meta_command = command['command']
        args = command.get('args', [])

        if meta_command in ['?', 'h', 'help']:
            self.show_help()
        elif meta_command in ['q', 'quit', 'exit']:
            print("Goodbye!")
        else:
            print(f"Meta command '{meta_command}' with args {args}")

    def show_help(self):
        """Help info"""
        help_text = """
Commands:
  LIST SERVICES
  SHOW SERVICE <service>
  STARTUP SERVICE <service>
  SHUTDOWN SERVICE <service>
  RESTART SERVICE <service>
  LIST USERS
  SHOW USER <user>
  DROP USER <user>
  CREATE USER <user> <password>
  ALTER USER PASSWORD <user> <new_password>
  ALTER USER ACTIVE <user> <on/off>
  LIST DATASETS OF <user>
  LIST AGENTS OF <user>
  CREATE ROLE <role> [DESCRIPTION <description>]
  DROP ROLE <role>
  ALTER ROLE <role> SET DESCRIPTION <description>
  LIST ROLES
  SHOW ROLE <role>
  GRANT <action>[,<action>...] ON <resource> TO ROLE <role>
  REVOKE <action>[,<action>...] ON <resource> FROM ROLE <role>
  ALTER USER <user> SET ROLE <role>
  SHOW USER PERMISSION <user>

Meta Commands:
  \\?, \\h, \\help     Show this help
  \\q, \\quit, \\exit  Quit the CLI
"""
        print(help_text)
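    # Example session:
    #   admin> LIST USERS;
    #   admin> ALTER USER ACTIVE 'bob@corp.com' off;
    #   admin> \q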


def main():
    import sys

    cli = AdminCLI()

    args = cli.parse_connection_args(sys.argv)
    if 'error' in args:
        print(f"Error: {args['error']}")
        return

    if 'command' in args:
        if 'password' not in args:
            print("Error: password is missing")
            return
        if cli.verify_admin(args, single_command=True):
            command: str = args['command']
            print(f"Run single command: {command}")
            cli.run_single_command(command)
    else:
        if cli.verify_admin(args, single_command=False):
            print(r"""
    ____  ___   ______________                 ___       __          _
   / __ \/   | / ____/ ____/ /___ _      __   /   | ____/ /___ ___  (_)___
  / /_/ / /| |/ / __/ /_  / / __ \ | /| / /  / /| |/ __  / __ `__ \/ / __ \
 / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ /  / ___ / /_/ / / / / / / / / / /
/_/ |_/_/  |_\____/_/   /_/\____/|__/|__/  /_/  |_\__,_/_/ /_/ /_/_/_/ /_/
""")
            cli.cmdloop()


if __name__ == '__main__':
    main()

admin/client/pyproject.toml (new file)
@@ -0,0 +1,24 @@
[project]
name = "ragflow-cli"
version = "0.21.1"
description = "Admin Service's client of [RAGFlow](https://github.com/infiniflow/ragflow). The Admin Service provides user management and system monitoring."
authors = [{ name = "Lynn", email = "lynn_inf@hotmail.com" }]
license = { text = "Apache License, Version 2.0" }
readme = "README.md"
requires-python = ">=3.10,<3.13"
dependencies = [
    "requests>=2.30.0,<3.0.0",
    "beartype>=0.18.5,<0.19.0",
    "pycryptodomex>=3.10.0",
    "lark>=1.1.0",
]

[dependency-groups]
test = [
    "pytest>=8.3.5",
    "requests>=2.32.3",
    "requests-toolbelt>=1.0.0",
]

[project.scripts]
ragflow-cli = "admin_client:main"
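
# After installing the package, the console script should be on PATH, e.g.:
#   ragflow-cli -h 127.0.0.1 -p 9381
# (assumes the client module ships as `admin_client`, per the entry point above)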

admin/server/admin_server.py (new file)
@@ -0,0 +1,75 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import signal
import logging
import time
import threading
import traceback
from werkzeug.serving import run_simple
from flask import Flask
from routes import admin_bp
from api.utils.log_utils import init_root_logger
from api.constants import SERVICE_CONF
from api import settings
from config import load_configurations, SERVICE_CONFIGS
from auth import init_default_admin, setup_auth
from flask_session import Session
from flask_login import LoginManager

stop_event = threading.Event()

if __name__ == '__main__':
    init_root_logger("admin_service")
    logging.info(r"""
    ____  ___   ______________                 ___       __          _
   / __ \/   | / ____/ ____/ /___ _      __   /   | ____/ /___ ___  (_)___
  / /_/ / /| |/ / __/ /_  / / __ \ | /| / /  / /| |/ __  / __ `__ \/ / __ \
 / _, _/ ___ / /_/ / __/ / / /_/ / |/ |/ /  / ___ / /_/ / / / / / / / / / /
/_/ |_/_/  |_\____/_/   /_/\____/|__/|__/  /_/  |_\__,_/_/ /_/ /_/_/_/ /_/
""")

    app = Flask(__name__)
    app.register_blueprint(admin_bp)
    app.config["SESSION_PERMANENT"] = False
    app.config["SESSION_TYPE"] = "filesystem"
    app.config["MAX_CONTENT_LENGTH"] = int(
        os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
    )
    Session(app)
    login_manager = LoginManager()
    login_manager.init_app(app)
    settings.init_settings()
    setup_auth(login_manager)
    init_default_admin()
    SERVICE_CONFIGS.configs = load_configurations(SERVICE_CONF)

    try:
        logging.info("RAGFlow Admin service start...")
        run_simple(
            hostname="0.0.0.0",
            port=9381,
            application=app,
            threaded=True,
            use_reloader=True,
            use_debugger=True,
        )
    except Exception:
        traceback.print_exc()
        stop_event.set()
        time.sleep(1)
        os.kill(os.getpid(), signal.SIGKILL)
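
# Note: run_simple(...) above is Werkzeug's development server; on a fatal
# error the process sets stop_event, sleeps briefly so background threads can
# notice, then force-kills itself with SIGKILL (skipping normal interpreter
# shutdown).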

admin/server/auth.py (new file)
@@ -0,0 +1,193 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import logging
import uuid
from functools import wraps
from datetime import datetime
from flask import request, jsonify
from flask_login import current_user, login_user
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

from api import settings
from api.common.exceptions import AdminException, UserNotFoundError
from api.db.init_data import encode_to_base64
from api.db.services import UserService
from api.db import ActiveEnum, StatusEnum
from api.utils.crypt import decrypt
from api.utils import (
    current_timestamp,
    datetime_format,
    get_format_time,
    get_uuid,
)
from api.utils.api_utils import (
    construct_response,
)


def setup_auth(login_manager):
    @login_manager.request_loader
    def load_user(web_request):
        jwt = Serializer(secret_key=settings.SECRET_KEY)
        authorization = web_request.headers.get("Authorization")
        if authorization:
            try:
                access_token = str(jwt.loads(authorization))

                if not access_token or not access_token.strip():
                    logging.warning("Authentication attempt with empty access token")
                    return None

                # Access tokens should be UUIDs (32 hex characters)
                if len(access_token.strip()) < 32:
                    logging.warning(f"Authentication attempt with invalid token format: {len(access_token)} chars")
                    return None

                user = UserService.query(
                    access_token=access_token, status=StatusEnum.VALID.value
                )
                if user:
                    if not user[0].access_token or not user[0].access_token.strip():
                        logging.warning(f"User {user[0].email} has empty access_token in database")
                        return None
                    return user[0]
                else:
                    return None
            except Exception as e:
                logging.warning(f"load_user got exception {e}")
                return None
        else:
            return None
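# load_user is the flask-login request_loader: each request's Authorization
# header is deserialized with the app's SECRET_KEY, and the recovered access
# token is looked up in UserService; any failure falls through to anonymous
# (None).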


def init_default_admin():
    # Verify that at least one active admin user exists. If not, create a default one.
    users = UserService.query(is_superuser=True)
    if not users:
        default_admin = {
            "id": uuid.uuid1().hex,
            "password": encode_to_base64("admin"),
            "nickname": "admin",
            "is_superuser": True,
            "email": "admin@ragflow.io",
            "creator": "system",
            "status": "1",
        }
        if not UserService.save(**default_admin):
            raise AdminException("Can't init admin.", 500)
    elif not any([u.is_active == ActiveEnum.ACTIVE.value for u in users]):
        raise AdminException("No active admin. Please update 'is_active' in db manually.", 500)


def check_admin_auth(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        user = UserService.filter_by_id(current_user.id)
        if not user:
            raise UserNotFoundError(current_user.email)
        if not user.is_superuser:
            raise AdminException("Not admin", 403)
        if user.is_active == ActiveEnum.INACTIVE.value:
            raise AdminException(f"User {current_user.email} inactive", 403)

        return func(*args, **kwargs)

    return wrapper


def login_admin(email: str, password: str):
    """
    :param email: admin email
    :param password: string before decrypt
    """
    users = UserService.query(email=email)
    if not users:
        raise UserNotFoundError(email)
    psw = decrypt(password)
    user = UserService.query_user(email, psw)
    if not user:
        raise AdminException("Email and password do not match!")
    if not user.is_superuser:
        raise AdminException("Not admin", 403)
    if user.is_active == ActiveEnum.INACTIVE.value:
        raise AdminException(f"User {email} inactive", 403)

    resp = user.to_json()
    user.access_token = get_uuid()
    login_user(user)
    user.update_time = current_timestamp()
    user.update_date = datetime_format(datetime.now())
    user.last_login_time = get_format_time()
    user.save()
    msg = "Welcome back!"
    return construct_response(data=resp, auth=user.get_id(), message=msg)
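# Each successful login rotates access_token to a fresh UUID, so older tokens
# for the same account stop resolving in load_user; logout (see routes.py)
# overwrites it with an INVALID_ prefix for the same reason.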


def check_admin(username: str, password: str):
    users = UserService.query(email=username)
    if not users:
        logging.info(f"Username: {username} is not registered!")
        user_info = {
            "id": uuid.uuid1().hex,
            "password": encode_to_base64("admin"),
            "nickname": "admin",
            "is_superuser": True,
            "email": "admin@ragflow.io",
            "creator": "system",
            "status": "1",
        }
        if not UserService.save(**user_info):
            raise AdminException("Can't init admin.", 500)

    user = UserService.query_user(username, password)
    if user:
        return True
    else:
        return False


def login_verify(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or 'username' not in auth.parameters or 'password' not in auth.parameters:
            return jsonify({
                "code": 401,
                "message": "Authentication required",
                "data": None
            }), 200

        username = auth.parameters['username']
        password = auth.parameters['password']
        try:
            if check_admin(username, password) is False:
                return jsonify({
                    "code": 500,
                    "message": "Access denied",
                    "data": None
                }), 200
        except Exception as e:
            error_msg = str(e)
            return jsonify({
                "code": 500,
                "message": error_msg
            }), 200

        return f(*args, **kwargs)

    return decorated

admin/server/config.py (new file)
@@ -0,0 +1,306 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import logging
import threading
from enum import Enum

from pydantic import BaseModel
from typing import Any
from api.utils.configs import read_config
from urllib.parse import urlparse


class ServiceConfigs:
    def __init__(self):
        self.configs: list = []
        self.lock = threading.Lock()


SERVICE_CONFIGS = ServiceConfigs()


class ServiceType(Enum):
    METADATA = "metadata"
    RETRIEVAL = "retrieval"
    MESSAGE_QUEUE = "message_queue"
    RAGFLOW_SERVER = "ragflow_server"
    TASK_EXECUTOR = "task_executor"
    FILE_STORE = "file_store"


class BaseConfig(BaseModel):
    id: int
    name: str
    host: str
    port: int
    service_type: str
    detail_func_name: str

    def to_dict(self) -> dict[str, Any]:
        return {'id': self.id, 'name': self.name, 'host': self.host, 'port': self.port,
                'service_type': self.service_type}


class MetaConfig(BaseConfig):
    meta_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['meta_type'] = self.meta_type
        result['extra'] = extra_dict
        return result


class MySQLConfig(MetaConfig):
    username: str
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['username'] = self.username
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


class PostgresConfig(MetaConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class RetrievalConfig(BaseConfig):
    retrieval_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['retrieval_type'] = self.retrieval_type
        result['extra'] = extra_dict
        return result


class InfinityConfig(RetrievalConfig):
    db_name: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['db_name'] = self.db_name
        result['extra'] = extra_dict
        return result


class ElasticsearchConfig(RetrievalConfig):
    username: str
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['username'] = self.username
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


class MessageQueueConfig(BaseConfig):
    mq_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['mq_type'] = self.mq_type
        result['extra'] = extra_dict
        return result


class RedisConfig(MessageQueueConfig):
    database: int
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['database'] = self.database
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


class RabbitMQConfig(MessageQueueConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class RAGFlowServerConfig(BaseConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class TaskExecutorConfig(BaseConfig):

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        return result


class FileStoreConfig(BaseConfig):
    store_type: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['store_type'] = self.store_type
        result['extra'] = extra_dict
        return result


class MinioConfig(FileStoreConfig):
    user: str
    password: str

    def to_dict(self) -> dict[str, Any]:
        result = super().to_dict()
        if 'extra' not in result:
            result['extra'] = dict()
        extra_dict = result['extra'].copy()
        extra_dict['user'] = self.user
        extra_dict['password'] = self.password
        result['extra'] = extra_dict
        return result


def load_configurations(config_path: str) -> list[BaseConfig]:
    raw_configs = read_config(config_path)
    configurations = []
    ragflow_count = 0
    id_count = 0
    for k, v in raw_configs.items():
        match (k):
            case "ragflow":
                name: str = f'ragflow_{ragflow_count}'
                host: str = v['host']
                http_port: int = v['http_port']
                config = RAGFlowServerConfig(id=id_count, name=name, host=host, port=http_port,
                                             service_type="ragflow_server",
                                             detail_func_name="check_ragflow_server_alive")
                configurations.append(config)
                id_count += 1
            case "es":
                name: str = 'elasticsearch'
                url = v['hosts']
                parsed = urlparse(url)
                host: str = parsed.hostname
                port: int = parsed.port
                username: str = v.get('username')
                password: str = v.get('password')
                config = ElasticsearchConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval",
                                             retrieval_type="elasticsearch",
                                             username=username, password=password,
                                             detail_func_name="get_es_cluster_stats")
                configurations.append(config)
                id_count += 1

            case "infinity":
                name: str = 'infinity'
                url = v['uri']
                parts = url.split(':', 1)
                host = parts[0]
                port = int(parts[1])
                database: str = v.get('db_name', 'default_db')
                config = InfinityConfig(id=id_count, name=name, host=host, port=port, service_type="retrieval",
                                        retrieval_type="infinity",
                                        db_name=database, detail_func_name="get_infinity_status")
                configurations.append(config)
                id_count += 1
            case "minio":
                name: str = 'minio'
                url = v['host']
                parts = url.split(':', 1)
                host = parts[0]
                port = int(parts[1])
                user = v.get('user')
                password = v.get('password')
                config = MinioConfig(id=id_count, name=name, host=host, port=port, user=user, password=password,
                                     service_type="file_store",
                                     store_type="minio", detail_func_name="check_minio_alive")
                configurations.append(config)
                id_count += 1
            case "redis":
                name: str = 'redis'
                url = v['host']
                parts = url.split(':', 1)
                host = parts[0]
                port = int(parts[1])
                password = v.get('password')
                db: int = v.get('db')
                config = RedisConfig(id=id_count, name=name, host=host, port=port, password=password, database=db,
                                     service_type="message_queue", mq_type="redis", detail_func_name="get_redis_info")
                configurations.append(config)
                id_count += 1
            case "mysql":
                name: str = 'mysql'
                host: str = v.get('host')
                port: int = v.get('port')
                username = v.get('user')
                password = v.get('password')
                config = MySQLConfig(id=id_count, name=name, host=host, port=port, username=username, password=password,
                                     service_type="meta_data", meta_type="mysql", detail_func_name="get_mysql_status")
                configurations.append(config)
                id_count += 1
            case "admin":
                pass
            case _:
                logging.warning(f"Unknown configuration key: {k}")
                continue

    return configurations
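
# A minimal sketch of the service_conf keys this loader understands (the
# values below are illustrative, not taken from the repo):
#
#   ragflow:
#     host: 0.0.0.0
#     http_port: 9380
#   es:
#     hosts: http://localhost:9200
#   redis:
#     host: localhost:6379
#     db: 1
#
# Unrecognized top-level keys are logged with a warning and skipped.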

admin/server/exceptions.py (new file)
@@ -0,0 +1,17 @@
class AdminException(Exception):
    def __init__(self, message, code=400):
        super().__init__(message)
        self.code = code
        self.message = message


class UserNotFoundError(AdminException):
    def __init__(self, username):
        super().__init__(f"User '{username}' not found", 404)


class UserAlreadyExistsError(AdminException):
    def __init__(self, username):
        super().__init__(f"User '{username}' already exists", 409)


class CannotDeleteAdminError(AdminException):
    def __init__(self):
        super().__init__("Cannot delete admin account", 403)

admin/server/models.py (new file)
@@ -0,0 +1,15 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

admin/server/responses.py (new file)
@@ -0,0 +1,34 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


from flask import jsonify


def success_response(data=None, message="Success", code=0):
    return jsonify({
        "code": code,
        "message": message,
        "data": data
    }), 200


def error_response(message="Error", code=-1, data=None):
    return jsonify({
        "code": code,
        "message": message,
        "data": data
    }), 400
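
# e.g. success_response({"id": 1}) yields HTTP 200 with body
#   {"code": 0, "message": "Success", "data": {"id": 1}}
# while error_response("boom", 500) yields HTTP 400 with body
#   {"code": 500, "message": "boom", "data": null}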

admin/server/roles.py (new file)
@@ -0,0 +1,76 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging

from typing import Dict, Any

from api.common.exceptions import AdminException


class RoleMgr:
    @staticmethod
    def create_role(role_name: str, description: str):
        error_msg = f"not implemented: create role: {role_name}, description: {description}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def update_role_description(role_name: str, description: str) -> Dict[str, Any]:
        error_msg = f"not implemented: update role: {role_name} with description: {description}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def delete_role(role_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: drop role: {role_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def list_roles() -> Dict[str, Any]:
        error_msg = "not implemented: list roles"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def get_role_permission(role_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: show role {role_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def grant_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        error_msg = f"not implemented: grant role {role_name} actions: {actions} on {resource}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def revoke_role_permission(role_name: str, actions: list, resource: str) -> Dict[str, Any]:
        error_msg = f"not implemented: revoke role {role_name} actions: {actions} on {resource}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def update_user_role(user_name: str, role_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: update user role: {user_name} to role {role_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

    @staticmethod
    def get_user_permission(user_name: str) -> Dict[str, Any]:
        error_msg = f"not implemented: get user permission: {user_name}"
        logging.error(error_msg)
        raise AdminException(error_msg)

admin/server/routes.py (new file)
@@ -0,0 +1,371 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import secrets

from flask import Blueprint, request
from flask_login import current_user, logout_user, login_required

from auth import login_verify, login_admin, check_admin_auth
from responses import success_response, error_response
from services import UserMgr, ServiceMgr, UserServiceMgr
from roles import RoleMgr
from api.common.exceptions import AdminException

admin_bp = Blueprint('admin', __name__, url_prefix='/api/v1/admin')


@admin_bp.route('/login', methods=['POST'])
def login():
    if not request.json:
        return error_response('Authorize admin failed.', 400)
    try:
        email = request.json.get("email", "")
        password = request.json.get("password", "")
        return login_admin(email, password)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/logout', methods=['GET'])
@login_required
def logout():
    try:
        current_user.access_token = f"INVALID_{secrets.token_hex(16)}"
        current_user.save()
        logout_user()
        return success_response(True)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/auth', methods=['GET'])
@login_verify
def auth_admin():
    try:
        return success_response(None, "Admin is authorized", 0)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users', methods=['GET'])
@login_required
@check_admin_auth
def list_users():
    try:
        users = UserMgr.get_all_users()
        return success_response(users, "Get all users", 0)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users', methods=['POST'])
@login_required
@check_admin_auth
def create_user():
    try:
        data = request.get_json()
        if not data or 'username' not in data or 'password' not in data:
            return error_response("Username and password are required", 400)

        username = data['username']
        password = data['password']
        role = data.get('role', 'user')

        res = UserMgr.create_user(username, password, role)
        if res["success"]:
            user_info = res["user_info"]
            user_info.pop("password")  # do not return password
            return success_response(user_info, "User created successfully")
        else:
            return error_response("create user failed")

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e))


@admin_bp.route('/users/<username>', methods=['DELETE'])
@login_required
@check_admin_auth
def delete_user(username):
    try:
        res = UserMgr.delete_user(username)
        if res["success"]:
            return success_response(None, res["message"])
        else:
            return error_response(res["message"])

    except AdminException as e:
        return error_response(e.message, e.code)
    except Exception as e:
        return error_response(str(e), 500)


@admin_bp.route('/users/<username>/password', methods=['PUT'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def change_password(username):
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'new_password' not in data:
|
||||
return error_response("New password is required", 400)
|
||||
|
||||
new_password = data['new_password']
|
||||
msg = UserMgr.update_user_password(username, new_password)
|
||||
return success_response(None, msg)
|
||||
|
||||
except AdminException as e:
|
||||
return error_response(e.message, e.code)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/users/<username>/activate', methods=['PUT'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def alter_user_activate_status(username):
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'activate_status' not in data:
|
||||
return error_response("Activation status is required", 400)
|
||||
activate_status = data['activate_status']
|
||||
msg = UserMgr.update_user_activate_status(username, activate_status)
|
||||
return success_response(None, msg)
|
||||
except AdminException as e:
|
||||
return error_response(e.message, e.code)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/users/<username>', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_user_details(username):
|
||||
try:
|
||||
user_details = UserMgr.get_user_details(username)
|
||||
return success_response(user_details)
|
||||
|
||||
except AdminException as e:
|
||||
return error_response(e.message, e.code)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/users/<username>/datasets', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_user_datasets(username):
|
||||
try:
|
||||
datasets_list = UserServiceMgr.get_user_datasets(username)
|
||||
return success_response(datasets_list)
|
||||
|
||||
except AdminException as e:
|
||||
return error_response(e.message, e.code)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/users/<username>/agents', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_user_agents(username):
|
||||
try:
|
||||
agents_list = UserServiceMgr.get_user_agents(username)
|
||||
return success_response(agents_list)
|
||||
|
||||
except AdminException as e:
|
||||
return error_response(e.message, e.code)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/services', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_services():
|
||||
try:
|
||||
services = ServiceMgr.get_all_services()
|
||||
return success_response(services, "Get all services", 0)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/service_types/<service_type>', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_services_by_type(service_type_str):
|
||||
try:
|
||||
services = ServiceMgr.get_services_by_type(service_type_str)
|
||||
return success_response(services)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/services/<service_id>', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_service(service_id):
|
||||
try:
|
||||
services = ServiceMgr.get_service_details(service_id)
|
||||
return success_response(services)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/services/<service_id>', methods=['DELETE'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def shutdown_service(service_id):
|
||||
try:
|
||||
services = ServiceMgr.shutdown_service(service_id)
|
||||
return success_response(services)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/services/<service_id>', methods=['PUT'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def restart_service(service_id):
|
||||
try:
|
||||
services = ServiceMgr.restart_service(service_id)
|
||||
return success_response(services)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles', methods=['POST'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def create_role():
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'role_name' not in data:
|
||||
return error_response("Role name is required", 400)
|
||||
role_name: str = data['role_name']
|
||||
description: str = data['description']
|
||||
res = RoleMgr.create_role(role_name, description)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles/<role_name>', methods=['PUT'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def update_role(role_name: str):
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'description' not in data:
|
||||
return error_response("Role description is required", 400)
|
||||
description: str = data['description']
|
||||
res = RoleMgr.update_role_description(role_name, description)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles/<role_name>', methods=['DELETE'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def delete_role(role_name: str):
|
||||
try:
|
||||
res = RoleMgr.delete_role(role_name)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def list_roles():
|
||||
try:
|
||||
res = RoleMgr.list_roles()
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles/<role_name>/permission', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_role_permission(role_name: str):
|
||||
try:
|
||||
res = RoleMgr.get_role_permission(role_name)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles/<role_name>/permission', methods=['POST'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def grant_role_permission(role_name: str):
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'actions' not in data or 'resource' not in data:
|
||||
return error_response("Permission is required", 400)
|
||||
actions: list = data['actions']
|
||||
resource: str = data['resource']
|
||||
res = RoleMgr.grant_role_permission(role_name, actions, resource)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/roles/<role_name>/permission', methods=['DELETE'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def revoke_role_permission(role_name: str):
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'actions' not in data or 'resource' not in data:
|
||||
return error_response("Permission is required", 400)
|
||||
actions: list = data['actions']
|
||||
resource: str = data['resource']
|
||||
res = RoleMgr.revoke_role_permission(role_name, actions, resource)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/users/<user_name>/role', methods=['PUT'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def update_user_role(user_name: str):
|
||||
try:
|
||||
data = request.get_json()
|
||||
if not data or 'role_name' not in data:
|
||||
return error_response("Role name is required", 400)
|
||||
role_name: str = data['role_name']
|
||||
res = RoleMgr.update_user_role(user_name, role_name)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
|
||||
|
||||
@admin_bp.route('/users/<user_name>/permission', methods=['GET'])
|
||||
@login_required
|
||||
@check_admin_auth
|
||||
def get_user_permission(user_name: str):
|
||||
try:
|
||||
res = RoleMgr.get_user_permission(user_name)
|
||||
return success_response(res)
|
||||
except Exception as e:
|
||||
return error_response(str(e), 500)
|
||||
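These endpoints can be smoke-tested over HTTP once the admin server is running. A minimal sketch using requests follows; the base URL, port, and credentials are assumptions, flask_login's session cookie is expected to carry the authorization, and the password is expected to be encrypted client-side to match decrypt() in services.py:

```python
# Sketch: exercising the admin API above. Base URL and credentials are
# assumptions, not part of the patch.
import requests

BASE = "http://localhost:9380/api/v1/admin"
s = requests.Session()  # keeps the flask_login session cookie

print(s.post(f"{BASE}/login",
             json={"email": "admin@example.com",
                   "password": "<encrypted-password>"}).json())
print(s.get(f"{BASE}/users").json())  # requires admin authorization
print(s.get(f"{BASE}/roles").json())  # currently fails: RoleMgr is a stub
```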
admin/server/services.py (new file, 225 lines)
@@ -0,0 +1,225 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re

from werkzeug.security import check_password_hash

from api.db import ActiveEnum
from api.db.services import UserService
from api.db.joint_services.user_account_service import create_new_user, delete_user_data
from api.db.services.canvas_service import UserCanvasService
from api.db.services.user_service import TenantService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.crypt import decrypt
from api.utils import health_utils

from api.common.exceptions import AdminException, UserAlreadyExistsError, UserNotFoundError
from config import SERVICE_CONFIGS


class UserMgr:
    @staticmethod
    def get_all_users():
        users = UserService.get_all_users()
        result = []
        for user in users:
            result.append({
                'email': user.email,
                'nickname': user.nickname,
                'create_date': user.create_date,
                'is_active': user.is_active,
                'is_superuser': user.is_superuser,
            })
        return result

    @staticmethod
    def get_user_details(username):
        # The username is an email address; query by it.
        users = UserService.query_user_by_email(username)
        result = []
        for user in users:
            result.append({
                'email': user.email,
                'language': user.language,
                'last_login_time': user.last_login_time,
                'is_active': user.is_active,
                'is_anonymous': user.is_anonymous,
                'login_channel': user.login_channel,
                'status': user.status,
                'is_superuser': user.is_superuser,
                'create_date': user.create_date,
                'update_date': user.update_date
            })
        return result

    @staticmethod
    def create_user(username, password, role="user") -> dict:
        # Validate the email address.
        if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", username):
            raise AdminException(f"Invalid email address: {username}!")
        # Check whether the email address is already in use.
        if UserService.query(email=username):
            raise UserAlreadyExistsError(username)
        # Construct the user info payload.
        user_info_dict = {
            "email": username,
            "nickname": "",  # the user is asked to edit it manually in settings
            "password": decrypt(password),
            "login_channel": "password",
            "is_superuser": role == "admin",
        }
        return create_new_user(user_info_dict)

    @staticmethod
    def delete_user(username):
        # The username is an email address; delete by it.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        if len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        usr = user_list[0]
        return delete_user_data(usr.id)

    @staticmethod
    def update_user_password(username, new_password) -> str:
        # Find the user by email; check that it exists and is unique.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # Check that new_password differs from the old one.
        usr = user_list[0]
        psw = decrypt(new_password)
        if check_password_hash(usr.password, psw):
            return "Same password, no need to update!"
        # Update the password.
        UserService.update_user_password(usr.id, psw)
        return "Password updated successfully!"

    @staticmethod
    def update_user_activate_status(username, activate_status: str):
        # Find the user by email; check that it exists and is unique.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        usr = user_list[0]
        # Normalize activate_status before handling it.
        _activate_status = activate_status.lower()
        target_status = {
            'on': ActiveEnum.ACTIVE.value,
            'off': ActiveEnum.INACTIVE.value,
        }.get(_activate_status)
        if not target_status:
            raise AdminException(f"Invalid activate_status: {activate_status}")
        if target_status == usr.is_active:
            return f"User activation status is already {_activate_status}!"
        # Update is_active.
        UserService.update_user(usr.id, {"is_active": target_status})
        return f"Turned user activation status {_activate_status} successfully!"


class UserServiceMgr:

    @staticmethod
    def get_user_datasets(username):
        # Find the user by email.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # Find the tenants the user has joined.
        usr = user_list[0]
        tenants = TenantService.get_joined_tenants_by_user_id(usr.id)
        tenant_ids = [m["tenant_id"] for m in tenants]
        # Filter for permitted and owned knowledge bases.
        return KnowledgebaseService.get_all_kb_by_tenant_ids(tenant_ids, usr.id)

    @staticmethod
    def get_user_agents(username):
        # Find the user by email.
        user_list = UserService.query_user_by_email(username)
        if not user_list:
            raise UserNotFoundError(username)
        elif len(user_list) > 1:
            raise AdminException(f"More than one user exists: {username}!")
        # Find the tenants the user has joined.
        usr = user_list[0]
        tenants = TenantService.get_joined_tenants_by_user_id(usr.id)
        tenant_ids = [m["tenant_id"] for m in tenants]
        # Filter for permitted and owned agents.
        res = UserCanvasService.get_all_agents_by_tenant_ids(tenant_ids, usr.id)
        return [{
            'title': r['title'],
            'permission': r['permission'],
            'canvas_category': r['canvas_category'].split('_')[0]
        } for r in res]


class ServiceMgr:

    @staticmethod
    def get_all_services():
        result = []
        configs = SERVICE_CONFIGS.configs
        for service_id, config in enumerate(configs):
            config_dict = config.to_dict()
            try:
                service_detail = ServiceMgr.get_service_details(service_id)
                if "status" in service_detail:
                    config_dict['status'] = service_detail['status']
                else:
                    config_dict['status'] = 'timeout'
            except Exception:
                config_dict['status'] = 'timeout'
            result.append(config_dict)
        return result

    @staticmethod
    def get_services_by_type(service_type_str: str):
        raise AdminException("get_services_by_type: not implemented")

    @staticmethod
    def get_service_details(service_id: int):
        service_id = int(service_id)
        configs = SERVICE_CONFIGS.configs
        service_config_mapping = {
            c.id: {
                'name': c.name,
                'detail_func_name': c.detail_func_name
            } for c in configs
        }
        service_info = service_config_mapping.get(service_id, {})
        if not service_info:
            raise AdminException(f"invalid service_id: {service_id}")

        detail_func = getattr(health_utils, service_info.get('detail_func_name'))
        res = detail_func()
        res.update({'service_name': service_info.get('name')})
        return res

    @staticmethod
    def shutdown_service(service_id: int):
        raise AdminException("shutdown_service: not implemented")

    @staticmethod
    def restart_service(service_id: int):
        raise AdminException("restart_service: not implemented")
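For reference, the email regex in UserMgr.create_user behaves as follows (the inputs are illustrative):

```python
# Illustrative check of the email-validation regex used above.
import re

EMAIL_RE = r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$"
assert re.match(EMAIL_RE, "admin@example.com")         # accepted
assert re.match(EMAIL_RE, "first.last@sub.domain.io")  # accepted
assert not re.match(EMAIL_RE, "not-an-email")          # rejected: no @
assert not re.match(EMAIL_RE, "user@domain.c")         # rejected: TLD too short
```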
@@ -1,45 +0,0 @@
English | [简体中文](./README_zh.md)

# *Graph*


## Introduction

*Graph* is a mathematical concept composed of nodes and edges.
It is used to compose a complex workflow or agent.
This graph goes beyond a DAG: cycles may be used to describe an agent or workflow.
Under this folder, we provide a test tool, ./test/client.py, which can run DSLs such as the JSON files in the folder ./test/dsl_examples.
Please use this client in the same folder where you start RAGFlow. If RAGFlow runs in Docker, enter the container before running the client.
Otherwise, a correct configuration in service_conf.yaml is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
usage: client.py [-h] -s DSL -t TENANT_ID -m

options:
  -h, --help            show this help message and exit
  -s DSL, --dsl DSL     input dsl
  -t TENANT_ID, --tenant_id TENANT_ID
                        Tenant ID
  -m, --stream          Stream output
```
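
For example, assuming one of the bundled DSL files, a typical invocation looks like `PYTHONPATH=path/to/ragflow python graph/test/client.py -s graph/test/dsl_examples/<dsl_file>.json -t <tenant_id> -m`, which streams the flow's output to the terminal.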
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/79179c5e-d4d6-464a-b6c4-5721cb329899" width="1000"/>
</div>


## How to get a TENANT_ID on the command line?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/419d8588-87b1-4ab8-ac49-2d1f047a4b97" width="600"/>
</div>
💡 We plan to display it here in the near future.
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/c97915de-0091-46a5-afd9-e278946e5fe3" width="600"/>
</div>


## How to set 'kb_ids' for the 'Retrieval' component in a DSL?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/0a731534-cac8-49fd-8a92-ca247eeef66d" width="600"/>
</div>

@@ -1,46 +0,0 @@
[English](./README.md) | 简体中文

# *Graph*


## Introduction

A *graph* is a mathematical concept composed of nodes and edges.
It is used to build complex workflows or agents.
This graph goes beyond a DAG: cycles may be used to describe an agent or workflow.
Under this folder, we provide a test tool, ./test/client.py,
which can run DSL files like those under the folder ./test/dsl_examples.
Please use this client in the same folder where you start RAGFlow. If RAGFlow runs in Docker, enter the container before running the client.
Otherwise, a correct configuration of the service_conf.yaml file is essential.

```bash
PYTHONPATH=path/to/ragflow python graph/test/client.py -h
usage: client.py [-h] -s DSL -t TENANT_ID -m

options:
  -h, --help            show this help message and exit
  -s DSL, --dsl DSL     input dsl
  -t TENANT_ID, --tenant_id TENANT_ID
                        Tenant ID
  -m, --stream          Stream output
```
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/05924730-c427-495b-8ee4-90b8b2250681" width="1000"/>
</div>


## How to get the TENANT_ID on the command line?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/419d8588-87b1-4ab8-ac49-2d1f047a4b97" width="600"/>
</div>
💡 It will be displayed here later:
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/c97915de-0091-46a5-afd9-e278946e5fe3" width="600"/>
</div>


## How to fill in 'kb_ids' for the 'Retrieval' component in a DSL?
<div align="center" style="margin-top:20px;margin-bottom:20px;">
<img src="https://github.com/infiniflow/ragflow/assets/12318111/0a731534-cac8-49fd-8a92-ca247eeef66d" width="600"/>
</div>

agent/canvas.py (667 lines)
@@ -13,90 +13,71 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import base64
import json
import logging
import re
import time
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from functools import partial

import pandas as pd
from typing import Any, Union, Tuple

from agent.component import component_class
from agent.component.base import ComponentBase
from api.db.services.file_service import FileService
from api.utils import get_uuid, hash_str2int
from rag.prompts.generator import chunks_format
from rag.utils.redis_conn import REDIS_CONN


class Canvas:
class Graph:
    """
    dsl = {
        "components": {
            "begin": {
                "obj":{
                    "component_name": "Begin",
                    "params": {},
                },
                "downstream": ["answer_0"],
                "upstream": [],
            },
            "answer_0": {
                "obj": {
                    "component_name": "Answer",
                    "params": {}
                },
                "downstream": ["retrieval_0"],
                "upstream": ["begin", "generate_0"],
            },
            "retrieval_0": {
                "obj": {
                    "component_name": "Retrieval",
                    "params": {}
                },
                "downstream": ["generate_0"],
                "upstream": ["answer_0"],
            },
            "generate_0": {
                "obj": {
                    "component_name": "Generate",
                    "params": {}
                },
                "downstream": ["answer_0"],
                "upstream": ["retrieval_0"],
            }
        },
        "history": [],
        "messages": [],
        "reference": [],
        "path": [["begin"]],
        "answer": []
    }
    """

    def __init__(self, dsl: str, tenant_id=None):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.components = {}
        self.dsl = json.loads(dsl) if dsl else {
    dsl = {
        "components": {
            "begin": {
                "obj": {
                "obj":{
                    "component_name": "Begin",
                    "params": {
                        "prologue": "Hi there!"
                    }
                    "params": {},
                },
                "downstream": [],
                "downstream": ["answer_0"],
                "upstream": [],
                "parent_id": ""
            },
            "retrieval_0": {
                "obj": {
                    "component_name": "Retrieval",
                    "params": {}
                },
                "downstream": ["generate_0"],
                "upstream": ["answer_0"],
            },
            "generate_0": {
                "obj": {
                    "component_name": "Generate",
                    "params": {}
                },
                "downstream": ["answer_0"],
                "upstream": ["retrieval_0"],
            }
        },
        "history": [],
        "messages": [],
        "reference": [],
        "path": [],
        "answer": []
        "path": ["begin"],
        "retrieval": {"chunks": [], "doc_aggs": []},
        "globals": {
            "sys.query": "",
            "sys.user_id": tenant_id,
            "sys.conversation_turns": 0,
            "sys.files": []
        }
    }
    """

    def __init__(self, dsl: str, tenant_id=None, task_id=None):
        self.path = []
        self.components = {}
        self.error = ""
        self.dsl = json.loads(dsl)
        self._tenant_id = tenant_id
        self._embed_id = ""
        self.task_id = task_id if task_id else get_uuid()
        self.load()

    def load(self):
@@ -105,34 +86,22 @@ class Canvas:
        for k, cpn in self.components.items():
            cpn_nms.add(cpn["obj"]["component_name"])

        assert "Begin" in cpn_nms, "There has to be a 'Begin' component."
        assert "Answer" in cpn_nms, "There has to be an 'Answer' component."

        for k, cpn in self.components.items():
            cpn_nms.add(cpn["obj"]["component_name"])
            param = component_class(cpn["obj"]["component_name"] + "Param")()
            param.update(cpn["obj"]["params"])
            param.check()
            try:
                param.check()
            except Exception as e:
                raise ValueError(self.get_component_name(k) + f": {e}")

            cpn["obj"] = component_class(cpn["obj"]["component_name"])(self, k, param)
            if cpn["obj"].component_name == "Categorize":
                for _, desc in param.category_description.items():
                    if desc["to"] not in cpn["downstream"]:
                        cpn["downstream"].append(desc["to"])

        self.path = self.dsl["path"]
        self.history = self.dsl["history"]
        self.messages = self.dsl["messages"]
        self.answer = self.dsl["answer"]
        self.reference = self.dsl["reference"]
        self._embed_id = self.dsl.get("embed_id", "")

    def __str__(self):
        self.dsl["path"] = self.path
        self.dsl["history"] = self.history
        self.dsl["messages"] = self.messages
        self.dsl["answer"] = self.answer
        self.dsl["reference"] = self.reference
        self.dsl["embed_id"] = self._embed_id
        self.dsl["task_id"] = self.task_id
        dsl = {
            "components": {}
        }
@@ -153,158 +122,304 @@ class Canvas:

    def reset(self):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.reference = []
        for k, cpn in self.components.items():
            self.components[k]["obj"].reset()
        self._embed_id = ""
        try:
            REDIS_CONN.delete(f"{self.task_id}-logs")
        except Exception as e:
            logging.exception(e)

    def get_component_name(self, cid):
        for n in self.dsl["graph"]["nodes"]:
        for n in self.dsl.get("graph", {}).get("nodes", []):
            if cid == n["id"]:
                return n["data"]["name"]
        return ""

    def run(self, **kwargs):
        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            try:
                ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            except Exception as e:
                ans = ComponentBase.be_output(str(e))
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                for an in ans():
                    yield an
            else:
                yield ans
            return
        raise NotImplementedError()

        if not self.path:
            self.components["begin"]["obj"].run(self.history, **kwargs)
            self.path.append(["begin"])
    def get_component(self, cpn_id) -> Union[None, dict[str, Any]]:
        return self.components.get(cpn_id)

        self.path.append([])
    def get_component_obj(self, cpn_id) -> ComponentBase:
        return self.components.get(cpn_id)["obj"]

        ran = -1
        waiting = []
        without_dependent_checking = []
    def get_component_type(self, cpn_id) -> str:
        return self.components.get(cpn_id)["obj"].component_name

        def prepare2run(cpns):
            nonlocal ran, ans
            for c in cpns:
                if self.path[-1] and c == self.path[-1][-1]:
                    continue
                cpn = self.components[c]["obj"]
                if cpn.component_name == "Answer":
                    self.answer.append(c)
                else:
                    logging.debug(f"Canvas.prepare2run: {c}")
                    if c not in without_dependent_checking:
                        cpids = cpn.get_dependent_components()
                        if any([cc not in self.path[-1] for cc in cpids]):
                            if c not in waiting:
                                waiting.append(c)
                            continue
                    yield "*'{}'* is running...🕞".format(self.get_component_name(c))

                    if cpn.component_name.lower() == "iteration":
                        st_cpn = cpn.get_start()
                        assert st_cpn, "Start component not found for Iteration."
                        if not st_cpn["obj"].end():
                            cpn = st_cpn["obj"]
                            c = cpn._id

                    try:
                        ans = cpn.run(self.history, **kwargs)
                    except Exception as e:
                        logging.exception(f"Canvas.run got exception: {e}")
                        self.path[-1].append(c)
                        ran += 1
                        raise e
                    self.path[-1].append(c)

            ran += 1

        downstream = self.components[self.path[-2][-1]]["downstream"]
        if not downstream and self.components[self.path[-2][-1]].get("parent_id"):
            cid = self.path[-2][-1]
            pid = self.components[cid]["parent_id"]
            o, _ = self.components[cid]["obj"].output(allow_partial=False)
            oo, _ = self.components[pid]["obj"].output(allow_partial=False)
            self.components[pid]["obj"].set(pd.concat([oo, o], ignore_index=True))
            downstream = [pid]

        for m in prepare2run(downstream):
            yield {"content": m, "running_status": True}

        while 0 <= ran < len(self.path[-1]):
            logging.debug(f"Canvas.run: {ran} {self.path}")
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
            if not any([cpn["downstream"], cpn.get("parent_id"), waiting]):
                break

            loop = self._find_loop()
            if loop:
                raise OverflowError(f"Too many loops: {loop}")

            if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                switch_out = cpn["obj"].output()[1].iloc[0, 0]
                assert switch_out in self.components, \
                    "{}'s output: {} not valid.".format(cpn_id, switch_out)
                for m in prepare2run([switch_out]):
                    yield {"content": m, "running_status": True}
                continue

            downstream = cpn["downstream"]
            if not downstream and cpn.get("parent_id"):
                pid = cpn["parent_id"]
                _, o = cpn["obj"].output(allow_partial=False)
                _, oo = self.components[pid]["obj"].output(allow_partial=False)
                self.components[pid]["obj"].set_output(pd.concat([oo.dropna(axis=1), o.dropna(axis=1)], ignore_index=True))
                downstream = [pid]

            for m in prepare2run(downstream):
                yield {"content": m, "running_status": True}

            if ran >= len(self.path[-1]) and waiting:
                without_dependent_checking = waiting
                waiting = []
                for m in prepare2run(without_dependent_checking):
                    yield {"content": m, "running_status": True}
                without_dependent_checking = []
                ran -= 1

        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                assert isinstance(ans, partial)
                for an in ans():
                    yield an
            else:
                yield ans

        else:
            raise Exception("The dialog flow has no way to interact with you. Please add an 'Interact' component to the end of the flow.")

    def get_component(self, cpn_id):
        return self.components[cpn_id]
    def get_component_input_form(self, cpn_id) -> dict:
        return self.components.get(cpn_id)["obj"].get_input_form()

    def get_tenant_id(self):
        return self._tenant_id

    def get_variable_value(self, exp: str) -> Any:
        exp = exp.strip("{").strip("}").strip(" ").strip("{").strip("}")
        if exp.find("@") < 0:
            return self.globals[exp]
        cpn_id, var_nm = exp.split("@")
        cpn = self.get_component(cpn_id)
        if not cpn:
            raise Exception(f"Can't find variable: '{cpn_id}@{var_nm}'")
        return cpn["obj"].output(var_nm)


class Canvas(Graph):

    def __init__(self, dsl: str, tenant_id=None, task_id=None):
        self.globals = {
            "sys.query": "",
            "sys.user_id": tenant_id,
            "sys.conversation_turns": 0,
            "sys.files": []
        }
        super().__init__(dsl, tenant_id, task_id)

    def load(self):
        super().load()
        self.history = self.dsl["history"]
        if "globals" in self.dsl:
            self.globals = self.dsl["globals"]
        else:
            self.globals = {
                "sys.query": "",
                "sys.user_id": "",
                "sys.conversation_turns": 0,
                "sys.files": []
            }

        self.retrieval = self.dsl["retrieval"]
        self.memory = self.dsl.get("memory", [])

    def __str__(self):
        self.dsl["history"] = self.history
        self.dsl["retrieval"] = self.retrieval
        self.dsl["memory"] = self.memory
        return super().__str__()

    def reset(self, mem=False):
        super().reset()
        if not mem:
            self.history = []
            self.retrieval = []
            self.memory = []
        for k in self.globals.keys():
            if isinstance(self.globals[k], str):
                self.globals[k] = ""
            elif isinstance(self.globals[k], int):
                self.globals[k] = 0
            elif isinstance(self.globals[k], float):
                self.globals[k] = 0
            elif isinstance(self.globals[k], list):
                self.globals[k] = []
            elif isinstance(self.globals[k], dict):
                self.globals[k] = {}
            else:
                self.globals[k] = None

    def run(self, **kwargs):
        st = time.perf_counter()
        self.message_id = get_uuid()
        created_at = int(time.time())
        self.add_user_input(kwargs.get("query"))
        for k, cpn in self.components.items():
            self.components[k]["obj"].reset(True)

        for k in kwargs.keys():
            if k in ["query", "user_id", "files"] and kwargs[k]:
                if k == "files":
                    self.globals[f"sys.{k}"] = self.get_files(kwargs[k])
                else:
                    self.globals[f"sys.{k}"] = kwargs[k]
        if not self.globals["sys.conversation_turns"]:
            self.globals["sys.conversation_turns"] = 0
        self.globals["sys.conversation_turns"] += 1

        def decorate(event, dt):
            nonlocal created_at
            return {
                "event": event,
                #"conversation_id": "f3cc152b-24b0-4258-a1a1-7d5e9fc8a115",
                "message_id": self.message_id,
                "created_at": created_at,
                "task_id": self.task_id,
                "data": dt
            }

        if not self.path or self.path[-1].lower().find("userfillup") < 0:
            self.path.append("begin")
            self.retrieval.append({"chunks": [], "doc_aggs": []})

        yield decorate("workflow_started", {"inputs": kwargs.get("inputs")})
        self.retrieval.append({"chunks": {}, "doc_aggs": {}})

        def _run_batch(f, t):
            with ThreadPoolExecutor(max_workers=5) as executor:
                thr = []
                for i in range(f, t):
                    cpn = self.get_component_obj(self.path[i])
                    if cpn.component_name.lower() in ["begin", "userfillup"]:
                        thr.append(executor.submit(cpn.invoke, inputs=kwargs.get("inputs", {})))
                    else:
                        thr.append(executor.submit(cpn.invoke, **cpn.get_input()))
                for t in thr:
                    t.result()

        def _node_finished(cpn_obj):
            return decorate("node_finished", {
                "inputs": cpn_obj.get_input_values(),
                "outputs": cpn_obj.output(),
                "component_id": cpn_obj._id,
                "component_name": self.get_component_name(cpn_obj._id),
                "component_type": self.get_component_type(cpn_obj._id),
                "error": cpn_obj.error(),
                "elapsed_time": time.perf_counter() - cpn_obj.output("_created_time"),
                "created_at": cpn_obj.output("_created_time"),
            })

        self.error = ""
        idx = len(self.path) - 1
        partials = []
        while idx < len(self.path):
            to = len(self.path)
            for i in range(idx, to):
                yield decorate("node_started", {
                    "inputs": None, "created_at": int(time.time()),
                    "component_id": self.path[i],
                    "component_name": self.get_component_name(self.path[i]),
                    "component_type": self.get_component_type(self.path[i]),
                    "thoughts": self.get_component_thoughts(self.path[i])
                })
            _run_batch(idx, to)
            # post-processing of component invocations
            for i in range(idx, to):
                cpn = self.get_component(self.path[i])
                cpn_obj = self.get_component_obj(self.path[i])
                if cpn_obj.component_name.lower() == "message":
                    if isinstance(cpn_obj.output("content"), partial):
                        _m = ""
                        for m in cpn_obj.output("content")():
                            if not m:
                                continue
                            if m == "<think>":
                                yield decorate("message", {"content": "", "start_to_think": True})
                            elif m == "</think>":
                                yield decorate("message", {"content": "", "end_to_think": True})
                            else:
                                yield decorate("message", {"content": m})
                            _m += m
                        cpn_obj.set_output("content", _m)
                        cite = re.search(r"\[ID:[ 0-9]+\]", _m)
                    else:
                        yield decorate("message", {"content": cpn_obj.output("content")})
                        cite = re.search(r"\[ID:[ 0-9]+\]", cpn_obj.output("content"))
                    yield decorate("message_end", {"reference": self.get_reference() if cite else None})

                while partials:
                    _cpn_obj = self.get_component_obj(partials[0])
                    if isinstance(_cpn_obj.output("content"), partial):
                        break
                    yield _node_finished(_cpn_obj)
                    partials.pop(0)

                other_branch = False
                if cpn_obj.error():
                    ex = cpn_obj.exception_handler()
                    if ex and ex["goto"]:
                        self.path.extend(ex["goto"])
                        other_branch = True
                    elif ex and ex["default_value"]:
                        yield decorate("message", {"content": ex["default_value"]})
                        yield decorate("message_end", {})
                    else:
                        self.error = cpn_obj.error()

                if cpn_obj.component_name.lower() != "iteration":
                    if isinstance(cpn_obj.output("content"), partial):
                        if self.error:
                            cpn_obj.set_output("content", None)
                            yield _node_finished(cpn_obj)
                        else:
                            partials.append(self.path[i])
                    else:
                        yield _node_finished(cpn_obj)

                def _append_path(cpn_id):
                    nonlocal other_branch
                    if other_branch:
                        return
                    if self.path[-1] == cpn_id:
                        return
                    self.path.append(cpn_id)

                def _extend_path(cpn_ids):
                    nonlocal other_branch
                    if other_branch:
                        return
                    for cpn_id in cpn_ids:
                        _append_path(cpn_id)

                if cpn_obj.component_name.lower() == "iterationitem" and cpn_obj.end():
                    iter = cpn_obj.get_parent()
                    yield _node_finished(iter)
                    _extend_path(self.get_component(cpn["parent_id"])["downstream"])
                elif cpn_obj.component_name.lower() in ["categorize", "switch"]:
                    _extend_path(cpn_obj.output("_next"))
                elif cpn_obj.component_name.lower() == "iteration":
                    _append_path(cpn_obj.get_start())
                elif not cpn["downstream"] and cpn_obj.get_parent():
                    _append_path(cpn_obj.get_parent().get_start())
                else:
                    _extend_path(cpn["downstream"])

            if self.error:
                logging.error(f"Runtime Error: {self.error}")
                break
            idx = to

        if any([self.get_component_obj(c).component_name.lower() == "userfillup" for c in self.path[idx:]]):
            path = [c for c in self.path[idx:] if self.get_component(c)["obj"].component_name.lower() == "userfillup"]
            path.extend([c for c in self.path[idx:] if self.get_component(c)["obj"].component_name.lower() != "userfillup"])
            another_inputs = {}
            tips = ""
            for c in path:
                o = self.get_component_obj(c)
                if o.component_name.lower() == "userfillup":
                    another_inputs.update(o.get_input_elements())
                    if o.get_param("enable_tips"):
                        tips = o.get_param("tips")
            self.path = path
            yield decorate("user_inputs", {"inputs": another_inputs, "tips": tips})
            return
        self.path = self.path[:idx]
        if not self.error:
            yield decorate("workflow_finished",
                           {
                               "inputs": kwargs.get("inputs"),
                               "outputs": self.get_component_obj(self.path[-1]).output(),
                               "elapsed_time": time.perf_counter() - st,
                               "created_at": st,
                           })
            self.history.append(("assistant", self.get_component_obj(self.path[-1]).output()))

    def is_reff(self, exp: str) -> bool:
        exp = exp.strip("{").strip("}")
        if exp.find("@") < 0:
            return exp in self.globals
        arr = exp.split("@")
        if len(arr) != 2:
            return False
        if self.get_component(arr[0]) is None:
            return False
        return True

    def get_history(self, window_size):
        convs = []
        for role, obj in self.history[window_size * -1:]:
            if isinstance(obj, list) and obj and all([isinstance(o, dict) for o in obj]):
                convs.append({"role": role, "content": '\n'.join([str(s.get("content", "")) for s in obj])})
        if window_size <= 0:
            return convs
        for role, obj in self.history[window_size * -2:]:
            if isinstance(obj, dict):
                convs.append({"role": role, "content": obj.get("content", "")})
            else:
                convs.append({"role": role, "content": str(obj)})
        return convs
@@ -312,54 +427,86 @@
    def add_user_input(self, question):
        self.history.append(("user", question))

    def set_embedding_model(self, embed_id):
        self._embed_id = embed_id

    def get_embedding_model(self):
        return self._embed_id

    def _find_loop(self, max_loops=6):
        path = self.path[-1][::-1]
        if len(path) < 2:
            return False

        for i in range(len(path)):
            if path[i].lower().find("answer") == 0 or path[i].lower().find("iterationitem") == 0:
                path = path[:i]
                break

        if len(path) < 2:
            return False

        for loc in range(2, len(path) // 2):
            pat = ",".join(path[0:loc])
            path_str = ",".join(path)
            if len(pat) >= len(path_str):
                return False
            loop = max_loops
            while path_str.find(pat) == 0 and loop >= 0:
                loop -= 1
                if len(pat) + 1 >= len(path_str):
                    return False
                path_str = path_str[len(pat) + 1:]
            if loop < 0:
                pat = " => ".join([p.split(":")[0] for p in path[0:loc]])
                return pat + " => " + pat

        return False

    def get_prologue(self):
        return self.components["begin"]["obj"]._param.prologue

    def get_mode(self):
        return self.components["begin"]["obj"]._param.mode

    def set_global_param(self, **kwargs):
        for k, v in kwargs.items():
            for q in self.components["begin"]["obj"]._param.query:
                if k != q["key"]:
                    continue
                q["value"] = v
        self.globals.update(kwargs)

    def get_preset_param(self):
        return self.components["begin"]["obj"]._param.query
        return self.components["begin"]["obj"]._param.inputs

    def get_component_input_elements(self, cpnnm):
        return self.components[cpnnm]["obj"].get_input_elements()
        return self.components[cpnnm]["obj"].get_input_elements()

    def get_files(self, files: Union[None, list[dict]]) -> list[str]:
        if not files:
            return []
        def image_to_base64(file):
            return "data:{};base64,{}".format(file["mime_type"],
                                              base64.b64encode(FileService.get_blob(file["created_by"], file["id"])).decode("utf-8"))
        exe = ThreadPoolExecutor(max_workers=5)
        threads = []
        for file in files:
            if file["mime_type"].find("image") >= 0:
                threads.append(exe.submit(image_to_base64, file))
                continue
            threads.append(exe.submit(FileService.parse, file["name"], FileService.get_blob(file["created_by"], file["id"]), True, file["created_by"]))
        return [th.result() for th in threads]

    def tool_use_callback(self, agent_id: str, func_name: str, params: dict, result: Any, elapsed_time=None):
        agent_ids = agent_id.split("-->")
        agent_name = self.get_component_name(agent_ids[0])
        path = agent_name if len(agent_ids) < 2 else agent_name + "-->" + "-->".join(agent_ids[1:])
        try:
            bin = REDIS_CONN.get(f"{self.task_id}-{self.message_id}-logs")
            if bin:
                obj = json.loads(bin.encode("utf-8"))
                if obj[-1]["component_id"] == agent_ids[0]:
                    obj[-1]["trace"].append({"path": path, "tool_name": func_name, "arguments": params, "result": result, "elapsed_time": elapsed_time})
                else:
                    obj.append({
                        "component_id": agent_ids[0],
                        "trace": [{"path": path, "tool_name": func_name, "arguments": params, "result": result, "elapsed_time": elapsed_time}]
                    })
            else:
                obj = [{
                    "component_id": agent_ids[0],
                    "trace": [{"path": path, "tool_name": func_name, "arguments": params, "result": result, "elapsed_time": elapsed_time}]
                }]
            REDIS_CONN.set_obj(f"{self.task_id}-{self.message_id}-logs", obj, 60 * 10)
        except Exception as e:
            logging.exception(e)

    def add_reference(self, chunks: list[object], doc_infos: list[object]):
        if not self.retrieval:
            self.retrieval = [{"chunks": {}, "doc_aggs": {}}]

        r = self.retrieval[-1]
        for ck in chunks_format({"chunks": chunks}):
            cid = hash_str2int(ck["id"], 500)
            # cid = uuid.uuid5(uuid.NAMESPACE_DNS, ck["id"])
            if cid not in r:
                r["chunks"][cid] = ck

        for doc in doc_infos:
            if doc["doc_name"] not in r:
                r["doc_aggs"][doc["doc_name"]] = doc

    def get_reference(self):
        if not self.retrieval:
            return {"chunks": {}, "doc_aggs": {}}
        return self.retrieval[-1]

    def add_memory(self, user: str, assist: str, summ: str):
        self.memory.append((user, assist, summ))

    def get_memory(self) -> list[Tuple]:
        return self.memory

    def get_component_thoughts(self, cpn_id) -> str:
        return self.components.get(cpn_id)["obj"].thoughts()

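To make the reworked event contract concrete, here is a minimal sketch of driving a Canvas. The one-component DSL mirrors the Graph docstring above, the tenant id is an assumption, and run() is expected to yield decorate()-shaped dicts ("event", "message_id", "task_id", "data"):

```python
# Sketch: driving the reworked Canvas (names from the diff above; the
# tenant id and the one-component DSL are assumptions, not fixtures).
import json
from agent.canvas import Canvas

dsl = {
    "components": {
        "begin": {"obj": {"component_name": "Begin", "params": {}},
                  "downstream": [], "upstream": [], "parent_id": ""},
    },
    "history": [], "messages": [], "reference": [],
    "path": [],
    "retrieval": [],  # run() appends one {"chunks", "doc_aggs"} bucket per turn
    "globals": {"sys.query": "", "sys.user_id": "demo-tenant",
                "sys.conversation_turns": 0, "sys.files": []},
}

canvas = Canvas(json.dumps(dsl), tenant_id="demo-tenant")
for event in canvas.run(query="Hi there!", inputs={}):
    # Expected events include workflow_started, node_started,
    # node_finished, message, message_end, and workflow_finished.
    print(event["event"])
```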
agent/component/__init__.py
@@ -14,120 +14,45 @@
# limitations under the License.
#

import os
import importlib
from .begin import Begin, BeginParam
from .generate import Generate, GenerateParam
from .retrieval import Retrieval, RetrievalParam
from .answer import Answer, AnswerParam
from .categorize import Categorize, CategorizeParam
from .switch import Switch, SwitchParam
from .relevant import Relevant, RelevantParam
from .message import Message, MessageParam
from .rewrite import RewriteQuestion, RewriteQuestionParam
from .keyword import KeywordExtract, KeywordExtractParam
from .concentrator import Concentrator, ConcentratorParam
from .baidu import Baidu, BaiduParam
from .duckduckgo import DuckDuckGo, DuckDuckGoParam
from .wikipedia import Wikipedia, WikipediaParam
from .pubmed import PubMed, PubMedParam
from .arxiv import ArXiv, ArXivParam
from .google import Google, GoogleParam
from .bing import Bing, BingParam
from .googlescholar import GoogleScholar, GoogleScholarParam
from .deepl import DeepL, DeepLParam
from .github import GitHub, GitHubParam
from .baidufanyi import BaiduFanyi, BaiduFanyiParam
from .qweather import QWeather, QWeatherParam
from .exesql import ExeSQL, ExeSQLParam
from .yahoofinance import YahooFinance, YahooFinanceParam
from .wencai import WenCai, WenCaiParam
from .jin10 import Jin10, Jin10Param
from .tushare import TuShare, TuShareParam
from .akshare import AkShare, AkShareParam
from .crawler import Crawler, CrawlerParam
from .invoke import Invoke, InvokeParam
from .template import Template, TemplateParam
from .email import Email, EmailParam
from .iteration import Iteration, IterationParam
from .iterationitem import IterationItem, IterationItemParam
import inspect
from types import ModuleType
from typing import Dict, Type

_package_path = os.path.dirname(__file__)
__all_classes: Dict[str, Type] = {}


def _import_submodules() -> None:
    for filename in os.listdir(_package_path):  # noqa: F821
        if filename.startswith("__") or not filename.endswith(".py") or filename.startswith("base"):
            continue
        module_name = filename[:-3]

        try:
            module = importlib.import_module(f".{module_name}", package=__name__)
            _extract_classes_from_module(module)  # noqa: F821
        except ImportError as e:
            print(f"Warning: Failed to import module {module_name}: {str(e)}")


def _extract_classes_from_module(module: ModuleType) -> None:
    for name, obj in inspect.getmembers(module):
        if (inspect.isclass(obj) and
                obj.__module__ == module.__name__ and not name.startswith("_")):
            __all_classes[name] = obj
            globals()[name] = obj


_import_submodules()

__all__ = list(__all_classes.keys()) + ["__all_classes"]

del _package_path, _import_submodules, _extract_classes_from_module


def component_class(class_name):
    m = importlib.import_module("agent.component")
    c = getattr(m, class_name)
    return c


__all__ = [
    "Begin",
    "BeginParam",
    "Generate",
    "GenerateParam",
    "Retrieval",
    "RetrievalParam",
    "Answer",
    "AnswerParam",
    "Categorize",
    "CategorizeParam",
    "Switch",
    "SwitchParam",
    "Relevant",
    "RelevantParam",
    "Message",
    "MessageParam",
    "RewriteQuestion",
    "RewriteQuestionParam",
    "KeywordExtract",
    "KeywordExtractParam",
    "Concentrator",
    "ConcentratorParam",
    "Baidu",
    "BaiduParam",
    "DuckDuckGo",
    "DuckDuckGoParam",
    "Wikipedia",
    "WikipediaParam",
    "PubMed",
    "PubMedParam",
    "ArXiv",
    "ArXivParam",
    "Google",
    "GoogleParam",
    "Bing",
    "BingParam",
    "GoogleScholar",
    "GoogleScholarParam",
    "DeepL",
    "DeepLParam",
    "GitHub",
    "GitHubParam",
    "BaiduFanyi",
    "BaiduFanyiParam",
    "QWeather",
    "QWeatherParam",
    "ExeSQL",
    "ExeSQLParam",
    "YahooFinance",
    "YahooFinanceParam",
    "WenCai",
    "WenCaiParam",
    "Jin10",
    "Jin10Param",
    "TuShare",
    "TuShareParam",
    "AkShare",
    "AkShareParam",
    "Crawler",
    "CrawlerParam",
    "Invoke",
    "InvokeParam",
    "Iteration",
    "IterationParam",
    "IterationItem",
    "IterationItemParam",
    "Template",
    "TemplateParam",
    "Email",
    "EmailParam",
    "component_class"
]
    for mdl in ["agent.component", "agent.tools", "rag.flow"]:
        try:
            return getattr(importlib.import_module(mdl), class_name)
        except Exception:
            pass
    assert False, f"Can't import {class_name}"

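With the registry above, a component class is resolved by name at runtime; a minimal sketch follows (the class name is illustrative — any class exported by agent.component, agent.tools, or rag.flow can be looked up the same way):

```python
# Sketch: resolving a component class through the dynamic registry above.
from agent.component import component_class

param_cls = component_class("RetrievalParam")  # illustrative class name
param = param_cls()
param.check()  # validate defaults before instantiating the component itself
```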
agent/component/agent_with_tools.py (new file, 356 lines)
@@ -0,0 +1,356 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from functools import partial
from typing import Any

import json_repair
from timeit import default_timer as timer

from agent.tools.base import LLMToolPluginCallSession, ToolParamBase, ToolBase, ToolMeta
from api.db.services.llm_service import LLMBundle
from api.db.services.tenant_llm_service import TenantLLMService
from api.db.services.mcp_server_service import MCPServerService
from api.utils.api_utils import timeout
from rag.prompts.generator import next_step, COMPLETE_TASK, analyze_task, \
    citation_prompt, reflect, rank_memories, kb_prompt, citation_plus, full_question, message_fit_in
from rag.utils.mcp_tool_call_conn import MCPToolCallSession, mcp_tool_metadata_to_openai_tool
from agent.component.llm import LLMParam, LLM

class AgentParam(LLMParam, ToolParamBase):
    """
    Define the Agent component parameters.
    """

    def __init__(self):
        self.meta: ToolMeta = {
            "name": "agent",
            "description": "This is an agent for a specific task.",
            "parameters": {
                "user_prompt": {
                    "type": "string",
                    "description": "This is the order you need to send to the agent.",
                    "default": "",
                    "required": True
                },
                "reasoning": {
                    "type": "string",
                    "description": (
                        "Supervisor's reasoning for choosing this agent. "
                        "Explain why this agent is being invoked and what is expected of it."
                    ),
                    "required": True
                },
                "context": {
                    "type": "string",
                    "description": (
                        "All relevant background information, prior facts, decisions, "
                        "and state needed by the agent to solve the current query. "
                        "Should be as detailed and self-contained as possible."
                    ),
                    "required": True
                },
            }
        }
        super().__init__()
        self.function_name = "agent"
        self.tools = []
        self.mcp = []
        self.max_rounds = 5
        self.description = ""


class Agent(LLM, ToolBase):
    component_name = "Agent"

    def __init__(self, canvas, id, param: LLMParam):
        LLM.__init__(self, canvas, id, param)
        self.tools = {}
        for cpn in self._param.tools:
            cpn = self._load_tool_obj(cpn)
            self.tools[cpn.get_meta()["function"]["name"]] = cpn

        self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), TenantLLMService.llm_id2llm_type(self._param.llm_id), self._param.llm_id,
                                  max_retries=self._param.max_retries,
                                  retry_interval=self._param.delay_after_error,
                                  max_rounds=self._param.max_rounds,
                                  verbose_tool_use=True
                                  )
        self.tool_meta = [v.get_meta() for _, v in self.tools.items()]

        for mcp in self._param.mcp:
            _, mcp_server = MCPServerService.get_by_id(mcp["mcp_id"])
            tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
            for tnm, meta in mcp["tools"].items():
                self.tool_meta.append(mcp_tool_metadata_to_openai_tool(meta))
                self.tools[tnm] = tool_call_session
        self.callback = partial(self._canvas.tool_use_callback, id)
        self.toolcall_session = LLMToolPluginCallSession(self.tools, self.callback)
        # self.chat_mdl.bind_tools(self.toolcall_session, self.tool_metas)

    def _load_tool_obj(self, cpn: dict) -> object:
        from agent.component import component_class
        param = component_class(cpn["component_name"] + "Param")()
        param.update(cpn["params"])
        try:
            param.check()
        except Exception as e:
            self.set_output("_ERROR", cpn["component_name"] + f" configuration error: {e}")
            raise
        cpn_id = f"{self._id}-->" + cpn.get("name", "").replace(" ", "_")
        return component_class(cpn["component_name"])(self._canvas, cpn_id, param)
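
    # A sketch of the tool entry `_load_tool_obj` expects in `self._param.tools`
    # (field names inferred from the lookups above; values are hypothetical):
    #     {
    #         "component_name": "Retrieval",   # resolved via component_class
    #         "name": "kb search",             # id becomes "<agent_id>-->kb_search"
    #         "params": {"top_n": 6}           # applied by param.update(), validated by param.check()
    #     }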

    def get_meta(self) -> dict[str, Any]:
        self._param.function_name = self._id.split("-->")[-1]
        m = super().get_meta()
        if hasattr(self._param, "user_prompt") and self._param.user_prompt:
            m["function"]["parameters"]["properties"]["user_prompt"] = self._param.user_prompt
        return m

    def get_input_form(self) -> dict[str, dict]:
        res = {}
        for k, v in self.get_input_elements().items():
            res[k] = {
                "type": "line",
                "name": v["name"]
            }
        for cpn in self._param.tools:
            if not isinstance(cpn, LLM):
                continue
            res.update(cpn.get_input_form())
        return res

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 20*60)))
    def _invoke(self, **kwargs):
        if kwargs.get("user_prompt"):
            usr_pmt = ""
            if kwargs.get("reasoning"):
                usr_pmt += "\nREASONING:\n{}\n".format(kwargs["reasoning"])
            if kwargs.get("context"):
                usr_pmt += "\nCONTEXT:\n{}\n".format(kwargs["context"])
            if usr_pmt:
                usr_pmt += "\nQUERY:\n{}\n".format(str(kwargs["user_prompt"]))
            else:
                usr_pmt = str(kwargs["user_prompt"])
            self._param.prompts = [{"role": "user", "content": usr_pmt}]

        if not self.tools:
            return LLM._invoke(self, **kwargs)

        prompt, msg, user_defined_prompt = self._prepare_prompt_variables()

        downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
        ex = self.exception_handler()
        if any([self._canvas.get_component_obj(cid).component_name.lower() == "message" for cid in downstreams]) and not self._param.output_structure and not (ex and ex["goto"]):
            self.set_output("content", partial(self.stream_output_with_tools, prompt, msg, user_defined_prompt))
            return

        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
        use_tools = []
        ans = ""
        for delta_ans, tk in self._react_with_tools_streamly(prompt, msg, use_tools, user_defined_prompt):
            ans += delta_ans

        if ans.find("**ERROR**") >= 0:
            logging.error(f"Agent._chat got error. response: {ans}")
            if self.get_exception_default_value():
                self.set_output("content", self.get_exception_default_value())
            else:
                self.set_output("_ERROR", ans)
            return

        self.set_output("content", ans)
        if use_tools:
            self.set_output("use_tools", use_tools)
        return ans

    def stream_output_with_tools(self, prompt, msg, user_defined_prompt={}):
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
        answer_without_toolcall = ""
        use_tools = []
        for delta_ans, _ in self._react_with_tools_streamly(prompt, msg, use_tools, user_defined_prompt):
            if delta_ans.find("**ERROR**") >= 0:
                if self.get_exception_default_value():
                    self.set_output("content", self.get_exception_default_value())
                    yield self.get_exception_default_value()
                else:
                    self.set_output("_ERROR", delta_ans)
            answer_without_toolcall += delta_ans
            yield delta_ans

        self.set_output("content", answer_without_toolcall)
        if use_tools:
            self.set_output("use_tools", use_tools)

    def _gen_citations(self, text):
        retrievals = self._canvas.get_reference()
        retrievals = {"chunks": list(retrievals["chunks"].values()), "doc_aggs": list(retrievals["doc_aggs"].values())}
        formated_refer = kb_prompt(retrievals, self.chat_mdl.max_length, True)
        for delta_ans in self._generate_streamly([{"role": "system", "content": citation_plus("\n\n".join(formated_refer))},
                                                  {"role": "user", "content": text}
                                                  ]):
            yield delta_ans

    def _react_with_tools_streamly(self, prompt, history: list[dict], use_tools, user_defined_prompt={}):
        token_count = 0
        tool_metas = self.tool_meta
        hist = deepcopy(history)
        last_calling = ""
        if len(hist) > 3:
            st = timer()
            user_request = full_question(messages=history, chat_mdl=self.chat_mdl)
            self.callback("Multi-turn conversation optimization", {}, user_request, elapsed_time=timer() - st)
        else:
            user_request = history[-1]["content"]

        def use_tool(name, args):
            nonlocal hist, use_tools, token_count, last_calling, user_request
            logging.info(f"{last_calling=} == {name=}")
            # Summarize the function calling.
            # if all([
            #     isinstance(self.toolcall_session.get_tool_obj(name), Agent),
            #     last_calling,
            #     last_calling != name
            # ]):
            #     self.toolcall_session.get_tool_obj(name).add2system_prompt(f"The chat history with other agents are as following: \n" + self.get_useful_memory(user_request, str(args["user_prompt"]), user_defined_prompt))
            last_calling = name
            tool_response = self.toolcall_session.tool_call(name, args)
            use_tools.append({
                "name": name,
                "arguments": args,
                "results": tool_response
            })
            # self.callback("add_memory", {}, "...")
            # self.add_memory(hist[-2]["content"], hist[-1]["content"], name, args, str(tool_response), user_defined_prompt)

            return name, tool_response

        def complete():
            nonlocal hist
            need2cite = self._param.cite and self._canvas.get_reference()["chunks"] and self._id.find("-->") < 0
            cited = False
            if hist[0]["role"] == "system" and need2cite:
                if len(hist) < 7:
                    hist[0]["content"] += citation_prompt()
                    cited = True
            yield "", token_count

            _hist = hist
            if len(hist) > 12:
                _hist = [hist[0], hist[1], *hist[-10:]]
            entire_txt = ""
            for delta_ans in self._generate_streamly(_hist):
                if not need2cite or cited:
                    yield delta_ans, 0
                entire_txt += delta_ans
            if not need2cite or cited:
                return

            st = timer()
            txt = ""
            for delta_ans in self._gen_citations(entire_txt):
                yield delta_ans, 0
                txt += delta_ans

            self.callback("gen_citations", {}, txt, elapsed_time=timer() - st)

        def append_user_content(hist, content):
            if hist[-1]["role"] == "user":
                hist[-1]["content"] += content
            else:
                hist.append({"role": "user", "content": content})

        st = timer()
        task_desc = analyze_task(self.chat_mdl, prompt, user_request, tool_metas, user_defined_prompt)
        self.callback("analyze_task", {}, task_desc, elapsed_time=timer() - st)
        for _ in range(self._param.max_rounds + 1):
            response, tk = next_step(self.chat_mdl, hist, tool_metas, task_desc, user_defined_prompt)
            # self.callback("next_step", {}, str(response)[:256] + "...")
            token_count += tk
            hist.append({"role": "assistant", "content": response})
            try:
                functions = json_repair.loads(re.sub(r"```.*", "", response))
                if not isinstance(functions, list):
                    raise TypeError(f"List should be returned, but `{functions}`")
                for f in functions:
                    if not isinstance(f, dict):
                        raise TypeError(f"An object type should be returned, but `{f}`")
                with ThreadPoolExecutor(max_workers=5) as executor:
                    thr = []
                    for func in functions:
                        name = func["name"]
                        args = func["arguments"]
                        if name == COMPLETE_TASK:
                            append_user_content(hist, f"Respond with a formal answer. FORGET (DO NOT mention) `{COMPLETE_TASK}`. The language of the response MUST be the same as that of the first user request.\n")
                            for txt, tkcnt in complete():
                                yield txt, tkcnt
                            return

                        thr.append(executor.submit(use_tool, name, args))

                st = timer()
                reflection = reflect(self.chat_mdl, hist, [th.result() for th in thr], user_defined_prompt)
                append_user_content(hist, reflection)
                self.callback("reflection", {}, str(reflection), elapsed_time=timer() - st)

            except Exception as e:
                logging.exception(msg=f"Wrong JSON argument format in LLM ReAct response: {e}")
                e = f"\nTool call error, please correct the input parameters to match the response format and call it again.\n *** Exception ***\n{e}"
                append_user_content(hist, str(e))

        logging.warning(f"Exceeded max rounds: {self._param.max_rounds}")
        final_instruction = f"""
{user_request}
IMPORTANT: You have reached the conversation limit. Based on ALL the information and research you have gathered so far, please provide a DIRECT and COMPREHENSIVE final answer to the original request.
Instructions:
1. SYNTHESIZE all information collected during this conversation
2. Provide a COMPLETE response using existing data - do not suggest additional research
3. Structure your response as a FINAL DELIVERABLE, not a plan
4. If information is incomplete, state what you found and provide the best analysis possible with available data
5. DO NOT mention conversation limits or suggest further steps
6. Focus on delivering VALUE with the information already gathered
Respond immediately with your final comprehensive answer.
"""
        append_user_content(hist, final_instruction)

        for txt, tkcnt in complete():
            yield txt, tkcnt

    def get_useful_memory(self, goal: str, sub_goal: str, topn=3, user_defined_prompt: dict = {}) -> str:
        # self.callback("get_useful_memory", {"topn": 3}, "...")
        mems = self._canvas.get_memory()
        rank = rank_memories(self.chat_mdl, goal, sub_goal, [summ for (user, assist, summ) in mems], user_defined_prompt)
        try:
            rank = json_repair.loads(re.sub(r"```.*", "", rank))[:topn]
            mems = [mems[r] for r in rank]
            return "\n\n".join([f"User: {u}\nAgent: {a}" for u, a, _ in mems])
        except Exception as e:
            logging.exception(e)

        return "Error occurred."

    def reset(self, temp=False):
        """
        Reset all tools if they have a reset method. This avoids errors for tools like MCPToolCallSession.
        """
        for k, cpn in self.tools.items():
            if hasattr(cpn, "reset") and callable(cpn.reset):
                cpn.reset()
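
The ReAct loop above rests on one contract: `next_step` returns a JSON array of `{"name", "arguments"}` objects, possibly wrapped in a code fence, which `json_repair` then salvages. A minimal standalone sketch of that parsing step (hypothetical planner output; same libraries as imported above):

    import re
    import json_repair

    # Hypothetical planner response: a fenced JSON list of tool calls.
    response = '```json\n[{"name": "kb_search", "arguments": {"query": "What is RAGFlow?"}}]\n```'

    # Mirrors the loop above: drop fence lines, then repair-parse the JSON.
    functions = json_repair.loads(re.sub(r"```.*", "", response))
    assert isinstance(functions, list) and all(isinstance(f, dict) for f in functions)
    for func in functions:
        print(func["name"], func["arguments"])  # would be dispatched via use_tool(name, args)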

@@ -1,89 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial
from typing import Tuple, Union

import pandas as pd

from agent.component.base import ComponentBase, ComponentParamBase


class AnswerParam(ComponentParamBase):

    """
    Define the Answer component parameters.
    """
    def __init__(self):
        super().__init__()
        self.post_answers = []

    def check(self):
        return True


class Answer(ComponentBase, ABC):
    component_name = "Answer"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        ans = self.get_input()
        if self._param.post_answers:
            ans = pd.concat([ans, pd.DataFrame([{"content": random.choice(self._param.post_answers)}])], ignore_index=False)
        return ans

    def stream_output(self):
        res = None
        if hasattr(self, "exception") and self.exception:
            res = {"content": str(self.exception)}
            self.exception = None
            yield res
            self.set_output(res)
            return

        stream = self.get_stream_input()
        if isinstance(stream, pd.DataFrame):
            res = stream
            answer = ""
            for ii, row in stream.iterrows():
                answer += row.to_dict()["content"]
                yield {"content": answer}
        else:
            for st in stream():
                res = st
                yield st
        if self._param.post_answers:
            res["content"] += random.choice(self._param.post_answers)
            yield res

        self.set_output(res)

    def set_exception(self, e):
        self.exception = e

    def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
        if allow_partial:
            return super().output()

        for r, c in self._canvas.history[::-1]:
            if r == "user":
                return self._param.output_var_name, pd.DataFrame([{"content": c}])

        return self._param.output_var_name, pd.DataFrame([])

@@ -1,68 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import arxiv
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class ArXivParam(ComponentParamBase):
    """
    Define the ArXiv component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 6
        self.sort_by = 'submittedDate'

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "ArXiv Search Sort_by",
                               ['submittedDate', 'lastUpdatedDate', 'relevance'])


class ArXiv(ComponentBase, ABC):
    component_name = "ArXiv"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return ArXiv.be_output("")

        try:
            sort_choices = {"relevance": arxiv.SortCriterion.Relevance,
                            "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
                            'submittedDate': arxiv.SortCriterion.SubmittedDate}
            arxiv_client = arxiv.Client()
            search = arxiv.Search(
                query=ans,
                max_results=self._param.top_n,
                sort_by=sort_choices[self._param.sort_by]
            )
            arxiv_res = [
                {"content": 'Title: ' + i.title + '\nPdf_Url: <a href="' + i.pdf_url + '"></a> \nSummary: ' + i.summary} for
                i in list(arxiv_client.results(search))]
        except Exception as e:
            return ArXiv.be_output("**ERROR**: " + str(e))

        if not arxiv_res:
            return ArXiv.be_output("")

        df = pd.DataFrame(arxiv_res)
        logging.debug(f"df: {str(df)}")
        return df

@@ -1,67 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
import requests
import re
from agent.component.base import ComponentBase, ComponentParamBase


class BaiduParam(ComponentParamBase):
    """
    Define the Baidu component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class Baidu(ComponentBase, ABC):
    component_name = "Baidu"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Baidu.be_output("")

        try:
            url = 'http://www.baidu.com/s?wd=' + ans + '&rn=' + str(self._param.top_n)
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'}
            response = requests.get(url=url, headers=headers)

            url_res = re.findall(r"'url': \\\"(.*?)\\\"}", response.text)
            title_res = re.findall(r"'title': \\\"(.*?)\\\",\\n", response.text)
            body_res = re.findall(r"\"contentText\":\"(.*?)\"", response.text)
            baidu_res = [{"content": re.sub('<em>|</em>', '', '<a href="' + url + '">' + title + '</a> ' + body)} for
                         url, title, body in zip(url_res, title_res, body_res)]
            del body_res, url_res, title_res
        except Exception as e:
            return Baidu.be_output("**ERROR**: " + str(e))

        if not baidu_res:
            return Baidu.be_output("")

        df = pd.DataFrame(baidu_res)
        logging.debug(f"df: {str(df)}")
        return df

@@ -1,96 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
import requests
from agent.component.base import ComponentBase, ComponentParamBase
from hashlib import md5


class BaiduFanyiParam(ComponentParamBase):
    """
    Define the BaiduFanyi component parameters.
    """

    def __init__(self):
        super().__init__()
        self.appid = "xxx"
        self.secret_key = "xxx"
        self.trans_type = 'translate'
        self.parameters = []
        self.source_lang = 'auto'
        self.target_lang = 'auto'
        self.domain = 'finance'

    def check(self):
        self.check_empty(self.appid, "BaiduFanyi APPID")
        self.check_empty(self.secret_key, "BaiduFanyi Secret Key")
        self.check_valid_value(self.trans_type, "Translate type", ['translate', 'fieldtranslate'])
        self.check_valid_value(self.source_lang, "Source language",
                               ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
                                'hu', 'cht', 'vie'])
        self.check_valid_value(self.target_lang, "Target language",
                               ['auto', 'zh', 'en', 'yue', 'wyw', 'jp', 'kor', 'fra', 'spa', 'th', 'ara', 'ru', 'pt',
                                'de', 'it', 'el', 'nl', 'pl', 'bul', 'est', 'dan', 'fin', 'cs', 'rom', 'slo', 'swe',
                                'hu', 'cht', 'vie'])
        self.check_valid_value(self.domain, "Translate field",
                               ['it', 'finance', 'machinery', 'senimed', 'novel', 'academic', 'aerospace', 'wiki',
                                'news', 'law', 'contract'])


class BaiduFanyi(ComponentBase, ABC):
    component_name = "BaiduFanyi"

    def _run(self, history, **kwargs):

        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return BaiduFanyi.be_output("")

        try:
            source_lang = self._param.source_lang
            target_lang = self._param.target_lang
            appid = self._param.appid
            salt = str(random.randint(32768, 65536))  # must be a string: it is concatenated into both the sign and the URL
            secret_key = self._param.secret_key

            if self._param.trans_type == 'translate':
                sign = md5((appid + ans + salt + secret_key).encode('utf-8')).hexdigest()
                url = 'http://api.fanyi.baidu.com/api/trans/vip/translate?' + 'q=' + ans + '&from=' + source_lang + '&to=' + target_lang + '&appid=' + appid + '&salt=' + salt + '&sign=' + sign
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                response = requests.post(url=url, headers=headers).json()

                if response.get('error_code'):
                    return BaiduFanyi.be_output("**Error**:" + response['error_msg'])

                return BaiduFanyi.be_output(response['trans_result'][0]['dst'])
            elif self._param.trans_type == 'fieldtranslate':
                domain = self._param.domain
                sign = md5((appid + ans + salt + domain + secret_key).encode('utf-8')).hexdigest()
                url = 'http://api.fanyi.baidu.com/api/trans/vip/fieldtranslate?' + 'q=' + ans + '&from=' + source_lang + '&to=' + target_lang + '&appid=' + appid + '&salt=' + salt + '&domain=' + domain + '&sign=' + sign
                headers = {"Content-Type": "application/x-www-form-urlencoded"}
                response = requests.post(url=url, headers=headers).json()

                if response.get('error_code'):
                    return BaiduFanyi.be_output("**Error**:" + response['error_msg'])

                return BaiduFanyi.be_output(response['trans_result'][0]['dst'])

        except Exception as e:
            return BaiduFanyi.be_output("**Error**:" + str(e))
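
The signing scheme above is worth isolating: Baidu's translation API expects `sign = MD5(appid + q + salt + secret_key)`. A self-contained sketch with placeholder credentials (no network call):

    import random
    from hashlib import md5

    appid, secret_key = "20240000000000001", "my-secret"   # placeholders, not real credentials
    query = "hello world"
    salt = str(random.randint(32768, 65536))               # string, reused verbatim in the URL

    sign = md5((appid + query + salt + secret_key).encode("utf-8")).hexdigest()
    params = {"q": query, "from": "auto", "to": "zh", "appid": appid, "salt": salt, "sign": sign}
    print(params)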

@@ -13,17 +13,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re
import time
from abc import ABC
import builtins
import json
import os
import logging
from functools import partial
from typing import Tuple, Union

from typing import Any, List, Union
import pandas as pd

import trio
from agent import settings
from api.utils.api_utils import timeout


_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
_DEPRECATED_PARAMS = "_deprecated_params"
@@ -33,11 +36,16 @@ _IS_RAW_CONF = "_is_raw_conf"

class ComponentParamBase(ABC):
    def __init__(self):
        self.output_var_name = "output"
        self.message_history_window_size = 22
        self.query = []
        self.inputs = []
        self.debug_inputs = []
        self.message_history_window_size = 13
        self.inputs = {}
        self.outputs = {}
        self.description = ""
        self.max_retries = 0
        self.delay_after_error = 2.0
        self.exception_method = None
        self.exception_default_value = None
        self.exception_goto = None
        self.debug_inputs = {}

    def set_name(self, name: str):
        self._name = name
@@ -88,6 +96,14 @@ class ComponentParamBase(ABC):
    def as_dict(self):
        def _recursive_convert_obj_to_dict(obj):
            ret_dict = {}
            if isinstance(obj, dict):
                for k, v in obj.items():
                    if isinstance(v, dict) or (v and type(v).__name__ not in dir(builtins)):
                        ret_dict[k] = _recursive_convert_obj_to_dict(v)
                    else:
                        ret_dict[k] = v
                return ret_dict

        for attr_name in list(obj.__dict__):
            if attr_name in [_FEEDED_DEPRECATED_PARAMS, _DEPRECATED_PARAMS, _USER_FEEDED_PARAMS, _IS_RAW_CONF]:
                continue
@@ -96,7 +112,7 @@ class ComponentParamBase(ABC):
            if isinstance(attr, pd.DataFrame):
                ret_dict[attr_name] = attr.to_dict()
                continue
            if attr and type(attr).__name__ not in dir(builtins):
            if isinstance(attr, dict) or (attr and type(attr).__name__ not in dir(builtins)):
                ret_dict[attr_name] = _recursive_convert_obj_to_dict(attr)
            else:
                ret_dict[attr_name] = attr
@@ -228,7 +244,7 @@ class ComponentParamBase(ABC):

        if not value_legal:
            raise ValueError(
                "Plase check runtime conf, {} = {} does not match user-parameter restriction".format(
                "Please check runtime conf, {} = {} does not match user-parameter restriction".format(
                    variable, value
                )
            )
@@ -376,6 +392,8 @@ class ComponentParamBase(ABC):

class ComponentBase(ABC):
    component_name: str
    thread_limiter = trio.CapacityLimiter(int(os.environ.get('MAX_CONCURRENT_CHATS', 10)))
    variable_ref_patt = r"\{* *\{([a-zA-Z:0-9]+@[A-Za-z:0-9_.-]+|sys\.[a-z_]+)\} *\}*"

    def __str__(self):
        """
@@ -386,201 +404,161 @@ class ComponentBase(ABC):
        """
        return """{{
            "component_name": "{}",
            "params": {},
            "output": {},
            "inputs": {}
            "params": {}
        }}""".format(self.component_name,
                     self._param,
                     json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
                     json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False)
                     self._param
                     )

    def __init__(self, canvas, id, param: ComponentParamBase):
        from agent.canvas import Graph  # Local import to avoid cyclic dependency
        assert isinstance(canvas, Graph), "canvas must be an instance of Canvas"
        self._canvas = canvas
        self._id = id
        self._param = param
        self._param.check()

    def get_dependent_components(self):
        cpnts = set([para["component_id"].split("@")[0] for para in self._param.query \
                     if para.get("component_id") \
                     and para["component_id"].lower().find("answer") < 0 \
                     and para["component_id"].lower().find("begin") < 0])
        return list(cpnts)

    def run(self, history, **kwargs):
        logging.debug("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                           json.dumps(kwargs, ensure_ascii=False)))
        self._param.debug_inputs = []
    def invoke(self, **kwargs) -> dict[str, Any]:
        self.set_output("_created_time", time.perf_counter())
        try:
            res = self._run(history, **kwargs)
            self.set_output(res)
            self._invoke(**kwargs)
        except Exception as e:
            self.set_output(pd.DataFrame([{"content": str(e)}]))
            raise e
            if self.get_exception_default_value():
                self.set_exception_default_value()
            else:
                self.set_output("_ERROR", str(e))
            logging.exception(e)
        self._param.debug_inputs = {}
        self.set_output("_elapsed_time", time.perf_counter() - self.output("_created_time"))
        return self.output()

        return res

    def _run(self, history, **kwargs):
    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        raise NotImplementedError()

    def output(self, allow_partial=True) -> Tuple[str, Union[pd.DataFrame, partial]]:
        o = getattr(self._param, self._param.output_var_name)
        if not isinstance(o, partial):
            if not isinstance(o, pd.DataFrame):
                if isinstance(o, list):
                    return self._param.output_var_name, pd.DataFrame(o)
                if o is None:
                    return self._param.output_var_name, pd.DataFrame()
                return self._param.output_var_name, pd.DataFrame([{"content": str(o)}])
            return self._param.output_var_name, o
    def output(self, var_nm: str = None) -> Union[dict[str, Any], Any]:
        if var_nm:
            return self._param.outputs.get(var_nm, {}).get("value", "")
        return {k: o.get("value") for k, o in self._param.outputs.items()}

        if allow_partial or not isinstance(o, partial):
            if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
                return pd.DataFrame(o if isinstance(o, list) else [o])
            return self._param.output_var_name, o
    def set_output(self, key: str, value: Any):
        if key not in self._param.outputs:
            self._param.outputs[key] = {"value": None, "type": str(type(value))}
        self._param.outputs[key]["value"] = value

        outs = None
        for oo in o():
            if not isinstance(oo, pd.DataFrame):
                outs = pd.DataFrame(oo if isinstance(oo, list) else [oo])
    def error(self):
        return self._param.outputs.get("_ERROR", {}).get("value")

    def reset(self, only_output=False):
        for k in self._param.outputs.keys():
            self._param.outputs[k]["value"] = None
        if only_output:
            return
        for k in self._param.inputs.keys():
            self._param.inputs[k]["value"] = None
        self._param.debug_inputs = {}

    def get_input(self, key: str = None) -> Union[Any, dict[str, Any]]:
        if key:
            return self._param.inputs.get(key, {}).get("value")

        res = {}
        for var, o in self.get_input_elements().items():
            v = self.get_param(var)
            if v is None:
                continue
            if isinstance(v, str) and self._canvas.is_reff(v):
                self.set_input_value(var, self._canvas.get_variable_value(v))
            else:
                outs = oo
        return self._param.output_var_name, outs
                self.set_input_value(var, v)
            res[var] = self.get_input_value(var)
        return res

    def reset(self):
        setattr(self._param, self._param.output_var_name, None)
        self._param.inputs = []

    def set_output(self, v):
        setattr(self._param, self._param.output_var_name, v)

    def get_input(self):
    def get_input_values(self) -> Union[Any, dict[str, Any]]:
        if self._param.debug_inputs:
            return pd.DataFrame([{"content": v["value"]} for v in self._param.debug_inputs if v.get("value")])
            return self._param.debug_inputs

        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])
        return {var: self.get_input_value(var) for var, o in self.get_input_elements().items()}

        if self._param.query:
            self._param.inputs = []
            outs = []
            for q in self._param.query:
                if q.get("component_id"):
                    if q["component_id"].split("@")[0].lower().find("begin") >= 0:
                        cpn_id, key = q["component_id"].split("@")
                        for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                            if p["key"] == key:
                                outs.append(pd.DataFrame([{"content": p.get("value", "")}]))
                                self._param.inputs.append({"component_id": q["component_id"],
                                                           "content": p.get("value", "")})
                                break
                        else:
                            assert False, f"Can't find parameter '{key}' for {cpn_id}"
                        continue
    def get_input_elements_from_text(self, txt: str) -> dict[str, dict[str, str]]:
        res = {}
        for r in re.finditer(self.variable_ref_patt, txt, flags=re.IGNORECASE | re.DOTALL):
            exp = r.group(1)
            cpn_id, var_nm = exp.split("@") if exp.find("@") > 0 else ("", exp)
            res[exp] = {
                "name": (self._canvas.get_component_name(cpn_id) + f"@{var_nm}") if cpn_id else exp,
                "value": self._canvas.get_variable_value(exp),
                "_retrival": self._canvas.get_variable_value(f"{cpn_id}@_references") if cpn_id else None,
                "_cpn_id": cpn_id
            }
        return res

                    if q["component_id"].lower().find("answer") == 0:
                        txt = []
                        for r, c in self._canvas.history[::-1][:self._param.message_history_window_size][::-1]:
                            txt.append(f"{r.upper()}: {c}")
                        txt = "\n".join(txt)
                        self._param.inputs.append({"content": txt, "component_id": q["component_id"]})
                        outs.append(pd.DataFrame([{"content": txt}]))
                        continue
    def get_input_elements(self) -> dict[str, Any]:
        return self._param.inputs

                    outs.append(self._canvas.get_component(q["component_id"])["obj"].output(allow_partial=False)[1])
                    self._param.inputs.append({"component_id": q["component_id"],
                                               "content": "\n".join(
                                                   [str(d["content"]) for d in outs[-1].to_dict('records')])})
                elif q.get("value"):
                    self._param.inputs.append({"component_id": None, "content": q["value"]})
                    outs.append(pd.DataFrame([{"content": q["value"]}]))
            if outs:
                df = pd.concat(outs, ignore_index=True)
                if "content" in df:
                    df = df.drop_duplicates(subset=['content']).reset_index(drop=True)
                return df
    def get_input_form(self) -> dict[str, dict]:
        return self._param.get_input_form()

        upstream_outs = []
    def set_input_value(self, key: str, value: Any) -> None:
        if key not in self._param.inputs:
            self._param.inputs[key] = {"value": None}
        self._param.inputs[key]["value"] = value

        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch", "concentrator"]:
                continue
            if self.component_name.lower() == "generate" and self.get_component_name(u) == "retrieval":
                o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
                if o is not None:
                    o["component_id"] = u
                    upstream_outs.append(o)
                continue
            # if self.component_name.lower() != "answer" and u not in self._canvas.get_component(self._id)["upstream"]: continue
            if self.component_name.lower().find("switch") < 0 \
                    and self.get_component_name(u) in ["relevant", "categorize"]:
                continue
            if u.lower().find("answer") >= 0:
                for r, c in self._canvas.history[::-1]:
                    if r == "user":
                        upstream_outs.append(pd.DataFrame([{"content": c, "component_id": u}]))
                        break
                break
            if self.component_name.lower().find("answer") >= 0 and self.get_component_name(u) in ["relevant"]:
                continue
            o = self._canvas.get_component(u)["obj"].output(allow_partial=False)[1]
            if o is not None:
                o["component_id"] = u
                upstream_outs.append(o)
            break
    def get_input_value(self, key: str) -> Any:
        if key not in self._param.inputs:
            return None
        return self._param.inputs[key].get("value")

        assert upstream_outs, "Can't inference the where the component input is. Please identify whose output is this component's input."

        df = pd.concat(upstream_outs, ignore_index=True)
        if "content" in df:
            df = df.drop_duplicates(subset=['content']).reset_index(drop=True)

        self._param.inputs = []
        for _, r in df.iterrows():
            self._param.inputs.append({"component_id": r["component_id"], "content": r["content"]})

        return df

    def get_input_elements(self):
        assert self._param.query, "Please identify input parameters firstly."
        eles = []
        for q in self._param.query:
            if q.get("component_id"):
                cpn_id = q["component_id"]
                if cpn_id.split("@")[0].lower().find("begin") >= 0:
                    cpn_id, key = cpn_id.split("@")
                    eles.extend(self._canvas.get_component(cpn_id)["obj"]._param.query)
                    continue

                eles.append({"name": self._canvas.get_component_name(cpn_id), "key": cpn_id})
            else:
                eles.append({"key": q["value"], "name": q["value"], "value": q["value"]})
        return eles

    def get_stream_input(self):
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])

        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch", "answer"]:
                continue
            return self._canvas.get_component(u)["obj"].output()[1]

    @staticmethod
    def be_output(v):
        return pd.DataFrame([{"content": v}])

    def get_component_name(self, cpn_id):
    def get_component_name(self, cpn_id) -> str:
        return self._canvas.get_component(cpn_id)["obj"].component_name.lower()

    def debug(self, **kwargs):
        return self._run([], **kwargs)
    def get_param(self, name):
        if hasattr(self._param, name):
            return getattr(self._param, name)

    def get_parent(self):
        pid = self._canvas.get_component(self._id)["parent_id"]
    def debug(self, **kwargs):
        return self._invoke(**kwargs)

    def get_parent(self) -> Union[object, None]:
        pid = self._canvas.get_component(self._id).get("parent_id")
        if not pid:
            return
        return self._canvas.get_component(pid)["obj"]

    def get_upstream(self) -> List[str]:
        cpn_nms = self._canvas.get_component(self._id)['upstream']
        return cpn_nms

    def get_downstream(self) -> List[str]:
        cpn_nms = self._canvas.get_component(self._id)['downstream']
        return cpn_nms

    @staticmethod
    def string_format(content: str, kv: dict[str, str]) -> str:
        for n, v in kv.items():
            def repl(_match, val=v):
                return str(val) if val is not None else ""
            content = re.sub(
                r"\{%s\}" % re.escape(n),
                repl,
                content
            )
        return content

    def exception_handler(self):
        if not self._param.exception_method:
            return
        return {
            "goto": self._param.exception_goto,
            "default_value": self._param.exception_default_value
        }

    def get_exception_default_value(self):
        if self._param.exception_method != "comment":
            return ""
        return self._param.exception_default_value

    def set_exception_default_value(self):
        self.set_output("result", self.get_exception_default_value())

    def thoughts(self) -> str:
        raise NotImplementedError()
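
One detail in `string_format` deserves a standalone example: passing a callable to `re.sub` keeps backslashes in the substituted value literal instead of letting them be parsed as group references. A minimal sketch of the same logic outside the class:

    import re

    def string_format(content: str, kv: dict) -> str:
        # Same approach as ComponentBase.string_format above.
        for n, v in kv.items():
            def repl(_match, val=v):
                return str(val) if val is not None else ""
            content = re.sub(r"\{%s\}" % re.escape(n), repl, content)
        return content

    print(string_format("Query was: {sys.query}", {"sys.query": "C:\\Users\\me"}))
    # -> Query was: C:\Users\me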

@@ -13,37 +13,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import partial
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
from agent.component.fillup import UserFillUpParam, UserFillUp


class BeginParam(ComponentParamBase):
class BeginParam(UserFillUpParam):

    """
    Define the Begin component parameters.
    """
    def __init__(self):
        super().__init__()
        self.mode = "conversational"
        self.prologue = "Hi! I'm your smart assistant. What can I do for you?"
        self.query = []

    def check(self):
        return True
        self.check_valid_value(self.mode, "The 'mode' should be either `conversational` or `task`", ["conversational", "task"])

    def get_input_form(self) -> dict[str, dict]:
        return getattr(self, "inputs")


class Begin(ComponentBase):
class Begin(UserFillUp):
    component_name = "Begin"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)
        return pd.DataFrame([{"content": self._param.prologue}])

    def stream_output(self):
        res = {"content": self._param.prologue}
        yield res
        self.set_output(self.be_output(res))

    def _invoke(self, **kwargs):
        for k, v in kwargs.get("inputs", {}).items():
            if isinstance(v, dict) and v.get("type", "").lower().find("file") >= 0:
                if v.get("optional") and v.get("value", None) is None:
                    v = None
                else:
                    v = self._canvas.get_files([v["value"]])
            else:
                v = v.get("value")
            self.set_output(k, v)
            self.set_input_value(k, v)

    def thoughts(self) -> str:
        return ""

@@ -1,84 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import requests
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class BingParam(ComponentParamBase):
    """
    Define the Bing component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "Webpages"
        self.api_key = "YOUR_ACCESS_KEY"
        self.country = "CN"
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Bing Web Search or Bing News", ["Webpages", "News"])
        self.check_empty(self.api_key, "Bing subscription key")
        self.check_valid_value(self.country, "Bing Country",
                               ['AR', 'AU', 'AT', 'BE', 'BR', 'CA', 'CL', 'DK', 'FI', 'FR', 'DE', 'HK', 'IN', 'ID',
                                'IT', 'JP', 'KR', 'MY', 'MX', 'NL', 'NZ', 'NO', 'CN', 'PL', 'PT', 'PH', 'RU', 'SA',
                                'ZA', 'ES', 'SE', 'CH', 'TW', 'TR', 'GB', 'US'])
        self.check_valid_value(self.language, "Bing Languages",
                               ['ar', 'eu', 'bn', 'bg', 'ca', 'ns', 'nt', 'hr', 'cs', 'da', 'nl', 'en', 'gb', 'et',
                                'fi', 'fr', 'gl', 'de', 'gu', 'he', 'hi', 'hu', 'is', 'it', 'jp', 'kn', 'ko', 'lv',
                                'lt', 'ms', 'ml', 'mr', 'nb', 'pl', 'br', 'pt', 'pa', 'ro', 'ru', 'sr', 'sk', 'sl',
                                'es', 'sv', 'ta', 'te', 'th', 'tr', 'uk', 'vi'])


class Bing(ComponentBase, ABC):
    component_name = "Bing"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Bing.be_output("")

        try:
            headers = {"Ocp-Apim-Subscription-Key": self._param.api_key, 'Accept-Language': self._param.language}
            params = {"q": ans, "textDecorations": True, "textFormat": "HTML", "cc": self._param.country,
                      "answerCount": 1, "promote": self._param.channel}
            if self._param.channel == "Webpages":
                response = requests.get("https://api.bing.microsoft.com/v7.0/search", headers=headers, params=params)
                response.raise_for_status()
                search_results = response.json()
                bing_res = [{"content": '<a href="' + i["url"] + '">' + i["name"] + '</a> ' + i["snippet"]} for i in
                            search_results["webPages"]["value"]]
            elif self._param.channel == "News":
                response = requests.get("https://api.bing.microsoft.com/v7.0/news/search", headers=headers,
                                        params=params)
                response.raise_for_status()
                search_results = response.json()
                bing_res = [{"content": '<a href="' + i["url"] + '">' + i["name"] + '</a> ' + i["description"]} for i
                            in search_results['news']['value']]
        except Exception as e:
            return Bing.be_output("**ERROR**: " + str(e))

        if not bing_res:
            return Bing.be_output("")

        df = pd.DataFrame(bing_res)
        logging.debug(f"df: {str(df)}")
        return df

@@ -14,24 +14,31 @@
# limitations under the License.
#
import logging
import os
import re
from abc import ABC

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from agent.component.llm import LLMParam, LLM
from api.utils.api_utils import timeout
from rag.llm.chat_model import ERROR_PREFIX


class CategorizeParam(GenerateParam):
class CategorizeParam(LLMParam):

    """
    Define the Categorize component parameters.
    """
    def __init__(self):
        super().__init__()
        self.category_description = {}
        self.prompt = ""
        self.query = "sys.query"
        self.message_history_window_size = 1
        self.update_prompt()

    def check(self):
        super().check()
        self.check_positive_integer(self.message_history_window_size, "[Categorize] Message window size > 0")
        self.check_empty(self.category_description, "[Categorize] Category examples")
        for k, v in self.category_description.items():
            if not k:
@@ -39,60 +46,92 @@ class CategorizeParam(GenerateParam):
            if not v.get("to"):
                raise ValueError(f"[Categorize] 'To' of category {k} can not be empty!")

    def get_prompt(self, chat_hist):
    def get_input_form(self) -> dict[str, dict]:
        return {
            "query": {
                "type": "line",
                "name": "Query"
            }
        }

    def update_prompt(self):
        cate_lines = []
        for c, desc in self.category_description.items():
            for line in desc.get("examples", "").split("\n"):
            for line in desc.get("examples", []):
                if not line:
                    continue
                cate_lines.append("USER: {}\nCategory: {}".format(line, c))
                cate_lines.append("USER: \"" + re.sub(r"\n", " ", line, flags=re.DOTALL) + "\" → " + c)

        descriptions = []
        for c, desc in self.category_description.items():
            if desc.get("description"):
                descriptions.append(
                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))
                    "\n------\nCategory: {}\nDescription: {}".format(c, desc["description"]))

        self.prompt = """
You're a text classifier. You need to categorize the user's questions into {} categories,
namely: {}
Here's the description of each category:
{}
        self.sys_prompt = """
You are an advanced classification system that categorizes user questions into specific types. Analyze the input question and classify it into ONE of the following categories:
{}

You could learn from the following examples:
{}
You could learn from the above examples.
Just mention the category names, no need for any additional words.

---- Real Data ----
{}
""".format(
            len(self.category_description.keys()),
            "/".join(list(self.category_description.keys())),
            "\n".join(descriptions),
            "- ".join(cate_lines),
            chat_hist
Here's the description of each category:
- {}

---- Instructions ----
- Consider both explicit mentions and implied context
- Prioritize the most specific applicable category
- Return only the category name without explanations
- Use "Other" only when no other category fits

""".format(
            "\n - ".join(list(self.category_description.keys())),
            "\n".join(descriptions)
        )
        return self.prompt

        if cate_lines:
            self.sys_prompt += """
---- Examples ----
{}
""".format("\n".join(cate_lines))


class Categorize(Generate, ABC):
class Categorize(LLM, ABC):
    component_name = "Categorize"

    def _run(self, history, **kwargs):
        input = self.get_input()
        input = " - ".join(input["content"]) if "content" in input else ""
    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        msg = self._canvas.get_history(self._param.message_history_window_size)
        if not msg:
            msg = [{"role": "user", "content": ""}]
        if kwargs.get("sys.query"):
            msg[-1]["content"] = kwargs["sys.query"]
            self.set_input_value("sys.query", kwargs["sys.query"])
        else:
            msg[-1]["content"] = self._canvas.get_variable_value(self._param.query)
            self.set_input_value(self._param.query, msg[-1]["content"])
        self._param.update_prompt()
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(input), [{"role": "user", "content": "\nCategory: "}],
                            self._param.gen_conf())
        logging.debug(f"input: {input}, answer: {str(ans)}")

        user_prompt = """
---- Real Data ----
{} →
""".format(" | ".join(["{}: \"{}\"".format(c["role"].upper(), re.sub(r"\n", "", c["content"], flags=re.DOTALL)) for c in msg]))
        ans = chat_mdl.chat(self._param.sys_prompt, [{"role": "user", "content": user_prompt}], self._param.gen_conf())
        logging.info(f"input: {user_prompt}, answer: {str(ans)}")
        if ERROR_PREFIX in ans:
            raise Exception(ans)
        # Count the number of times each category appears in the answer.
        category_counts = {}
        for c in self._param.category_description.keys():
            if ans.lower().find(c.lower()) >= 0:
                return Categorize.be_output(self._param.category_description[c]["to"])
            count = ans.lower().count(c.lower())
            category_counts[c] = count

        return Categorize.be_output(list(self._param.category_description.items())[-1][1]["to"])
        cpn_ids = list(self._param.category_description.items())[-1][1]["to"]
        max_category = list(self._param.category_description.keys())[0]
        if any(category_counts.values()):
            max_category = max(category_counts.items(), key=lambda x: x[1])[0]
            cpn_ids = self._param.category_description[max_category]["to"]

    def debug(self, **kwargs):
        df = self._run([], **kwargs)
        cpn_id = df.iloc[0, 0]
        return Categorize.be_output(self._canvas.get_component_name(cpn_id))
        self.set_output("category_name", max_category)
        self.set_output("_next", cpn_ids)

    def thoughts(self) -> str:
        return "Which category should it fall into, {}? ...".format(",".join([f"`{c}`" for c, _ in self._param.category_description.items()]))

@@ -1,66 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from duckduckgo_search import DDGS
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class DuckDuckGoParam(ComponentParamBase):
    """
    Define the DuckDuckGo component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.channel = "text"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.channel, "Web Search or News", ["text", "news"])


class DuckDuckGo(ComponentBase, ABC):
    component_name = "DuckDuckGo"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return DuckDuckGo.be_output("")

        try:
            if self._param.channel == "text":
                with DDGS() as ddgs:
                    # {'title': '', 'href': '', 'body': ''}
                    duck_res = [{"content": '<a href="' + i["href"] + '">' + i["title"] + '</a> ' + i["body"]} for i
                                in ddgs.text(ans, max_results=self._param.top_n)]
            elif self._param.channel == "news":
                with DDGS() as ddgs:
                    # {'date': '', 'title': '', 'body': '', 'url': '', 'image': '', 'source': ''}
                    duck_res = [{"content": '<a href="' + i["url"] + '">' + i["title"] + '</a> ' + i["body"]} for i
                                in ddgs.news(ans, max_results=self._param.top_n)]
        except Exception as e:
            return DuckDuckGo.be_output("**ERROR**: " + str(e))

        if not duck_res:
            return DuckDuckGo.be_output("")

        df = pd.DataFrame(duck_res)
        logging.debug(f"df: {df}")
        return df

@@ -1,138 +0,0 @@ (file removed)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import ABC
import json
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import formataddr
from agent.component.base import ComponentBase, ComponentParamBase


class EmailParam(ComponentParamBase):
    """
    Define the Email component parameters.
    """
    def __init__(self):
        super().__init__()
        # Fixed configuration parameters
        self.smtp_server = ""  # SMTP server address
        self.smtp_port = 465  # SMTP port
        self.email = ""  # Sender email
        self.password = ""  # Email authorization code
        self.sender_name = ""  # Sender name

    def check(self):
        # Check required parameters
        self.check_empty(self.smtp_server, "SMTP Server")
        self.check_empty(self.email, "Email")
        self.check_empty(self.password, "Password")
        self.check_empty(self.sender_name, "Sender Name")


class Email(ComponentBase, ABC):
    component_name = "Email"

    def _run(self, history, **kwargs):
        # Get upstream component output and parse JSON
        ans = self.get_input()
        content = "".join(ans["content"]) if "content" in ans else ""
        if not content:
            return Email.be_output("No content to send")

        success = False
        try:
            # Parse the JSON string passed from upstream
            email_data = json.loads(content)

            # Validate required fields
            if "to_email" not in email_data:
                return Email.be_output("Missing required field: to_email")

            # Create the email object
            msg = MIMEMultipart('alternative')

            # Properly handle sender name encoding
            msg['From'] = formataddr((str(Header(self._param.sender_name, 'utf-8')), self._param.email))
            msg['To'] = email_data["to_email"]
            if "cc_email" in email_data and email_data["cc_email"]:
                msg['Cc'] = email_data["cc_email"]
            msg['Subject'] = Header(email_data.get("subject", "No Subject"), 'utf-8').encode()

            # Use content from email_data or default content
            email_content = email_data.get("content", "No content provided")
            # msg.attach(MIMEText(email_content, 'plain', 'utf-8'))
            msg.attach(MIMEText(email_content, 'html', 'utf-8'))

            # Connect to the SMTP server and send
            logging.info(f"Connecting to SMTP server {self._param.smtp_server}:{self._param.smtp_port}")

            context = smtplib.ssl.create_default_context()
            with smtplib.SMTP_SSL(self._param.smtp_server, self._param.smtp_port, context=context) as server:
                # Login
                logging.info(f"Attempting to login with email: {self._param.email}")
                server.login(self._param.email, self._param.password)

                # Gather all recipients (To plus any Cc addresses)
                recipients = [email_data["to_email"]]
                if "cc_email" in email_data and email_data["cc_email"]:
                    recipients.extend(email_data["cc_email"].split(','))
|
||||
|
||||
# Send email
|
||||
logging.info(f"Sending email to recipients: {recipients}")
|
||||
try:
|
||||
server.send_message(msg, self._param.email, recipients)
|
||||
success = True
|
||||
except Exception as e:
|
||||
logging.error(f"Error during send_message: {str(e)}")
|
||||
# Try alternative method
|
||||
server.sendmail(self._param.email, recipients, msg.as_string())
|
||||
success = True
|
||||
|
||||
try:
|
||||
server.quit()
|
||||
except Exception as e:
|
||||
# Ignore errors when closing connection
|
||||
logging.warning(f"Non-fatal error during connection close: {str(e)}")
|
||||
|
||||
if success:
|
||||
return Email.be_output("Email sent successfully")
|
||||
|
||||
except json.JSONDecodeError:
|
||||
error_msg = "Invalid JSON format in input"
|
||||
logging.error(error_msg)
|
||||
return Email.be_output(error_msg)
|
||||
|
||||
except smtplib.SMTPAuthenticationError:
|
||||
error_msg = "SMTP Authentication failed. Please check your email and authorization code."
|
||||
logging.error(error_msg)
|
||||
return Email.be_output(f"Failed to send email: {error_msg}")
|
||||
|
||||
except smtplib.SMTPConnectError:
|
||||
error_msg = f"Failed to connect to SMTP server {self._param.smtp_server}:{self._param.smtp_port}"
|
||||
logging.error(error_msg)
|
||||
return Email.be_output(f"Failed to send email: {error_msg}")
|
||||
|
||||
except smtplib.SMTPException as e:
|
||||
error_msg = f"SMTP error occurred: {str(e)}"
|
||||
logging.error(error_msg)
|
||||
return Email.be_output(f"Failed to send email: {error_msg}")
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Unexpected error: {str(e)}"
|
||||
logging.error(error_msg)
|
||||
return Email.be_output(f"Failed to send email: {error_msg}")
|
||||
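The upstream payload this component parses is a small JSON object; a hedged example with illustrative field values (these are the exact keys the code reads: to_email is required, cc_email is comma-split, subject and content have defaults):

import json

# Illustrative payload an upstream component would hand to Email._run().
payload = json.dumps({
    "to_email": "alice@example.com",
    "cc_email": "bob@example.com,carol@example.com",
    "subject": "Weekly report",
    "content": "<p>Hello!</p>",
})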
@ -1,154 +0,0 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
import re
from copy import deepcopy

import pandas as pd
import pymysql
import psycopg2
from agent.component import GenerateParam, Generate
import pyodbc
import logging


class ExeSQLParam(GenerateParam):
    """
    Define the ExeSQL component parameters.
    """

    def __init__(self):
        super().__init__()
        self.db_type = "mysql"
        self.database = ""
        self.username = ""
        self.host = ""
        self.port = 3306
        self.password = ""
        self.loop = 3
        self.top_n = 30

    def check(self):
        super().check()
        self.check_valid_value(self.db_type, "Choose DB type", ['mysql', 'postgresql', 'mariadb', 'mssql'])
        self.check_empty(self.database, "Database name")
        self.check_empty(self.username, "database username")
        self.check_empty(self.host, "IP Address")
        self.check_positive_integer(self.port, "IP Port")
        self.check_empty(self.password, "Database password")
        self.check_positive_integer(self.top_n, "Number of records")
        if self.database == "rag_flow":
            if self.host == "ragflow-mysql":
                raise ValueError("For security reasons, the database named rag_flow is not supported.")
            if self.password == "infini_rag_flow":
                raise ValueError("For security reasons, the default password is not supported.")


class ExeSQL(Generate, ABC):
    component_name = "ExeSQL"

    def _refactor(self, ans):
        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
        match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
        if match:
            ans = match.group(1)  # Query content
            return ans
        else:
            logging.debug("no markdown SQL block found")
            ans = re.sub(r'^.*?SELECT ', 'SELECT ', ans, flags=re.IGNORECASE)
            ans = re.sub(r';.*?SELECT ', '; SELECT ', ans, flags=re.IGNORECASE)
            ans = re.sub(r';[^;]*$', r';', ans)
            if not ans:
                raise Exception("SQL statement not found!")
            return ans

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join([str(a) for a in ans["content"]]) if "content" in ans else ""
        ans = self._refactor(ans)
        if self._param.db_type in ["mysql", "mariadb"]:
            db = pymysql.connect(db=self._param.database, user=self._param.username, host=self._param.host,
                                 port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'postgresql':
            db = psycopg2.connect(dbname=self._param.database, user=self._param.username, host=self._param.host,
                                  port=self._param.port, password=self._param.password)
        elif self._param.db_type == 'mssql':
            conn_str = (
                r'DRIVER={ODBC Driver 17 for SQL Server};'
                r'SERVER=' + self._param.host + ',' + str(self._param.port) + ';'
                r'DATABASE=' + self._param.database + ';'
                r'UID=' + self._param.username + ';'
                r'PWD=' + self._param.password
            )
            db = pyodbc.connect(conn_str)
        try:
            cursor = db.cursor()
        except Exception as e:
            raise Exception("Database Connection Failed! \n" + str(e))
        if not hasattr(self, "_loop"):
            setattr(self, "_loop", 0)
        self._loop += 1
        input_list = re.split(r';', ans.replace(r"\n", " "))
        sql_res = []
        for i in range(len(input_list)):
            single_sql = input_list[i]
            while self._loop <= self._param.loop:
                self._loop += 1
                if not single_sql:
                    break
                try:
                    cursor.execute(single_sql)
                    if cursor.rowcount == 0:
                        sql_res.append({"content": "No record in the database!"})
                        break
                    if self._param.db_type == 'mssql':
                        single_res = pd.DataFrame.from_records(cursor.fetchmany(self._param.top_n),
                                                               columns=[desc[0] for desc in cursor.description])
                    else:
                        single_res = pd.DataFrame([i for i in cursor.fetchmany(self._param.top_n)])
                        single_res.columns = [i[0] for i in cursor.description]
                    sql_res.append({"content": single_res.to_markdown(index=False, floatfmt=".6f")})
                    break
                except Exception as e:
                    single_sql = self._regenerate_sql(single_sql, str(e), **kwargs)
                    single_sql = self._refactor(single_sql)
                    if self._loop > self._param.loop:
                        sql_res.append({"content": "Can't query the correct data via SQL statement."})
        db.close()
        if not sql_res:
            return ExeSQL.be_output("")
        return pd.DataFrame(sql_res)

    def _regenerate_sql(self, failed_sql, error_message, **kwargs):
        prompt = f'''
## You are the Repair SQL Statement Helper, please modify the original SQL statement based on the SQL query error report.
## The original SQL statement is as follows:{failed_sql}.
## The contents of the SQL query error report are as follows:{error_message}.
## Answer only the modified SQL statement. Please do not give any explanation, just answer the code.
'''
        self._param.prompt = prompt
        kwargs_ = deepcopy(kwargs)
        kwargs_["stream"] = False
        response = Generate._run(self, [], **kwargs_)
        try:
            regenerated_sql = response.loc[0, "content"]
            return regenerated_sql
        except Exception as e:
            logging.error(f"Failed to regenerate SQL: {e}")
            return None

    def debug(self, **kwargs):
        return self._run([], **kwargs)
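The `_refactor` helper pulls the first fenced SQL block out of an LLM reply before anything touches the database; a quick sketch of that extraction on a toy answer:

import re

# Toy LLM reply; _refactor() strips <think> blocks, then prefers a fenced ```sql``` section.
ans = "<think>reasoning...</think>Here you go:\n```sql\nSELECT id, name FROM users;\n```"
ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)
match = re.search(r"```sql\s*(.*?)\s*```", ans, re.DOTALL)
print(match.group(1))  # SELECT id, name FROM users;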
@ -13,24 +13,28 @@
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase


class ConcentratorParam(ComponentParamBase):
    """
    Define the Concentrator component parameters.
    """

    def check(self):
        return True


class Concentrator(ComponentBase, ABC):
    component_name = "Concentrator"

    def _run(self, history, **kwargs):
        return Concentrator.be_output("")


class UserFillUpParam(ComponentParamBase):

    def __init__(self):
        super().__init__()
        self.enable_tips = True
        self.tips = "Please fill up the form"

    def check(self) -> bool:
        return True


class UserFillUp(ComponentBase):
    component_name = "UserFillUp"

    def _invoke(self, **kwargs):
        for k, v in kwargs.get("inputs", {}).items():
            self.set_output(k, v)

    def thoughts(self) -> str:
        return "Waiting for your input..."
@ -1,248 +0,0 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import re
from functools import partial
import pandas as pd
from api.db import LLMType
from api.db.services.conversation_service import structure_answer
from api.db.services.llm_service import LLMBundle
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase
from rag.prompts import message_fit_in


class GenerateParam(ComponentParamBase):
    """
    Define the Generate component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.prompt = ""
        self.max_tokens = 0
        self.temperature = 0
        self.top_p = 0
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.cite = True
        self.parameters = []

    def check(self):
        self.check_decimal_float(self.temperature, "[Generate] Temperature")
        self.check_decimal_float(self.presence_penalty, "[Generate] Presence penalty")
        self.check_decimal_float(self.frequency_penalty, "[Generate] Frequency penalty")
        self.check_nonnegative_number(self.max_tokens, "[Generate] Max tokens")
        self.check_decimal_float(self.top_p, "[Generate] Top P")
        self.check_empty(self.llm_id, "[Generate] LLM")
        # self.check_defined_type(self.parameters, "Parameters", ["list"])

    def gen_conf(self):
        conf = {}
        if self.max_tokens > 0:
            conf["max_tokens"] = self.max_tokens
        if self.temperature > 0:
            conf["temperature"] = self.temperature
        if self.top_p > 0:
            conf["top_p"] = self.top_p
        if self.presence_penalty > 0:
            conf["presence_penalty"] = self.presence_penalty
        if self.frequency_penalty > 0:
            conf["frequency_penalty"] = self.frequency_penalty
        return conf


class Generate(ComponentBase):
    component_name = "Generate"

    def get_dependent_components(self):
        inputs = self.get_input_elements()
        cpnts = set([i["key"] for i in inputs[1:] if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
        return list(cpnts)

    def set_cite(self, retrieval_res, answer):
        retrieval_res = retrieval_res.dropna(subset=["vector", "content_ltks"]).reset_index(drop=True)
        if "empty_response" in retrieval_res.columns:
            retrieval_res["empty_response"].fillna("", inplace=True)
        answer, idx = settings.retrievaler.insert_citations(answer,
                                                            [ck["content_ltks"] for _, ck in retrieval_res.iterrows()],
                                                            [ck["vector"] for _, ck in retrieval_res.iterrows()],
                                                            LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING,
                                                                      self._canvas.get_embedding_model()),
                                                            tkweight=0.7,
                                                            vtweight=0.3)
        doc_ids = set([])
        recall_docs = []
        for i in idx:
            did = retrieval_res.loc[int(i), "doc_id"]
            if did in doc_ids:
                continue
            doc_ids.add(did)
            recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

        del retrieval_res["vector"]
        del retrieval_res["content_ltks"]

        reference = {
            "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
            "doc_aggs": recall_docs
        }

        if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
            answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
        res = {"content": answer, "reference": reference}
        res = structure_answer(None, res, "", "")

        return res

    def get_input_elements(self):
        key_set = set([])
        res = [{"key": "user", "name": "Input your question here:"}]
        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.prompt, flags=re.IGNORECASE):
            cpn_id = r.group(1)
            if cpn_id in key_set:
                continue
            if cpn_id.lower().find("begin@") == 0:
                cpn_id, key = cpn_id.split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] != key:
                        continue
                    res.append({"key": r.group(1), "name": p["name"]})
                    key_set.add(r.group(1))
                continue
            cpn_nm = self._canvas.get_component_name(cpn_id)
            if not cpn_nm:
                continue
            res.append({"key": cpn_id, "name": cpn_nm})
            key_set.add(cpn_id)
        return res

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = []
        self._param.inputs = []
        for para in self.get_input_elements()[1:]:
            if para["key"].lower().find("begin@") == 0:
                cpn_id, key = para["key"].split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] == key:
                        kwargs[para["key"]] = p.get("value", "")
                        self._param.inputs.append(
                            {"component_id": para["key"], "content": kwargs[para["key"]]})
                        break
                else:
                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
                continue

            component_id = para["key"]
            cpn = self._canvas.get_component(component_id)["obj"]
            if cpn.component_name.lower() == "answer":
                hist = self._canvas.get_history(1)
                if hist:
                    hist = hist[0]["content"]
                else:
                    hist = ""
                kwargs[para["key"]] = hist
                continue
            _, out = cpn.output(allow_partial=False)
            if "content" not in out.columns:
                kwargs[para["key"]] = ""
            else:
                if cpn.component_name.lower() == "retrieval":
                    retrieval_res.append(out)
                kwargs[para["key"]] = " - " + "\n - ".join([o if isinstance(o, str) else str(o) for o in out["content"]])
            self._param.inputs.append({"component_id": para["key"], "content": kwargs[para["key"]]})

        if retrieval_res:
            retrieval_res = pd.concat(retrieval_res, ignore_index=True)
        else:
            retrieval_res = pd.DataFrame([])

        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

        if not self._param.inputs and prompt.find("{input}") >= 0:
            retrieval_res = self.get_input()
            input = (" - " + "\n - ".join(
                [c for c in retrieval_res["content"] if isinstance(c, str)])) if "content" in retrieval_res else ""
            prompt = re.sub(r"\{input\}", re.escape(input), prompt)

        downstreams = self._canvas.get_component(self._id)["downstream"]
        if kwargs.get("stream") and len(downstreams) == 1 and self._canvas.get_component(downstreams[0])[
                "obj"].component_name.lower() == "answer":
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            return pd.DataFrame([res])

        msg = self._canvas.get_history(self._param.message_history_window_size)
        if len(msg) < 1:
            msg.append({"role": "user", "content": "Output: "})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
        if len(msg) < 2:
            msg.append({"role": "user", "content": "Output: "})
        ans = chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf())
        ans = re.sub(r"<think>.*</think>", "", ans, flags=re.DOTALL)

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            res = self.set_cite(retrieval_res, ans)
            return pd.DataFrame([res])

        return Generate.be_output(ans)

    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
        if "empty_response" in retrieval_res.columns and not "".join(retrieval_res["content"]):
            empty_res = "\n- ".join([str(t) for t in retrieval_res["empty_response"] if str(t)])
            res = {"content": empty_res if empty_res else "Nothing found in knowledgebase!", "reference": []}
            yield res
            self.set_output(res)
            return

        msg = self._canvas.get_history(self._param.message_history_window_size)
        if len(msg) < 1:
            msg.append({"role": "user", "content": "Output: "})
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(chat_mdl.max_length * 0.97))
        if len(msg) < 2:
            msg.append({"role": "user", "content": "Output: "})
        answer = ""
        for ans in chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf()):
            res = {"content": ans, "reference": []}
            answer = ans
            yield res

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            res = self.set_cite(retrieval_res, answer)
            yield res

        self.set_output(Generate.be_output(res))

    def debug(self, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        for para in self._param.debug_inputs:
            kwargs[para["key"]] = para.get("value", "")

        for n, v in kwargs.items():
            prompt = re.sub(r"\{%s\}" % re.escape(n), str(v).replace("\\", " "), prompt)

        u = kwargs.get("user")
        ans = chat_mdl.chat(prompt, [{"role": "user", "content": u if u else "Output: "}], self._param.gen_conf())
        return pd.DataFrame([ans])
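The placeholder pattern in `get_input_elements` matches both `{component_id}` and `{begin@key}` style references; a small sketch of what it captures (the placeholder names here are made up for illustration):

import re

prompt = "Answer using {Retrieval:0} and the user's {begin@topic}."
# Same placeholder regex Generate uses to discover referenced components.
for m in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", prompt, flags=re.IGNORECASE):
    print(m.group(1))  # Retrieval:0, then begin@topic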
@ -1,61 +0,0 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
import requests
from agent.component.base import ComponentBase, ComponentParamBase


class GitHubParam(ComponentParamBase):
    """
    Define the GitHub component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class GitHub(ComponentBase, ABC):
    component_name = "GitHub"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return GitHub.be_output("")

        try:
            url = 'https://api.github.com/search/repositories?q=' + ans + '&sort=stars&order=desc&per_page=' + str(
                self._param.top_n)
            headers = {"Content-Type": "application/vnd.github+json", "X-GitHub-Api-Version": '2022-11-28'}
            response = requests.get(url=url, headers=headers).json()

            github_res = [{"content": '<a href="' + i["html_url"] + '">' + i["name"] + '</a>' + str(
                i["description"]) + '\n stars:' + str(i['watchers'])} for i in response['items']]
        except Exception as e:
            return GitHub.be_output("**ERROR**: " + str(e))

        if not github_res:
            return GitHub.be_output("")

        df = pd.DataFrame(github_res)
        logging.debug(f"df: {df}")
        return df
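For reference, the repository-search request the component issues boils down to one GET against the public GitHub search endpoint; a hedged standalone sketch (query and per_page are illustrative, and a real run needs network access):

import requests

url = "https://api.github.com/search/repositories?q=rag&sort=stars&order=desc&per_page=5"
headers = {"Content-Type": "application/vnd.github+json", "X-GitHub-Api-Version": "2022-11-28"}
# The component reads html_url, name, description and watchers from each item.
for repo in requests.get(url=url, headers=headers).json()["items"]:
    print(repo["name"], repo["watchers"])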
@ -1,70 +0,0 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
from scholarly import scholarly


class GoogleScholarParam(ComponentParamBase):
    """
    Define the GoogleScholar component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 6
        self.sort_by = 'relevance'
        self.year_low = None
        self.year_high = None
        self.patents = True

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.sort_by, "GoogleScholar Sort_by", ['date', 'relevance'])
        self.check_boolean(self.patents, "Whether or not to include patents, defaults to True")


class GoogleScholar(ComponentBase, ABC):
    component_name = "GoogleScholar"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return GoogleScholar.be_output("")

        scholar_client = scholarly.search_pubs(ans, patents=self._param.patents, year_low=self._param.year_low,
                                               year_high=self._param.year_high, sort_by=self._param.sort_by)
        scholar_res = []
        for i in range(self._param.top_n):
            try:
                pub = next(scholar_client)
                scholar_res.append({"content": 'Title: ' + pub['bib']['title'] + '\n_Url: <a href="' + pub[
                    'pub_url'] + '"></a> ' + "\n author: " + ",".join(pub['bib']['author']) + '\n Abstract: ' + pub[
                    'bib'].get('abstract', 'no abstract')})

            except (StopIteration, Exception):  # was `except StopIteration or Exception`, which only caught StopIteration
                logging.exception("GoogleScholar")
                break

        if not scholar_res:
            return GoogleScholar.be_output("")

        df = pd.DataFrame(scholar_res)
        logging.debug(f"df: {df}")
        return df
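The `scholarly` call pattern the component iterates over, as a minimal hedged sketch (the query is illustrative, and scholarly scrapes Google Scholar, so real runs can be rate-limited):

from scholarly import scholarly

# search_pubs returns a generator of publication dicts with a 'bib' sub-dict.
pubs = scholarly.search_pubs("graph neural networks", sort_by="relevance")
first = next(pubs)
print(first["bib"]["title"])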
@ -14,11 +14,17 @@
#  limitations under the License.
#
import json
import logging
import os
import re
import time
from abc import ABC

import requests

from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout
from deepdoc.parser import HtmlParser


class InvokeParam(ComponentParamBase):
@ -38,40 +44,38 @@ class InvokeParam(ComponentParamBase):
        self.datatype = "json"  # New parameter to determine data posting type

    def check(self):
        self.check_valid_value(self.method.lower(), "Type of content from the crawler", ["get", "post", "put"])
        self.check_empty(self.url, "End point URL")
        self.check_positive_integer(self.timeout, "Timeout time in second")
        self.check_boolean(self.clean_html, "Clean HTML")
        self.check_valid_value(self.datatype.lower(), "Data post type", ["json", "formdata"])  # Check for valid datapost value


class Invoke(ComponentBase, ABC):
    component_name = "Invoke"

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)))
    def _invoke(self, **kwargs):
        args = {}
        for para in self._param.variables:
            if para.get("component_id"):
                if '@' in para["component_id"]:
                    component = para["component_id"].split('@')[0]
                    field = para["component_id"].split('@')[1]
                    cpn = self._canvas.get_component(component)["obj"]
                    for param in cpn._param.query:
                        if param["key"] == field:
                            if "value" in param:
                                args[para["key"]] = param["value"]
                else:
                    cpn = self._canvas.get_component(para["component_id"])["obj"]
                    if cpn.component_name.lower() == "answer":
                        args[para["key"]] = self._canvas.get_history(1)[0]["content"]
                        continue
                    _, out = cpn.output(allow_partial=False)
                    if not out.empty:
                        args[para["key"]] = "\n".join(out["content"])
            else:
                if para.get("value"):
                    args[para["key"]] = para["value"]
                else:
                    args[para["key"]] = self._canvas.get_variable_value(para["ref"])

        url = self._param.url.strip()

        def replace_variable(match):
            var_name = match.group(1)
            try:
                value = self._canvas.get_variable_value(var_name)
                return str(value or "")
            except Exception:
                return ""

        # {base_url} or {component_id@variable_name}
        url = re.sub(r"\{([a-zA-Z_][a-zA-Z0-9_.@-]*)\}", replace_variable, url)

        if url.find("http") != 0:
            url = "http://" + url
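The URL template substitution accepts `{name}` placeholders resolved from canvas variables; a toy sketch of the same regex with a plain dict standing in for the canvas (the variable names are illustrative):

import re

variables = {"base_url": "api.example.com", "Begin@city": "Berlin"}  # stand-in for canvas lookups

def replace_variable(match):
    return str(variables.get(match.group(1), ""))

url = "{base_url}/weather?q={Begin@city}"
print(re.sub(r"\{([a-zA-Z_][a-zA-Z0-9_.@-]*)\}", replace_variable, url))
# api.example.com/weather?q=Berlin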
@ -83,50 +87,49 @@ class Invoke(ComponentBase, ABC):
        if re.sub(r"https?:?/?/?", "", self._param.proxy):
            proxies = {"http": self._param.proxy, "https": self._param.proxy}

        last_e = ""
        for _ in range(self._param.max_retries + 1):
            try:
                if method == "get":
                    response = requests.get(url=url, params=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                    if self._param.clean_html:
                        sections = HtmlParser()(None, response.content)
                        self.set_output("result", "\n".join(sections))
                    else:
                        self.set_output("result", response.text)

                if method == "put":
                    if self._param.datatype.lower() == "json":
                        response = requests.put(url=url, json=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                    else:
                        response = requests.put(url=url, data=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                    if self._param.clean_html:
                        sections = HtmlParser()(None, response.content)
                        self.set_output("result", "\n".join(sections))
                    else:
                        self.set_output("result", response.text)

                if method == "post":
                    if self._param.datatype.lower() == "json":
                        response = requests.post(url=url, json=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                    else:
                        response = requests.post(url=url, data=args, headers=headers, proxies=proxies, timeout=self._param.timeout)
                    if self._param.clean_html:
                        sections = HtmlParser()(None, response.content)
                        self.set_output("result", "\n".join(sections))
                    else:
                        self.set_output("result", response.text)

                return self.output("result")
            except Exception as e:
                last_e = e
                logging.exception(f"Http request error: {e}")
                time.sleep(self._param.delay_after_error)

        if last_e:
            self.set_output("_ERROR", str(last_e))
            return f"Http request error: {last_e}"

        assert False, self.output()

    def thoughts(self) -> str:
        return "Waiting for the server to respond..."
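The retry wrapper above is a plain loop: try the request, remember the last exception, back off, retry. A minimal standalone sketch of the same shape (the endpoint, retry count, and delay are illustrative):

import time
import requests

def fetch_with_retries(url, max_retries=2, delay=1.0):
    # Same pattern as Invoke's loop: keep the last error, sleep, retry.
    last_e = None
    for _ in range(max_retries + 1):
        try:
            return requests.get(url, timeout=5).text
        except Exception as e:
            last_e = e
            time.sleep(delay)
    raise RuntimeError(f"Http request error: {last_e}")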
@ -24,10 +24,18 @@ class IterationParam(ComponentParamBase):
    def __init__(self):
        super().__init__()
        self.delimiter = ","
        self.items_ref = ""

    def get_input_form(self) -> dict[str, dict]:
        return {
            "items": {
                "type": "json",
                "name": "Items"
            }
        }

    def check(self):
        self.check_empty(self.delimiter, "Delimiter")
        return True


class Iteration(ComponentBase, ABC):
@ -38,8 +46,15 @@ class Iteration(ComponentBase, ABC):
            if self._canvas.get_component(cid)["obj"].component_name.lower() != "iterationitem":
                continue
            if self._canvas.get_component(cid)["parent_id"] == self._id:
                return cid

    def _invoke(self, **kwargs):
        arr = self._canvas.get_variable_value(self._param.items_ref)
        if not isinstance(arr, list):
            self.set_output("_ERROR", self._param.items_ref + " must be an array, but its type is " + str(type(arr)))

    def thoughts(self) -> str:
        return "Need to process {} items.".format(len(self._canvas.get_variable_value(self._param.items_ref)))
@ -14,7 +14,6 @@
#  limitations under the License.
#
from abc import ABC
from agent.component.base import ComponentBase, ComponentParamBase
@ -33,17 +32,52 @@ class IterationItem(ComponentBase, ABC):
        super().__init__(canvas, id, param)
        self._idx = 0

    def _invoke(self, **kwargs):
        parent = self.get_parent()
        arr = self._canvas.get_variable_value(parent._param.items_ref)
        if not isinstance(arr, list):
            self._idx = -1
            raise Exception(parent._param.items_ref + " must be an array, but its type is " + str(type(arr)))

        if self._idx > 0:
            self.output_collation()

        if self._idx >= len(arr):
            self._idx = -1
            return

        self.set_output("item", arr[self._idx])
        self.set_output("index", self._idx)

        self._idx += 1

    def output_collation(self):
        pid = self.get_parent()._id
        for cid in self._canvas.components.keys():
            obj = self._canvas.get_component_obj(cid)
            p = obj.get_parent()
            if not p:
                continue
            if p._id != pid:
                continue

            if p.component_name.lower() in ["categorize", "message", "switch", "userfillup", "iterationitem"]:
                continue

            for k, o in p._param.outputs.items():
                if "ref" not in o:
                    continue
                _cid, var = o["ref"].split("@")
                if _cid != cid:
                    continue
                res = p.output(k)
                if not res:
                    res = []
                res.append(obj.output(var))
                p.set_output(k, res)

    def end(self):
        return self._idx == -1

    def thoughts(self) -> str:
        return "Next turn..."
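The item component is essentially a cursor over the parent's array: emit one element per call, then flag completion by setting `_idx = -1`. A stripped-down sketch of that stepping pattern (the class name is hypothetical):

class Cursor:
    """Minimal stand-in for IterationItem's stepping logic."""

    def __init__(self, items):
        self.items = items
        self.idx = 0

    def step(self):
        if self.idx >= len(self.items):
            self.idx = -1  # signals end(), mirroring IterationItem
            return None
        item = self.items[self.idx]
        self.idx += 1
        return item

    def end(self):
        return self.idx == -1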
@ -1,65 +0,0 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import re
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate


class KeywordExtractParam(GenerateParam):
    """
    Define the KeywordExtract component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 1

    def check(self):
        super().check()
        self.check_positive_integer(self.top_n, "Top N")

    def get_prompt(self):
        self.prompt = """
- Role: You're a question analyzer.
- Requirements:
  - Summarize user's question, and give top %s important keyword/phrase.
  - Use comma as a delimiter to separate keywords/phrases.
- Answer format: (in language of user's question)
  - keyword:
""" % self.top_n
        return self.prompt


class KeywordExtract(Generate, ABC):
    component_name = "KeywordExtract"

    def _run(self, history, **kwargs):
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""

        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": query}],
                            self._param.gen_conf())

        ans = re.sub(r".*keyword:", "", ans).strip()
        logging.debug(f"ans: {ans}")
        return KeywordExtract.be_output(ans)

    def debug(self, **kwargs):
        return self._run([], **kwargs)
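The component is a single chat call with a fixed prompt plus one regex cleanup; the post-processing step on its own, with a toy model reply:

import re

ans = "Sure. keyword: retrieval, embeddings, reranking"
# Same cleanup KeywordExtract applies to the model reply.
print(re.sub(r".*keyword:", "", ans).strip())  # retrieval, embeddings, reranking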
agent/component/llm.py (new file, 286 lines)
@ -0,0 +1,286 @@
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import json
import logging
import os
import re
from copy import deepcopy
from typing import Any, Generator
import json_repair
from functools import partial
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.db.services.tenant_llm_service import TenantLLMService
from agent.component.base import ComponentBase, ComponentParamBase
from api.utils.api_utils import timeout
from rag.prompts.generator import tool_call_summary, message_fit_in, citation_prompt


class LLMParam(ComponentParamBase):
    """
    Define the LLM component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.sys_prompt = ""
        self.prompts = [{"role": "user", "content": "{sys.query}"}]
        self.max_tokens = 0
        self.temperature = 0
        self.top_p = 0
        self.presence_penalty = 0
        self.frequency_penalty = 0
        self.output_structure = None
        self.cite = True
        self.visual_files_var = None

    def check(self):
        self.check_decimal_float(float(self.temperature), "[Agent] Temperature")
        self.check_decimal_float(float(self.presence_penalty), "[Agent] Presence penalty")
        self.check_decimal_float(float(self.frequency_penalty), "[Agent] Frequency penalty")
        self.check_nonnegative_number(int(self.max_tokens), "[Agent] Max tokens")
        self.check_decimal_float(float(self.top_p), "[Agent] Top P")
        self.check_empty(self.llm_id, "[Agent] LLM")
        self.check_empty(self.sys_prompt, "[Agent] System prompt")
        self.check_empty(self.prompts, "[Agent] User prompt")

    def gen_conf(self):
        conf = {}

        def get_attr(nm):
            try:
                return getattr(self, nm)
            except Exception:
                pass

        if int(self.max_tokens) > 0 and get_attr("maxTokensEnabled"):
            conf["max_tokens"] = int(self.max_tokens)
        if float(self.temperature) > 0 and get_attr("temperatureEnabled"):
            conf["temperature"] = float(self.temperature)
        if float(self.top_p) > 0 and get_attr("topPEnabled"):
            conf["top_p"] = float(self.top_p)
        if float(self.presence_penalty) > 0 and get_attr("presencePenaltyEnabled"):
            conf["presence_penalty"] = float(self.presence_penalty)
        if float(self.frequency_penalty) > 0 and get_attr("frequencyPenaltyEnabled"):
            conf["frequency_penalty"] = float(self.frequency_penalty)
        return conf


class LLM(ComponentBase):
    component_name = "LLM"

    def __init__(self, canvas, component_id, param: ComponentParamBase):
        super().__init__(canvas, component_id, param)
        self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), TenantLLMService.llm_id2llm_type(self._param.llm_id),
                                  self._param.llm_id, max_retries=self._param.max_retries,
                                  retry_interval=self._param.delay_after_error
                                  )
        self.imgs = []

    def get_input_form(self) -> dict[str, dict]:
        res = {}
        for k, v in self.get_input_elements().items():
            res[k] = {
                "type": "line",
                "name": v["name"]
            }
        return res

    def get_input_elements(self) -> dict[str, Any]:
        res = self.get_input_elements_from_text(self._param.sys_prompt)
        if isinstance(self._param.prompts, str):
            self._param.prompts = [{"role": "user", "content": self._param.prompts}]
        for prompt in self._param.prompts:
            d = self.get_input_elements_from_text(prompt["content"])
            res.update(d)
        return res

    def set_debug_inputs(self, inputs: dict[str, dict]):
        self._param.debug_inputs = inputs

    def add2system_prompt(self, txt):
        self._param.sys_prompt += txt

    def _sys_prompt_and_msg(self, msg, args):
        if isinstance(self._param.prompts, str):
            self._param.prompts = [{"role": "user", "content": self._param.prompts}]
        for p in self._param.prompts:
            if msg and msg[-1]["role"] == p["role"]:
                continue
            p = deepcopy(p)
            p["content"] = self.string_format(p["content"], args)
            msg.append(p)
        return msg, self.string_format(self._param.sys_prompt, args)

    def _prepare_prompt_variables(self):
        if self._param.visual_files_var:
            self.imgs = self._canvas.get_variable_value(self._param.visual_files_var)
            if not self.imgs:
                self.imgs = []
            self.imgs = [img for img in self.imgs if img[:len("data:image/")] == "data:image/"]
        if self.imgs and TenantLLMService.llm_id2llm_type(self._param.llm_id) == LLMType.CHAT.value:
            self.chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.IMAGE2TEXT.value,
                                      self._param.llm_id, max_retries=self._param.max_retries,
                                      retry_interval=self._param.delay_after_error
                                      )

        args = {}
        vars = self.get_input_elements() if not self._param.debug_inputs else self._param.debug_inputs
        for k, o in vars.items():
            args[k] = o["value"]
            if not isinstance(args[k], str):
                try:
                    args[k] = json.dumps(args[k], ensure_ascii=False)
                except Exception:
                    args[k] = str(args[k])
            self.set_input_value(k, args[k])

        msg, sys_prompt = self._sys_prompt_and_msg(self._canvas.get_history(self._param.message_history_window_size)[:-1], args)
        user_defined_prompt, sys_prompt = self._extract_prompts(sys_prompt)
        if self._param.cite and self._canvas.get_reference()["chunks"]:
            sys_prompt += citation_prompt(user_defined_prompt)

        return sys_prompt, msg, user_defined_prompt

    def _extract_prompts(self, sys_prompt):
        pts = {}
        for tag in ["TASK_ANALYSIS", "PLAN_GENERATION", "REFLECTION", "CONTEXT_SUMMARY", "CONTEXT_RANKING", "CITATION_GUIDELINES"]:
            r = re.search(rf"<{tag}>(.*?)</{tag}>", sys_prompt, flags=re.DOTALL | re.IGNORECASE)
            if not r:
                continue
            pts[tag.lower()] = r.group(1)
            sys_prompt = re.sub(rf"<{tag}>(.*?)</{tag}>", "", sys_prompt, flags=re.DOTALL | re.IGNORECASE)
        return pts, sys_prompt

    def _generate(self, msg: list[dict], **kwargs) -> str:
        if not self.imgs:
            return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs)
        return self.chat_mdl.chat(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs)

    def _generate_streamly(self, msg: list[dict], **kwargs) -> Generator[str, None, None]:
        ans = ""
        last_idx = 0
        endswith_think = False

        def delta(txt):
            nonlocal ans, last_idx, endswith_think
            delta_ans = txt[last_idx:]
            ans = txt

            if delta_ans.find("<think>") == 0:
                last_idx += len("<think>")
                return "<think>"
            elif delta_ans.find("<think>") > 0:
                delta_ans = txt[last_idx:last_idx + delta_ans.find("<think>")]
                last_idx += delta_ans.find("<think>")
                return delta_ans
            elif delta_ans.endswith("</think>"):
                endswith_think = True
            elif endswith_think:
                endswith_think = False
                return "</think>"

            last_idx = len(ans)
            if ans.endswith("</think>"):
                last_idx -= len("</think>")
            return re.sub(r"(<think>|</think>)", "", delta_ans)

        if not self.imgs:
            for txt in self.chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), **kwargs):
                yield delta(txt)
        else:
            for txt in self.chat_mdl.chat_streamly(msg[0]["content"], msg[1:], self._param.gen_conf(), images=self.imgs, **kwargs):
                yield delta(txt)

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10 * 60)))
    def _invoke(self, **kwargs):
        def clean_formated_answer(ans: str) -> str:
            ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
            ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)
            return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)

        prompt, msg, _ = self._prepare_prompt_variables()
        error: str = ""

        if self._param.output_structure:
            prompt += "\nThe output MUST follow this JSON format:\n" + json.dumps(self._param.output_structure, ensure_ascii=False, indent=2)
            prompt += "\nRedundant information is FORBIDDEN."
            for _ in range(self._param.max_retries + 1):
                _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
                error = ""
                ans = self._generate(msg)
                msg.pop(0)
                if ans.find("**ERROR**") >= 0:
                    logging.error(f"LLM response error: {ans}")
                    error = ans
                    continue
                try:
                    self.set_output("structured_content", json_repair.loads(clean_formated_answer(ans)))
                    return
                except Exception:
                    msg.append({"role": "user", "content": "The answer can't be parsed as JSON"})
                    error = "The answer can't be parsed as JSON"
            if error:
                self.set_output("_ERROR", error)
            return

        downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
        ex = self.exception_handler()
        if any([self._canvas.get_component_obj(cid).component_name.lower() == "message" for cid in downstreams]) and not self._param.output_structure and not (ex and ex["goto"]):
            self.set_output("content", partial(self._stream_output, prompt, msg))
            return

        for _ in range(self._param.max_retries + 1):
            _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
            error = ""
            ans = self._generate(msg)
            msg.pop(0)
            if ans.find("**ERROR**") >= 0:
                logging.error(f"LLM response error: {ans}")
                error = ans
                continue
            self.set_output("content", ans)
            break

        if error:
            if self.get_exception_default_value():
                self.set_output("content", self.get_exception_default_value())
            else:
                self.set_output("_ERROR", error)

    def _stream_output(self, prompt, msg):
        _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
        answer = ""
        for ans in self._generate_streamly(msg):
            if ans.find("**ERROR**") >= 0:
                if self.get_exception_default_value():
                    self.set_output("content", self.get_exception_default_value())
                    yield self.get_exception_default_value()
                else:
                    self.set_output("_ERROR", ans)
                return
            yield ans
            answer += ans
        self.set_output("content", answer)

    def add_memory(self, user: str, assist: str, func_name: str, params: dict, results: str, user_defined_prompt: dict = {}):
        summ = tool_call_summary(self.chat_mdl, func_name, params, results, user_defined_prompt)
        logging.info(f"[MEMORY]: {summ}")
        self._canvas.add_memory(user, assist, summ)

    def thoughts(self) -> str:
        _, msg, _ = self._prepare_prompt_variables()
        return "⌛Give me a moment—starting from: \n\n" + re.sub(r"(User's query:|[\\]+)", '', msg[-1]['content'], flags=re.DOTALL) + "\n\nI’ll figure out our best next move."
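Providers stream cumulative snapshots of the whole answer so far, and the `delta` closure above turns those into increments while handling `<think>` markers. A toy sketch of just the cumulative-to-delta part:

def to_deltas(snapshots):
    # Each element of `snapshots` is the full text so far, the way
    # chat_streamly yields it; emit only the newly appended suffix.
    last = 0
    for snap in snapshots:
        yield snap[last:]
        last = len(snap)

print(list(to_deltas(["Hel", "Hello", "Hello world"])))  # ['Hel', 'lo', ' world']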
@ -13,41 +13,138 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
from abc import ABC
|
||||
import re
|
||||
from functools import partial
|
||||
from typing import Any
|
||||
|
||||
from agent.component.base import ComponentBase, ComponentParamBase
|
||||
from jinja2 import Template as Jinja2Template
|
||||
|
||||
from api.utils.api_utils import timeout
|
||||
|
||||
|
||||
class MessageParam(ComponentParamBase):
|
||||
|
||||
"""
|
||||
Define the Message component parameters.
    """

    def __init__(self):
        super().__init__()
        self.content = []
        self.stream = True
        self.outputs = {
            "content": {
                "type": "str"
            }
        }

    def check(self):
        self.check_empty(self.content, "[Message] Content")
        self.check_boolean(self.stream, "[Message] stream")
        return True


class Message(ComponentBase):
    component_name = "Message"

    def get_kwargs(self, script: str, kwargs: dict = {}, delimiter: str = None) -> tuple[str, dict[str, str | list | Any]]:
        for k, v in self.get_input_elements_from_text(script).items():
            if k in kwargs:
                continue
            v = v["value"]
            if not v:
                v = ""
            ans = ""
            if isinstance(v, partial):
                for t in v():
                    ans += t
            elif isinstance(v, list) and delimiter:
                ans = delimiter.join([str(vv) for vv in v])
            elif not isinstance(v, str):
                try:
                    ans = json.dumps(v, ensure_ascii=False)
                except Exception:
                    pass
            else:
                ans = v
            if not ans:
                ans = ""
            kwargs[k] = ans
            self.set_input_value(k, ans)

        _kwargs = {}
        for n, v in kwargs.items():
            _n = re.sub("[@:.]", "_", n)
            script = re.sub(r"\{%s\}" % re.escape(n), _n, script)
            _kwargs[_n] = v
        return script, _kwargs

    def _stream(self, rand_cnt: str):
        s = 0
        all_content = ""
        cache = {}
        for r in re.finditer(self.variable_ref_patt, rand_cnt, flags=re.DOTALL):
            all_content += rand_cnt[s: r.start()]
            yield rand_cnt[s: r.start()]
            s = r.end()
            exp = r.group(1)
            if exp in cache:
                yield cache[exp]
                all_content += cache[exp]
                continue

            v = self._canvas.get_variable_value(exp)
            if not v:
                v = ""
            if isinstance(v, partial):
                cnt = ""
                for t in v():
                    all_content += t
                    cnt += t
                    yield t
                cache[exp] = cnt
                continue
            elif not isinstance(v, str):
                try:
                    v = json.dumps(v, ensure_ascii=False, indent=2)
                except Exception:
                    v = str(v)
            yield v
            all_content += v
            cache[exp] = v

        if s < len(rand_cnt):
            all_content += rand_cnt[s:]
            yield rand_cnt[s:]

        self.set_output("content", all_content)

    def _is_jinjia2(self, content: str) -> bool:
        patt = [
            r"\{%.*%\}", "{{", "}}"
        ]
        return any([re.search(p, content) for p in patt])

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        rand_cnt = random.choice(self._param.content)
        if self._param.stream and not self._is_jinjia2(rand_cnt):
            self.set_output("content", partial(self._stream, rand_cnt))
            return

        rand_cnt, kwargs = self.get_kwargs(rand_cnt, kwargs)
        template = Jinja2Template(rand_cnt)
        content = rand_cnt  # fall back to the unrendered text if Jinja2 rendering fails
        try:
            content = template.render(kwargs)
        except Exception:
            pass

        for n, v in kwargs.items():
            content = re.sub(n, v, content)

        self.set_output("content", content)

    def thoughts(self) -> str:
        return ""

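A minimal, self-contained sketch of the streaming contract that `_stream` implements: emit literal text as-is, resolve variable references lazily, and cache each resolved value so repeated references stream instantly. `VAR_PATT` is a simplified stand-in for `ComponentBase.variable_ref_patt`, and the resolver can be any callable:

import re

VAR_PATT = r"\{([a-zA-Z]+[:@][a-zA-Z0-9_.-]+)\}"  # simplified assumption

def stream_template(text, resolve):
    s, cache = 0, {}
    for m in re.finditer(VAR_PATT, text, flags=re.DOTALL):
        yield text[s:m.start()]            # literal run before the reference
        s = m.end()
        exp = m.group(1)
        if exp not in cache:               # resolve once, reuse afterwards
            cache[exp] = str(resolve(exp) or "")
        yield cache[exp]
    if s < len(text):
        yield text[s:]                     # trailing literal text

# Usage:
print("".join(stream_template("Hi {begin@name}!", {"begin@name": "Ada"}.get)))
# -> Hi Ada!
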
@@ -1,69 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from Bio import Entrez
import re
import pandas as pd
import xml.etree.ElementTree as ET
from agent.component.base import ComponentBase, ComponentParamBase


class PubMedParam(ComponentParamBase):
    """
    Define the PubMed component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 5
        self.email = "A.N.Other@example.com"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")


class PubMed(ComponentBase, ABC):
    component_name = "PubMed"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return PubMed.be_output("")

        try:
            Entrez.email = self._param.email
            pubmedids = Entrez.read(Entrez.esearch(db='pubmed', retmax=self._param.top_n, term=ans))['IdList']
            pubmedcnt = ET.fromstring(re.sub(r'<(/?)b>|<(/?)i>', '', Entrez.efetch(db='pubmed', id=",".join(pubmedids),
                                                                                   retmode="xml").read().decode("utf-8")))
            pubmed_res = [{"content": 'Title:' + child.find("MedlineCitation").find("Article").find(
                "ArticleTitle").text + '\nUrl:<a href=" https://pubmed.ncbi.nlm.nih.gov/' + child.find(
                "MedlineCitation").find("PMID").text + '">' + '</a>\n' + 'Abstract:' + (
                child.find("MedlineCitation").find("Article").find("Abstract").find(
                    "AbstractText").text if child.find("MedlineCitation").find(
                    "Article").find("Abstract") else "No abstract available")} for child in
                pubmedcnt.findall("PubmedArticle")]
        except Exception as e:
            return PubMed.be_output("**ERROR**: " + str(e))

        if not pubmed_res:
            return PubMed.be_output("")

        df = pd.DataFrame(pubmed_res)
        logging.debug(f"df: {df}")
        return df
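A minimal sketch of the two-step NCBI round trip the component performs, mirroring the calls above: esearch returns PMIDs, efetch then pulls the article XML for those IDs. It needs network access, NCBI asks for a contact e-mail, and the query string is illustrative:

from Bio import Entrez
import xml.etree.ElementTree as ET

Entrez.email = "A.N.Other@example.com"  # contact address requested by NCBI
ids = Entrez.read(Entrez.esearch(db="pubmed", retmax=3,
                                 term="retrieval augmented generation"))["IdList"]
xml = Entrez.efetch(db="pubmed", id=",".join(ids), retmode="xml").read().decode("utf-8")
for art in ET.fromstring(xml).findall("PubmedArticle"):
    # PMID and title live under MedlineCitation in the efetch XML
    print(art.find("MedlineCitation/PMID").text,
          art.findtext("MedlineCitation/Article/ArticleTitle"))
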
@@ -1,83 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate
from rag.utils import num_tokens_from_string, encoder


class RelevantParam(GenerateParam):
    """
    Define the Relevant component parameters.
    """

    def __init__(self):
        super().__init__()
        self.prompt = ""
        self.yes = ""
        self.no = ""

    def check(self):
        super().check()
        self.check_empty(self.yes, "[Relevant] 'Yes'")
        self.check_empty(self.no, "[Relevant] 'No'")

    def get_prompt(self):
        self.prompt = """
You are a grader assessing relevance of a retrieved document to a user question.
It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
Give a binary score, 'yes' or 'no', to indicate whether the document is relevant to the question.
No other words needed except 'yes' or 'no'.
"""
        return self.prompt


class Relevant(Generate, ABC):
    component_name = "Relevant"

    def _run(self, history, **kwargs):
        q = ""
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q = c
                break
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Relevant.be_output(self._param.no)
        ans = "Documents: \n" + ans
        ans = f"Question: {q}\n" + ans
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

        if num_tokens_from_string(ans) >= chat_mdl.max_length - 4:
            ans = encoder.decode(encoder.encode(ans)[:chat_mdl.max_length - 4])

        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
                            self._param.gen_conf())

        logging.debug(ans)
        if ans.lower().find("yes") >= 0:
            return Relevant.be_output(self._param.yes)
        if ans.lower().find("no") >= 0:
            return Relevant.be_output(self._param.no)
        assert False, f"Relevant component got: {ans}"

    def debug(self, **kwargs):
        return self._run([], **kwargs)
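A sketch of the truncation guard used above, assuming a tiktoken-style encoder (rag.utils.encoder behaves similarly): the prompt is clipped to the model's context window by round-tripping through token IDs rather than characters, so no token is split mid-way:

import tiktoken  # assumed stand-in for rag.utils.encoder

enc = tiktoken.get_encoding("cl100k_base")

def clip_to_budget(text: str, max_tokens: int) -> str:
    # Encode, truncate the token list, and decode back to text.
    toks = enc.encode(text)
    return enc.decode(toks[:max_tokens]) if len(toks) > max_tokens else text

print(clip_to_budget("word " * 10000, 128))  # stays within a 128-token budget
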
@@ -1,89 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase
from rag.app.tag import label_question


class RetrievalParam(ComponentParamBase):
    """
    Define the Retrieval component parameters.
    """

    def __init__(self):
        super().__init__()
        self.similarity_threshold = 0.2
        self.keywords_similarity_weight = 0.5
        self.top_n = 8
        self.top_k = 1024
        self.kb_ids = []
        self.rerank_id = ""
        self.empty_response = ""

    def check(self):
        self.check_decimal_float(self.similarity_threshold, "[Retrieval] Similarity threshold")
        self.check_decimal_float(self.keywords_similarity_weight, "[Retrieval] Keyword similarity weight")
        self.check_positive_number(self.top_n, "[Retrieval] Top N")


class Retrieval(ComponentBase, ABC):
    component_name = "Retrieval"

    def _run(self, history, **kwargs):
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""

        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            return Retrieval.be_output("")

        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
        self._canvas.set_embedding_model(embd_nms[0])

        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        kbinfos = settings.retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                                 1, self._param.top_n,
                                                 self._param.similarity_threshold,
                                                 1 - self._param.keywords_similarity_weight,
                                                 aggs=False, rerank_mdl=rerank_mdl,
                                                 rank_feature=label_question(query, kbs))

        if not kbinfos["chunks"]:
            df = Retrieval.be_output("")
            if self._param.empty_response and self._param.empty_response.strip():
                df["empty_response"] = self._param.empty_response
            return df

        df = pd.DataFrame(kbinfos["chunks"])
        df["content"] = df["content_with_weight"]
        del df["content_with_weight"]
        logging.debug("{} {}".format(query, df))
        return df
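Note that the call above passes `1 - self._param.keywords_similarity_weight`: the retriever expects the weight of the vector signal, while the parameter stores the weight of the keyword signal. A simplified model of the resulting hybrid score (the actual scoring inside settings.retrievaler is more involved, this is only the core blend):

def hybrid_score(keyword_sim: float, vector_sim: float,
                 keywords_similarity_weight: float) -> float:
    # The component hands the retriever the complementary vector weight.
    vector_weight = 1 - keywords_similarity_weight
    return keywords_similarity_weight * keyword_sim + vector_weight * vector_sim

print(hybrid_score(0.9, 0.4, 0.5))  # 0.65: an equal blend of both signals
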
@@ -1,142 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from agent.component import GenerateParam, Generate


class RewriteQuestionParam(GenerateParam):
    """
    Define the QuestionRewrite component parameters.
    """

    def __init__(self):
        super().__init__()
        self.temperature = 0.9
        self.prompt = ""
        self.language = ""

    def check(self):
        super().check()

    def get_prompt(self, conv, language, query):
        prompt = """
Role: A helpful assistant
Task: Generate a full user question that would follow the conversation.
Requirements & Restrictions:
  - Text generated MUST be in the same language as the original user's question.
  - If the user's latest question is already complete, don't do anything, just return the original question.
  - DON'T generate anything except a refined question."""

        if language:
            prompt += f"""
  - Text generated MUST be in {language}"""

        prompt += f"""
######################
-Examples-
######################
# Example 1
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
###############
Output: What's the name of Donald Trump's mother?
------------
# Example 2
## Conversation
USER: What is the name of Donald Trump's father?
ASSISTANT: Fred Trump.
USER: And his mother?
ASSISTANT: Mary Trump.
USER: What's her full name?
###############
Output: What's the full name of Donald Trump's mother Mary Trump?
######################
# Real Data
## Conversation
{conv}
###############
"""
        return prompt


class RewriteQuestion(Generate, ABC):
    component_name = "RewriteQuestion"

    def _run(self, history, **kwargs):
        hist = self._canvas.get_history(self._param.message_history_window_size)
        query = self.get_input()
        query = str(query["content"][0]) if "content" in query else ""
        conv = []
        for m in hist:
            if m["role"] not in ["user", "assistant"]:
                continue
            conv.append("{}: {}".format(m["role"].upper(), m["content"]))
        conv = "\n".join(conv)
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(conv, self.gen_lang(self._param.language), query),
                            [{"role": "user", "content": "Output: "}], self._param.gen_conf())
        self._canvas.history.pop()
        self._canvas.history.append(("user", ans))
        return RewriteQuestion.be_output(ans)

    @staticmethod
    def gen_lang(language):
        # Convert a language code to the language name used in the prompt.
        language_dict = {'af': 'Afrikaans', 'ak': 'Akan', 'sq': 'Albanian', 'ws': 'Samoan', 'am': 'Amharic',
                         'ar': 'Arabic', 'hy': 'Armenian', 'az': 'Azerbaijani', 'eu': 'Basque', 'be': 'Belarusian',
                         'bem': 'Bemba', 'bn': 'Bengali', 'bh': 'Bihari',
                         'xx-bork': 'Bork', 'bs': 'Bosnian', 'br': 'Breton', 'bg': 'Bulgarian', 'bt': 'Bhutani',
                         'km': 'Cambodian', 'ca': 'Catalan', 'chr': 'Cherokee', 'ny': 'Chichewa', 'zh-cn': 'Chinese',
                         'zh-tw': 'Chinese', 'co': 'Corsican',
                         'hr': 'Croatian', 'cs': 'Czech', 'da': 'Danish', 'nl': 'Dutch', 'xx-elmer': 'Elmer',
                         'en': 'English', 'eo': 'Esperanto', 'et': 'Estonian', 'ee': 'Ewe', 'fo': 'Faroese',
                         'tl': 'Filipino', 'fi': 'Finnish', 'fr': 'French',
                         'fy': 'Frisian', 'gaa': 'Ga', 'gl': 'Galician', 'ka': 'Georgian', 'de': 'German',
                         'el': 'Greek', 'kl': 'Greenlandic', 'gn': 'Guarani', 'gu': 'Gujarati', 'xx-hacker': 'Hacker',
                         'ht': 'Haitian Creole', 'ha': 'Hausa', 'haw': 'Hawaiian',
                         'iw': 'Hebrew', 'hi': 'Hindi', 'hu': 'Hungarian', 'is': 'Icelandic', 'ig': 'Igbo',
                         'id': 'Indonesian', 'ia': 'Interlingua', 'ga': 'Irish', 'it': 'Italian', 'ja': 'Japanese',
                         'jw': 'Javanese', 'kn': 'Kannada', 'kk': 'Kazakh', 'rw': 'Kinyarwanda',
                         'rn': 'Kirundi', 'xx-klingon': 'Klingon', 'kg': 'Kongo', 'ko': 'Korean', 'kri': 'Krio',
                         'ku': 'Kurdish', 'ckb': 'Kurdish (Sorani)', 'ky': 'Kyrgyz', 'lo': 'Laothian', 'la': 'Latin',
                         'lv': 'Latvian', 'ln': 'Lingala', 'lt': 'Lithuanian',
                         'loz': 'Lozi', 'lg': 'Luganda', 'ach': 'Luo', 'mk': 'Macedonian', 'mg': 'Malagasy',
                         'ms': 'Malay', 'ml': 'Malayalam', 'mt': 'Maltese', 'mv': 'Maldivian', 'mi': 'Maori',
                         'mr': 'Marathi', 'mfe': 'Mauritian Creole', 'mo': 'Moldavian', 'mn': 'Mongolian',
                         'sr-me': 'Montenegrin', 'my': 'Burmese', 'ne': 'Nepali', 'pcm': 'Nigerian Pidgin',
                         'nso': 'Northern Sotho', 'no': 'Norwegian', 'nn': 'Norwegian Nynorsk', 'oc': 'Occitan',
                         'or': 'Oriya', 'om': 'Oromo', 'ps': 'Pashto', 'fa': 'Persian',
                         'xx-pirate': 'Pirate', 'pl': 'Polish', 'pt': 'Portuguese', 'pt-br': 'Portuguese (Brazilian)',
                         'pt-pt': 'Portuguese (Portugal)', 'pa': 'Punjabi', 'qu': 'Quechua', 'ro': 'Romanian',
                         'rm': 'Romansh', 'nyn': 'Runyankole', 'ru': 'Russian', 'gd': 'Scots Gaelic',
                         'sr': 'Serbian', 'sh': 'Serbo-Croatian', 'st': 'Sesotho', 'tn': 'Setswana',
                         'crs': 'Seychellois Creole', 'sn': 'Shona', 'sd': 'Sindhi', 'si': 'Sinhalese', 'sk': 'Slovak',
                         'sl': 'Slovenian', 'so': 'Somali', 'es': 'Spanish', 'es-419': 'Spanish (Latin America)',
                         'su': 'Sundanese',
                         'sw': 'Swahili', 'sv': 'Swedish', 'tg': 'Tajik', 'ta': 'Tamil', 'tt': 'Tatar', 'te': 'Telugu',
                         'th': 'Thai', 'ti': 'Tigrinya', 'to': 'Tongan', 'lua': 'Tshiluba', 'tum': 'Tumbuka',
                         'tr': 'Turkish', 'tk': 'Turkmen', 'tw': 'Twi',
                         'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 'vu': 'Vanuatu',
                         'vi': 'Vietnamese', 'cy': 'Welsh', 'wo': 'Wolof', 'xh': 'Xhosa', 'yi': 'Yiddish',
                         'yo': 'Yoruba', 'zu': 'Zulu'}
        if language in language_dict:
            return language_dict[language]
        else:
            return ""
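A toy illustration of what the rewriter prepares: recent turns are flattened into a transcript that the prompt's "Real Data" section receives, and the model is expected to answer with a self-contained question. The LLM call itself is elided here:

hist = [
    {"role": "user", "content": "What is the name of Donald Trump's father?"},
    {"role": "assistant", "content": "Fred Trump."},
    {"role": "user", "content": "And his mother?"},
]
# Same formatting as RewriteQuestion._run: "ROLE: content" lines, one per turn.
conv = "\n".join("{}: {}".format(m["role"].upper(), m["content"])
                 for m in hist if m["role"] in ("user", "assistant"))
print(conv)
# A well-behaved model should then output:
#   What's the name of Donald Trump's mother?
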
100  agent/component/string_transform.py  Normal file
@@ -0,0 +1,100 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from abc import ABC
from jinja2 import Template as Jinja2Template
from agent.component.base import ComponentParamBase
from api.utils.api_utils import timeout
from .message import Message


class StringTransformParam(ComponentParamBase):
    """
    Define the StringTransform component parameters.
    """

    def __init__(self):
        super().__init__()
        self.method = "split"
        self.script = ""
        self.split_ref = ""
        self.delimiters = [","]
        self.outputs = {"result": {"value": "", "type": "string"}}

    def check(self):
        self.check_valid_value(self.method, "Support method", ["split", "merge"])
        self.check_empty(self.delimiters, "delimiters")


class StringTransform(Message, ABC):
    component_name = "StringTransform"

    def get_input_form(self) -> dict[str, dict]:
        if self._param.method == "split":
            return {
                "line": {
                    "name": "String",
                    "type": "line"
                }
            }
        return {k: {
            "name": o["name"],
            "type": "line"
        } for k, o in self.get_input_elements_from_text(self._param.script).items()}

    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 10*60)))
    def _invoke(self, **kwargs):
        if self._param.method == "split":
            self._split(kwargs.get("line"))
        else:
            self._merge(kwargs)

    def _split(self, line: str | None = None):
        var = self._canvas.get_variable_value(self._param.split_ref) if not line else line
        if not var:
            var = ""
        assert isinstance(var, str), "The input variable is not a string: {}".format(type(var))
        self.set_input_value(self._param.split_ref, var)
        res = []
        for i, s in enumerate(re.split(r"(%s)" % ("|".join([re.escape(d) for d in self._param.delimiters])), var, flags=re.DOTALL)):
            if i % 2 == 1:
                continue
            res.append(s)
        self.set_output("result", res)

    def _merge(self, kwargs: dict[str, str] = {}):
        script = self._param.script
        script, kwargs = self.get_kwargs(script, kwargs, self._param.delimiters[0])

        if self._is_jinjia2(script):
            template = Jinja2Template(script)
            try:
                script = template.render(kwargs)
            except Exception:
                pass

        for k, v in kwargs.items():
            if not v:
                v = ""
            script = re.sub(k, lambda match: v, script)

        self.set_output("result", script)

    def thoughts(self) -> str:
        return f"It's {self._param.method}ing."
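Why `_split` skips odd indices: wrapping the delimiter alternation in a capture group makes re.split keep the matched delimiters in the result, interleaved at the odd positions. A standalone demonstration:

import re

delimiters = [",", ";"]
patt = r"(%s)" % "|".join(re.escape(d) for d in delimiters)
parts = re.split(patt, "a,b;c", flags=re.DOTALL)
print(parts)       # ['a', ',', 'b', ';', 'c'] - delimiters kept at odd indices
print(parts[::2])  # ['a', 'b', 'c'] - the even indices are the split fields
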
@@ -13,8 +13,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import numbers
+import os
 from abc import ABC
+from typing import Any
+
 from agent.component.base import ComponentBase, ComponentParamBase
+from api.utils.api_utils import timeout
 
 
 class SwitchParam(ComponentParamBase):
@@ -34,7 +39,7 @@ class SwitchParam(ComponentParamBase):
         }
         """
         self.conditions = []
-        self.end_cpn_id = "answer:0"
+        self.end_cpn_ids = []
         self.operators = ['contains', 'not contains', 'start with', 'end with', 'empty', 'not empty', '=', '≠', '>',
                           '<', '≥', '≤']
 
@@ -43,54 +48,46 @@ class SwitchParam(ComponentParamBase):
         for cond in self.conditions:
             if not cond["to"]:
                 raise ValueError("[Switch] 'To' can not be empty!")
+        self.check_empty(self.end_cpn_ids, "[Switch] the ELSE/Other destination can not be empty.")
+
+    def get_input_form(self) -> dict[str, dict]:
+        return {
+            "urls": {
+                "name": "URLs",
+                "type": "line"
+            }
+        }
 
 class Switch(ComponentBase, ABC):
     component_name = "Switch"
 
-    def get_dependent_components(self):
-        res = []
-        for cond in self._param.conditions:
-            for item in cond["items"]:
-                if not item["cpn_id"]:
-                    continue
-                if item["cpn_id"].find("begin") >= 0:
-                    continue
-                cid = item["cpn_id"].split("@")[0]
-                res.append(cid)
-
-        return list(set(res))
-
-    def _run(self, history, **kwargs):
+    @timeout(int(os.environ.get("COMPONENT_EXEC_TIMEOUT", 3)))
+    def _invoke(self, **kwargs):
         for cond in self._param.conditions:
             res = []
             for item in cond["items"]:
                 if not item["cpn_id"]:
                     continue
-                cid = item["cpn_id"].split("@")[0]
-                if item["cpn_id"].find("@") > 0:
-                    cpn_id, key = item["cpn_id"].split("@")
-                    for p in self._canvas.get_component(cid)["obj"]._param.query:
-                        if p["key"] == key:
-                            res.append(self.process_operator(p.get("value", ""), item["operator"], item.get("value", "")))
-                            break
-                else:
-                    out = self._canvas.get_component(cid)["obj"].output()[1]
-                    cpn_input = "" if "content" not in out.columns else " ".join([str(s) for s in out["content"]])
-                    res.append(self.process_operator(cpn_input, item["operator"], item.get("value", "")))
-
+                cpn_v = self._canvas.get_variable_value(item["cpn_id"])
+                self.set_input_value(item["cpn_id"], cpn_v)
+                operatee = item.get("value", "")
+                if isinstance(cpn_v, numbers.Number):
+                    operatee = float(operatee)
+                res.append(self.process_operator(cpn_v, item["operator"], operatee))
             if cond["logical_operator"] != "and" and any(res):
-                return Switch.be_output(cond["to"])
+                self.set_output("next", [self._canvas.get_component_name(cpn_id) for cpn_id in cond["to"]])
+                self.set_output("_next", cond["to"])
+                return
 
             if all(res):
-                return Switch.be_output(cond["to"])
+                self.set_output("next", [self._canvas.get_component_name(cpn_id) for cpn_id in cond["to"]])
+                self.set_output("_next", cond["to"])
+                return
 
-        return Switch.be_output(self._param.end_cpn_id)
-
-    def process_operator(self, input: str, operator: str, value: str) -> bool:
-        if not isinstance(input, str) or not isinstance(value, str):
-            raise ValueError('Invalid input or value type: string')
+        self.set_output("next", [self._canvas.get_component_name(cpn_id) for cpn_id in self._param.end_cpn_ids])
+        self.set_output("_next", self._param.end_cpn_ids)
 
+    def process_operator(self, input: Any, operator: str, value: Any) -> bool:
         if operator == "contains":
             return True if value.lower() in input.lower() else False
         elif operator == "not contains":
@@ -128,4 +125,7 @@ class Switch(ComponentBase, ABC):
         except Exception:
             return True if input <= value else False
 
         raise ValueError('Not supported operator' + operator)
+
+    def thoughts(self) -> str:
+        return "I’m weighing a few options and will pick the next step shortly."
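A toy version of the condition logic in the refactored `_invoke`: each item yields a boolean, the group fires on any() for "or" groups and all() for "and" groups, and numeric inputs coerce the comparison value to float first. Only three operators are modeled here:

import numbers

def evaluate(items, logical_operator):
    res = []
    for value, op, operand in items:
        if isinstance(value, numbers.Number):
            operand = float(operand)  # same coercion as _invoke above
        res.append({"=": value == operand,
                    ">": value > operand,
                    "<": value < operand}[op])
    return any(res) if logical_operator != "and" else all(res)

print(evaluate([(3, ">", "2"), (1, "=", "5")], "or"))   # True: first item matches
print(evaluate([(3, ">", "2"), (1, "=", "5")], "and"))  # False: second item fails
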
@@ -1,136 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import re
from agent.component.base import ComponentBase, ComponentParamBase
from jinja2 import Template as Jinja2Template


class TemplateParam(ComponentParamBase):
    """
    Define the Template component parameters.
    """

    def __init__(self):
        super().__init__()
        self.content = ""
        self.parameters = []

    def check(self):
        self.check_empty(self.content, "[Template] Content")
        return True


class Template(ComponentBase):
    component_name = "Template"

    def get_dependent_components(self):
        inputs = self.get_input_elements()
        cpnts = set([i["key"] for i in inputs if i["key"].lower().find("answer") < 0 and i["key"].lower().find("begin") < 0])
        return list(cpnts)

    def get_input_elements(self):
        key_set = set([])
        res = []
        for r in re.finditer(r"\{([a-z]+[:@][a-z0-9_-]+)\}", self._param.content, flags=re.IGNORECASE):
            cpn_id = r.group(1)
            if cpn_id in key_set:
                continue
            if cpn_id.lower().find("begin@") == 0:
                cpn_id, key = cpn_id.split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] != key:
                        continue
                    res.append({"key": r.group(1), "name": p["name"]})
                    key_set.add(r.group(1))
                continue
            cpn_nm = self._canvas.get_component_name(cpn_id)
            if not cpn_nm:
                continue
            res.append({"key": cpn_id, "name": cpn_nm})
            key_set.add(cpn_id)
        return res

    def _run(self, history, **kwargs):
        content = self._param.content

        self._param.inputs = []
        for para in self.get_input_elements():
            if para["key"].lower().find("begin@") == 0:
                cpn_id, key = para["key"].split("@")
                for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
                    if p["key"] == key:
                        value = p.get("value", "")
                        self.make_kwargs(para, kwargs, value)
                        break
                else:
                    assert False, f"Can't find parameter '{key}' for {cpn_id}"
                continue

            component_id = para["key"]
            cpn = self._canvas.get_component(component_id)["obj"]
            if cpn.component_name.lower() == "answer":
                hist = self._canvas.get_history(1)
                if hist:
                    hist = hist[0]["content"]
                else:
                    hist = ""
                self.make_kwargs(para, kwargs, hist)
                continue

            _, out = cpn.output(allow_partial=False)

            result = ""
            if "content" in out.columns:
                result = "\n".join(
                    [o if isinstance(o, str) else str(o) for o in out["content"]]
                )

            self.make_kwargs(para, kwargs, result)

        template = Jinja2Template(content)

        try:
            content = template.render(kwargs)
        except Exception:
            pass

        for n, v in kwargs.items():
            try:
                v = json.dumps(v, ensure_ascii=False)
            except Exception:
                pass
            content = re.sub(
                r"\{%s\}" % re.escape(n), v, content
            )
        content = re.sub(
            r"(\\\"|\")", "", content
        )
        content = re.sub(
            r"(#+)", r" \1 ", content
        )

        return Template.be_output(content)

    def make_kwargs(self, para, kwargs, value):
        self._param.inputs.append(
            {"component_id": para["key"], "content": value}
        )
        try:
            value = json.loads(value)
        except Exception:
            pass
        kwargs[para["key"]] = value
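A sketch of the two-stage fill that `Template._run` performs: try Jinja2 rendering first, then sweep any remaining {component:id}-style placeholders with a literal regex substitution. The keys and template text here are illustrative:

import json
import re
from jinja2 import Template as Jinja2Template

content = "Answer: {{ answer }} (raw ref: {retrieval:0})"
kwargs = {"answer": "42", "retrieval:0": ["chunk A", "chunk B"]}

try:
    content = Jinja2Template(content).render(kwargs)
except Exception:
    pass  # keep the unrendered text and rely on the regex pass below

for n, v in kwargs.items():
    if not isinstance(v, str):
        v = json.dumps(v, ensure_ascii=False)
    # Escape backslashes so re.sub treats the value as literal replacement text.
    content = re.sub(r"\{%s\}" % re.escape(n), v.replace("\\", "\\\\"), content)

print(content)  # Answer: 42 (raw ref: ["chunk A", "chunk B"])
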
@@ -1,80 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import pandas as pd
import pywencai
from agent.component.base import ComponentBase, ComponentParamBase


class WenCaiParam(ComponentParamBase):
    """
    Define the WenCai component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.query_type = "stock"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.query_type, "Query type",
                               ['stock', 'zhishu', 'fund', 'hkstock', 'usstock', 'threeboard', 'conbond', 'insurance',
                                'futures', 'lccp',
                                'foreign_exchange'])


class WenCai(ComponentBase, ABC):
    component_name = "WenCai"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = ",".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return WenCai.be_output("")

        try:
            wencai_res = []
            res = pywencai.get(query=ans, query_type=self._param.query_type, perpage=self._param.top_n)
            if isinstance(res, pd.DataFrame):
                wencai_res.append({"content": res.to_markdown()})
            if isinstance(res, dict):
                for item in res.items():
                    if isinstance(item[1], list):
                        wencai_res.append({"content": item[0] + "\n" + pd.DataFrame(item[1]).to_markdown()})
                        continue
                    if isinstance(item[1], str):
                        wencai_res.append({"content": item[0] + "\n" + item[1]})
                        continue
                    if isinstance(item[1], dict):
                        if "meta" in item[1].keys():
                            continue
                        wencai_res.append({"content": pd.DataFrame.from_dict(item[1], orient='index').to_markdown()})
                        continue
                    if isinstance(item[1], pd.DataFrame):
                        if "image_url" in item[1].columns:
                            continue
                        wencai_res.append({"content": item[1].to_markdown()})
                        continue

                    wencai_res.append({"content": item[0] + "\n" + str(item[1])})
        except Exception as e:
            return WenCai.be_output("**ERROR**: " + str(e))

        if not wencai_res:
            return WenCai.be_output("")

        return pd.DataFrame(wencai_res)
@@ -1,67 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import wikipedia
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase


class WikipediaParam(ComponentParamBase):
    """
    Define the Wikipedia component parameters.
    """

    def __init__(self):
        super().__init__()
        self.top_n = 10
        self.language = "en"

    def check(self):
        self.check_positive_integer(self.top_n, "Top N")
        self.check_valid_value(self.language, "Wikipedia languages",
                               ['af', 'pl', 'ar', 'ast', 'az', 'bg', 'nan', 'bn', 'be', 'ca', 'cs', 'cy', 'da', 'de',
                                'et', 'el', 'en', 'es', 'eo', 'eu', 'fa', 'fr', 'gl', 'ko', 'hy', 'hi', 'hr', 'id',
                                'it', 'he', 'ka', 'lld', 'la', 'lv', 'lt', 'hu', 'mk', 'arz', 'ms', 'min', 'my', 'nl',
                                'ja', 'nb', 'nn', 'ce', 'uz', 'pt', 'kk', 'ro', 'ru', 'ceb', 'sk', 'sl', 'sr', 'sh',
                                'fi', 'sv', 'ta', 'tt', 'th', 'tg', 'azb', 'tr', 'uk', 'ur', 'vi', 'war', 'zh', 'yue'])


class Wikipedia(ComponentBase, ABC):
    component_name = "Wikipedia"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Wikipedia.be_output("")

        try:
            wiki_res = []
            wikipedia.set_lang(self._param.language)
            wiki_engine = wikipedia
            for wiki_key in wiki_engine.search(ans, results=self._param.top_n):
                page = wiki_engine.page(title=wiki_key, auto_suggest=False)
                wiki_res.append({"content": '<a href="' + page.url + '">' + page.title + '</a> ' + page.summary})
        except Exception as e:
            return Wikipedia.be_output("**ERROR**: " + str(e))

        if not wiki_res:
            return Wikipedia.be_output("")

        df = pd.DataFrame(wiki_res)
        logging.debug(f"df: {df}")
        return df
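A minimal use of the same wikipedia calls, standalone: search() proposes titles and page() fetches each one; passing auto_suggest=False avoids silently fetching a "corrected" title that differs from the search hit, which is why the component does the same. Requires network access; the query is illustrative:

import wikipedia

wikipedia.set_lang("en")
for title in wikipedia.search("retrieval augmented generation", results=3):
    page = wikipedia.page(title=title, auto_suggest=False)
    print(page.title, page.url)
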
@@ -1,84 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from abc import ABC
import pandas as pd
from agent.component.base import ComponentBase, ComponentParamBase
import yfinance as yf


class YahooFinanceParam(ComponentParamBase):
    """
    Define the YahooFinance component parameters.
    """

    def __init__(self):
        super().__init__()
        self.info = True
        self.history = False
        self.count = False
        self.financials = False
        self.income_stmt = False
        self.balance_sheet = False
        self.cash_flow_statement = False
        self.news = True

    def check(self):
        self.check_boolean(self.info, "get all stock info")
        self.check_boolean(self.history, "get historical market data")
        self.check_boolean(self.count, "show share count")
        self.check_boolean(self.financials, "show financials")
        self.check_boolean(self.income_stmt, "income statement")
        self.check_boolean(self.balance_sheet, "balance sheet")
        self.check_boolean(self.cash_flow_statement, "cash flow statement")
        self.check_boolean(self.news, "show news")


class YahooFinance(ComponentBase, ABC):
    component_name = "YahooFinance"

    def _run(self, history, **kwargs):
        ans = self.get_input()
        ans = "".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return YahooFinance.be_output("")

        yohoo_res = []
        try:
            msft = yf.Ticker(ans)
            if self._param.info:
                yohoo_res.append({"content": "info:\n" + pd.Series(msft.info).to_markdown() + "\n"})
            if self._param.history:
                yohoo_res.append({"content": "history:\n" + msft.history().to_markdown() + "\n"})
            if self._param.financials:
                yohoo_res.append({"content": "calendar:\n" + pd.DataFrame(msft.calendar).to_markdown() + "\n"})
            if self._param.balance_sheet:
                yohoo_res.append({"content": "balance sheet:\n" + msft.balance_sheet.to_markdown() + "\n"})
                yohoo_res.append(
                    {"content": "quarterly balance sheet:\n" + msft.quarterly_balance_sheet.to_markdown() + "\n"})
            if self._param.cash_flow_statement:
                yohoo_res.append({"content": "cash flow statement:\n" + msft.cashflow.to_markdown() + "\n"})
                yohoo_res.append(
                    {"content": "quarterly cash flow statement:\n" + msft.quarterly_cashflow.to_markdown() + "\n"})
            if self._param.news:
                yohoo_res.append({"content": "news:\n" + pd.DataFrame(msft.news).to_markdown() + "\n"})
        except Exception:
            logging.exception("YahooFinance got exception")

        if not yohoo_res:
            return YahooFinance.be_output("")

        return pd.DataFrame(yohoo_res)
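The same yfinance accessors used standalone: each property returns a pandas object, so to_markdown() yields the LLM-friendly text the component builds above. Requires network access; "MSFT" is an illustrative ticker:

import pandas as pd
import yfinance as yf

t = yf.Ticker("MSFT")
print(pd.Series(t.info).to_markdown()[:500])  # flat key/value company profile
print(t.history(period="5d").to_markdown())   # recent OHLCV rows as a table
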
File diff suppressed because one or more lines are too long
726  agent/templates/advanced_ingestion_pipeline.json  Normal file
File diff suppressed because one or more lines are too long
421  agent/templates/choose_your_knowledge_base_agent.json  Normal file
File diff suppressed because one or more lines are too long
439  agent/templates/choose_your_knowledge_base_workflow.json  Normal file
File diff suppressed because one or more lines are too long
493  agent/templates/chunk_summary.json  Normal file
File diff suppressed because one or more lines are too long
802  agent/templates/customer_review_analysis.json  Normal file
File diff suppressed because one or more lines are too long
885  agent/templates/customer_support.json  Normal file
File diff suppressed because one or more lines are too long
427  agent/templates/cv_analysis_and_candidate_evaluation.json  Normal file
File diff suppressed because one or more lines are too long
852  agent/templates/deep_research.json  Normal file
File diff suppressed because one or more lines are too long
853  agent/templates/deep_search_r.json  Normal file
File diff suppressed because one or more lines are too long
1054  agent/templates/ecommerce_customer_service_workflow.json  Normal file
File diff suppressed because one or more lines are too long
906  agent/templates/generate_SEO_blog.json  Normal file
@ -0,0 +1,906 @@
|
||||
{
|
||||
"id": 8,
|
||||
"title": {
|
||||
"en": "Generate SEO Blog",
|
||||
"zh": "生成SEO博客"},
|
||||
"description": {
|
||||
"en": "This is a multi-agent version of the SEO blog generation workflow. It simulates a small team of AI “writers”, where each agent plays a specialized role — just like a real editorial team.",
|
||||
"zh": "多智能体架构可根据简单的用户输入自动生成完整的SEO博客文章。模拟小型“作家”团队,其中每个智能体扮演一个专业角色——就像真正的编辑团队。"},
|
||||
"canvas_type": "Agent",
|
||||
"dsl": {
|
||||
"components": {
|
||||
"Agent:LuckyApplesGrab": {
|
||||
"downstream": [
|
||||
"Message:ModernSwansThrow"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 3,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The user query is {sys.query}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Lead Agent**, responsible for initiating the multi-agent SEO blog generation process. You will receive the user\u2019s topic and blog goal, interpret the intent, and coordinate the downstream writing agents.\n\n# Goals\n\n1. Parse the user's initial input.\n\n2. Generate a high-level blog intent summary and writing plan.\n\n3. Provide clear instructions to the following Sub_Agents:\n\n - `Outline Agent` \u2192 Create the blog outline.\n\n - `Body Agent` \u2192 Write all sections based on outline.\n\n - `Editor Agent` \u2192 Polish and finalize the blog post.\n\n4. Merge outputs into a complete, readable blog draft in Markdown format.\n\n# Input\n\nYou will receive:\n\n- Blog topic\n\n- Target audience\n\n- Blog goal (e.g., SEO, education, product marketing)\n\n# Output Format\n\n```markdown\n\n## Parsed Writing Plan\n\n- **Topic**: [Extracted from user input]\n\n- **Audience**: [Summarized from user input]\n\n- **Intent**: [Inferred goal and style]\n\n- **Blog Type**: [e.g., Tutorial / Informative Guide / Marketing Content]\n\n- **Long-tail Keywords**: \n\n - keyword 1\n\n - keyword 2\n\n - keyword 3\n\n - ...\n\n## Instructions for Outline Agent\n\nPlease generate a structured outline including H2 and H3 headings. Assign 1\u20132 relevant keywords to each section. Keep it aligned with the user\u2019s intent and audience level.\n\n## Instructions for Body Agent\n\nWrite the full content based on the outline. Each section should be concise (500\u2013600 words), informative, and optimized for SEO. Use `Tavily Search` only when additional examples or context are needed.\n\n## Instructions for Editor Agent\n\nReview and refine the combined content. Improve transitions, ensure keyword integration, and add a meta title + meta description. Maintain Markdown formatting.\n\n\n## Guides\n\n- Do not generate blog content directly.\n\n- Focus on correct intent recognition and instruction generation.\n\n- Keep communication to downstream agents simple, scoped, and accurate.\n\n\n## Input Examples (and how to handle them)\n\nInput: \"I want to write about RAGFlow.\"\n\u2192 Output: Informative Guide, Audience: AI developers, Intent: explain what RAGFlow is and its use cases\n\nInput: \"Need a blog to promote our prompt design tool.\"\n\u2192 Output: Marketing Content, Audience: product managers or tool adopters, Intent: raise awareness and interest in the product\n\nInput: \"How to get more Google traffic using AI\"\n\u2192 Output: How-to, Audience: SEO marketers, Intent: guide readers on applying AI for SEO growth",
|
||||
"temperature": "0.1",
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "Agent",
|
||||
"id": "Agent:SlickSpidersTurn",
|
||||
"name": "Outline Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "Generates a clear and SEO-friendly blog outline using H2/H3 headings based on the topic, audience, and intent provided by the lead agent. Each section includes suggested keywords for optimized downstream writing.\n",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.3,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 2,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Balance",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.2,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "{sys.query}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Outline Agent**, a sub-agent in a multi-agent SEO blog writing system. You operate under the instruction of the `Lead Agent`, and your sole responsibility is to create a clear, well-structured, and SEO-optimized blog outline.\n\n# Tool Access:\n\n- You have access to a search tool called `Tavily Search`.\n\n- If you are unsure how to structure a section, you may call this tool to search for related blog outlines or content from Google.\n\n- Do not overuse it. Your job is to extract **structure**, not to write paragraphs.\n\n\n# Goals\n\n1. Create a well-structured outline with appropriate H2 and H3 headings.\n\n2. Ensure logical flow from introduction to conclusion.\n\n3. Assign 1\u20132 suggested long-tail keywords to each major section for SEO alignment.\n\n4. Make the structure suitable for downstream paragraph writing.\n\n\n\n\n#Note\n\n- Use concise, scannable section titles.\n\n- Do not write full paragraphs.\n\n- Prioritize clarity, logical progression, and SEO alignment.\n\n\n\n- If the blog type is \u201cTutorial\u201d or \u201cHow-to\u201d, include step-based sections.\n\n\n# Input\n\nYou will receive:\n\n- Writing Type (e.g., Tutorial, Informative Guide)\n\n- Target Audience\n\n- User Intent Summary\n\n- 3\u20135 long-tail keywords\n\n\nUse this information to design a structure that both informs readers and maximizes search engine visibility.\n\n# Output Format\n\n```markdown\n\n## Blog Title (suggested)\n\n[Give a short, SEO-friendly title suggestion]\n\n## Outline\n\n### Introduction\n\n- Purpose of the article\n\n- Brief context\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 1]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 2]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 3]\n\n- [Optional H3 Subsection Title A]\n\n - [Explanation of sub-point]\n\n- [Optional H3 Subsection Title B]\n\n - [Explanation of sub-point]\n\n- **Suggested keywords**: [keyword1]\n\n### Conclusion\n\n- Recap key takeaways\n\n- Optional CTA (Call to Action)\n\n- **Suggested keywords**: [keyword3]\n\n",
|
||||
"temperature": 0.5,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "TavilySearch",
|
||||
"name": "TavilySearch",
|
||||
"params": {
|
||||
"api_key": "",
|
||||
"days": 7,
|
||||
"exclude_domains": [],
|
||||
"include_answer": false,
|
||||
"include_domains": [],
|
||||
"include_image_descriptions": false,
|
||||
"include_images": false,
|
||||
"include_raw_content": true,
|
||||
"max_results": 5,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"query": "sys.query",
|
||||
"search_depth": "basic",
|
||||
"topic": "general"
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.85,
|
||||
"user_prompt": "This is the order you need to send to the agent.",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"component_name": "Agent",
|
||||
"id": "Agent:IcyPawsRescue",
|
||||
"name": "Body Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "Writes the full blog content section-by-section following the outline structure. It integrates target keywords naturally and uses Tavily Search only when additional facts or examples are needed.\n",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 3,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "{sys.query}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Body Agent**, a sub-agent in a multi-agent SEO blog writing system. You operate under the instruction of the `Lead Agent`, and your job is to write the full blog content based on the outline created by the `OutlineWriter_Agent`.\n\n\n\n# Tool Access:\n\nYou can use the `Tavily Search` tool to retrieve relevant content, statistics, or examples to support each section you're writing.\n\nUse it **only** when the provided outline lacks enough information, or if the section requires factual grounding.\n\nAlways cite the original link or indicate source where possible.\n\n\n# Goals\n\n1. Write each section (based on H2/H3 structure) as a complete and natural blog paragraph.\n\n2. Integrate the suggested long-tail keywords naturally into each section.\n\n3. When appropriate, use the `Tavily Search` tool to enrich your writing with relevant facts, examples, or quotes.\n\n4. Ensure each section is clear, engaging, and informative, suitable for both human readers and search engines.\n\n\n# Style Guidelines\n\n- Write in a tone appropriate to the audience. Be explanatory, not promotional, unless it's a marketing blog.\n\n- Avoid generic filler content. Prioritize clarity, structure, and value.\n\n- Ensure SEO keywords are embedded seamlessly, not forcefully.\n\n\n\n- Maintain writing rhythm. Vary sentence lengths. Use transitions between ideas.\n\n\n# Input\n\n\nYou will receive:\n\n- Blog title\n\n- Structured outline (including section titles, keywords, and descriptions)\n\n- Target audience\n\n- Blog type and user intent\n\nYou must **follow the outline strictly**. Write content **section-by-section**, based on the structure.\n\n\n# Output Format\n\n```markdown\n\n## H2: [Section Title]\n\n[Your generated content for this section \u2014 500-600 words, using keywords naturally.]\n\n",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "TavilySearch",
|
||||
"name": "TavilySearch",
|
||||
"params": {
|
||||
"api_key": "",
|
||||
"days": 7,
|
||||
"exclude_domains": [],
|
||||
"include_answer": false,
|
||||
"include_domains": [],
|
||||
"include_image_descriptions": false,
|
||||
"include_images": false,
|
||||
"include_raw_content": true,
|
||||
"max_results": 5,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"query": "sys.query",
|
||||
"search_depth": "basic",
|
||||
"topic": "general"
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "This is the order you need to send to the agent.",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
{
|
||||
"component_name": "Agent",
|
||||
"id": "Agent:TenderAdsAllow",
|
||||
"name": "Editor Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "Polishes and finalizes the entire blog post. Enhances clarity, checks keyword usage, improves flow, and generates a meta title and description for SEO. Operates after all sections are completed.\n\n",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 2,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "{sys.query}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Editor Agent**, the final agent in a multi-agent SEO blog writing workflow. You are responsible for finalizing the blog post for both human readability and SEO effectiveness.\n\n# Goals\n\n1. Polish the entire blog content for clarity, coherence, and style.\n\n2. Improve transitions between sections, ensure logical flow.\n\n3. Verify that keywords are used appropriately and effectively.\n\n4. Conduct a lightweight SEO audit \u2014 checking keyword density, structure (H1/H2/H3), and overall searchability.\n\n\n\n## Integration Responsibilities\n\n- Maintain alignment with Lead Agent's original intent and audience\n\n- Preserve the structure and keyword strategy from Outline Agent\n\n- Enhance and polish Body Agent's content without altering core information\n\n# Style Guidelines\n\n- Be precise. Avoid bloated or vague language.\n\n- Maintain an informative and engaging tone, suitable to the target audience.\n\n- Do not remove keywords unless absolutely necessary for clarity.\n\n- Ensure paragraph flow and section continuity.\n\n\n\n# Input\n\nYou will receive:\n\n- Full blog content, written section-by-section\n\n- Original outline with suggested keywords\n\n- Target audience and writing type\n\n# Output Format\n\n```markdown\n\n[The revised, fully polished blog post content goes here.]\n",
"temperature": 0.2,
"temperatureEnabled": true,
"tools": [],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "This is the order you need to send to the agent.",
"visual_files_var": ""
}
}
],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "",
"visual_files_var": ""
}
},
"upstream": [
"begin"
]
},
"Message:ModernSwansThrow": {
"downstream": [],
"obj": {
"component_name": "Message",
"params": {
"content": [
"{Agent:LuckyApplesGrab@content}"
]
}
},
"upstream": [
"Agent:LuckyApplesGrab"
]
},
"begin": {
"downstream": [
"Agent:LuckyApplesGrab"
],
"obj": {
"component_name": "Begin",
"params": {
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
"prologue": "Hi! I'm your SEO blog assistant.\n\nTo get started, please tell me:\n1. What topic you want the blog to cover\n2. Who is the target audience\n3. What you hope to achieve with this blog (e.g., SEO traffic, teaching beginners, promoting a product)\n"
}
},
"upstream": []
}
},
"globals": {
"sys.conversation_turns": 0,
"sys.files": [],
"sys.query": "",
"sys.user_id": ""
},
"graph": {
"edges": [
{
"data": {
"isHovered": false
},
"id": "xy-edge__beginstart-Agent:LuckyApplesGrabend",
"source": "begin",
"sourceHandle": "start",
"target": "Agent:LuckyApplesGrab",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:LuckyApplesGrabstart-Message:ModernSwansThrowend",
"source": "Agent:LuckyApplesGrab",
"sourceHandle": "start",
"target": "Message:ModernSwansThrow",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:LuckyApplesGrabagentBottom-Agent:SlickSpidersTurnagentTop",
"source": "Agent:LuckyApplesGrab",
"sourceHandle": "agentBottom",
"target": "Agent:SlickSpidersTurn",
"targetHandle": "agentTop"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:LuckyApplesGrabagentBottom-Agent:IcyPawsRescueagentTop",
"source": "Agent:LuckyApplesGrab",
"sourceHandle": "agentBottom",
"target": "Agent:IcyPawsRescue",
"targetHandle": "agentTop"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:LuckyApplesGrabagentBottom-Agent:TenderAdsAllowagentTop",
"source": "Agent:LuckyApplesGrab",
"sourceHandle": "agentBottom",
"target": "Agent:TenderAdsAllow",
"targetHandle": "agentTop"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:SlickSpidersTurntool-Tool:ThreeWallsRingend",
"source": "Agent:SlickSpidersTurn",
"sourceHandle": "tool",
"target": "Tool:ThreeWallsRing",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:IcyPawsRescuetool-Tool:FloppyJokesItchend",
"source": "Agent:IcyPawsRescue",
"sourceHandle": "tool",
"target": "Tool:FloppyJokesItch",
"targetHandle": "end"
}
],
"nodes": [
{
"data": {
"form": {
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
"prologue": "Hi! I'm your SEO blog assistant.\n\nTo get started, please tell me:\n1. What topic you want the blog to cover\n2. Who is the target audience\n3. What you hope to achieve with this blog (e.g., SEO traffic, teaching beginners, promoting a product)\n"
},
"label": "Begin",
"name": "begin"
},
"dragging": false,
"id": "begin",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": 38.19445084117184,
"y": 183.9781832844475
},
"selected": false,
"sourcePosition": "left",
"targetPosition": "right",
"type": "beginNode"
},
{
"data": {
"form": {
"delay_after_error": 1,
"description": "",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 3,
"max_tokens": 4096,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "The user query is {sys.query}",
"role": "user"
}
],
"sys_prompt": "# Role\n\nYou are the **Lead Agent**, responsible for initiating the multi-agent SEO blog generation process. You will receive the user\u2019s topic and blog goal, interpret the intent, and coordinate the downstream writing agents.\n\n# Goals\n\n1. Parse the user's initial input.\n\n2. Generate a high-level blog intent summary and writing plan.\n\n3. Provide clear instructions to the following Sub_Agents:\n\n - `Outline Agent` \u2192 Create the blog outline.\n\n - `Body Agent` \u2192 Write all sections based on outline.\n\n - `Editor Agent` \u2192 Polish and finalize the blog post.\n\n4. Merge outputs into a complete, readable blog draft in Markdown format.\n\n# Input\n\nYou will receive:\n\n- Blog topic\n\n- Target audience\n\n- Blog goal (e.g., SEO, education, product marketing)\n\n# Output Format\n\n```markdown\n\n## Parsed Writing Plan\n\n- **Topic**: [Extracted from user input]\n\n- **Audience**: [Summarized from user input]\n\n- **Intent**: [Inferred goal and style]\n\n- **Blog Type**: [e.g., Tutorial / Informative Guide / Marketing Content]\n\n- **Long-tail Keywords**: \n\n - keyword 1\n\n - keyword 2\n\n - keyword 3\n\n - ...\n\n## Instructions for Outline Agent\n\nPlease generate a structured outline including H2 and H3 headings. Assign 1\u20132 relevant keywords to each section. Keep it aligned with the user\u2019s intent and audience level.\n\n## Instructions for Body Agent\n\nWrite the full content based on the outline. Each section should be concise (500\u2013600 words), informative, and optimized for SEO. Use `Tavily Search` only when additional examples or context are needed.\n\n## Instructions for Editor Agent\n\nReview and refine the combined content. Improve transitions, ensure keyword integration, and add a meta title + meta description. Maintain Markdown formatting.\n\n\n## Guides\n\n- Do not generate blog content directly.\n\n- Focus on correct intent recognition and instruction generation.\n\n- Keep communication to downstream agents simple, scoped, and accurate.\n\n\n## Input Examples (and how to handle them)\n\nInput: \"I want to write about RAGFlow.\"\n\u2192 Output: Informative Guide, Audience: AI developers, Intent: explain what RAGFlow is and its use cases\n\nInput: \"Need a blog to promote our prompt design tool.\"\n\u2192 Output: Marketing Content, Audience: product managers or tool adopters, Intent: raise awareness and interest in the product\n\nInput: \"How to get more Google traffic using AI\"\n\u2192 Output: How-to, Audience: SEO marketers, Intent: guide readers on applying AI for SEO growth",
"temperature": "0.1",
"temperatureEnabled": true,
"tools": [],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "",
"visual_files_var": ""
},
"label": "Agent",
"name": "Lead Agent"
},
"id": "Agent:LuckyApplesGrab",
"measured": {
"height": 84,
"width": 200
},
"position": {
"x": 350,
"y": 200
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"content": [
"{Agent:LuckyApplesGrab@content}"
]
},
"label": "Message",
"name": "Response"
},
"dragging": false,
"id": "Message:ModernSwansThrow",
"measured": {
"height": 56,
"width": 200
},
"position": {
"x": 669.394830760932,
"y": 190.72421137520644
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "messageNode"
},
{
"data": {
"form": {
"delay_after_error": 1,
"description": "Generates a clear and SEO-friendly blog outline using H2/H3 headings based on the topic, audience, and intent provided by the lead agent. Each section includes suggested keywords for optimized downstream writing.\n",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.3,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 2,
"max_tokens": 4096,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Balance",
"presencePenaltyEnabled": false,
"presence_penalty": 0.2,
"prompts": [
{
"content": "{sys.query}",
"role": "user"
}
],
"sys_prompt": "# Role\n\nYou are the **Outline Agent**, a sub-agent in a multi-agent SEO blog writing system. You operate under the instruction of the `Lead Agent`, and your sole responsibility is to create a clear, well-structured, and SEO-optimized blog outline.\n\n# Tool Access:\n\n- You have access to a search tool called `Tavily Search`.\n\n- If you are unsure how to structure a section, you may call this tool to search for related blog outlines or content from Google.\n\n- Do not overuse it. Your job is to extract **structure**, not to write paragraphs.\n\n\n# Goals\n\n1. Create a well-structured outline with appropriate H2 and H3 headings.\n\n2. Ensure logical flow from introduction to conclusion.\n\n3. Assign 1\u20132 suggested long-tail keywords to each major section for SEO alignment.\n\n4. Make the structure suitable for downstream paragraph writing.\n\n\n\n\n#Note\n\n- Use concise, scannable section titles.\n\n- Do not write full paragraphs.\n\n- Prioritize clarity, logical progression, and SEO alignment.\n\n\n\n- If the blog type is \u201cTutorial\u201d or \u201cHow-to\u201d, include step-based sections.\n\n\n# Input\n\nYou will receive:\n\n- Writing Type (e.g., Tutorial, Informative Guide)\n\n- Target Audience\n\n- User Intent Summary\n\n- 3\u20135 long-tail keywords\n\n\nUse this information to design a structure that both informs readers and maximizes search engine visibility.\n\n# Output Format\n\n```markdown\n\n## Blog Title (suggested)\n\n[Give a short, SEO-friendly title suggestion]\n\n## Outline\n\n### Introduction\n\n- Purpose of the article\n\n- Brief context\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 1]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 2]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 3]\n\n- [Optional H3 Subsection Title A]\n\n - [Explanation of sub-point]\n\n- [Optional H3 Subsection Title B]\n\n - [Explanation of sub-point]\n\n- **Suggested keywords**: [keyword1]\n\n### Conclusion\n\n- Recap key takeaways\n\n- Optional CTA (Call to Action)\n\n- **Suggested keywords**: [keyword3]\n\n",
"temperature": 0.5,
"temperatureEnabled": true,
"tools": [
{
"component_name": "TavilySearch",
"name": "TavilySearch",
"params": {
"api_key": "",
"days": 7,
"exclude_domains": [],
"include_answer": false,
"include_domains": [],
"include_image_descriptions": false,
"include_images": false,
"include_raw_content": true,
"max_results": 5,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
},
"json": {
"type": "Array<Object>",
"value": []
}
},
"query": "sys.query",
"search_depth": "basic",
"topic": "general"
}
}
],
"topPEnabled": false,
"top_p": 0.85,
"user_prompt": "This is the order you need to send to the agent.",
"visual_files_var": ""
},
"label": "Agent",
"name": "Outline Agent"
},
"dragging": false,
"id": "Agent:SlickSpidersTurn",
"measured": {
"height": 84,
"width": 200
},
"position": {
"x": 100.60137004146719,
"y": 411.67654846431367
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"delay_after_error": 1,
"description": "Writes the full blog content section-by-section following the outline structure. It integrates target keywords naturally and uses Tavily Search only when additional facts or examples are needed.\n",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 3,
"max_tokens": 4096,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "{sys.query}",
"role": "user"
}
],
"sys_prompt": "# Role\n\nYou are the **Body Agent**, a sub-agent in a multi-agent SEO blog writing system. You operate under the instruction of the `Lead Agent`, and your job is to write the full blog content based on the outline created by the `OutlineWriter_Agent`.\n\n\n\n# Tool Access:\n\nYou can use the `Tavily Search` tool to retrieve relevant content, statistics, or examples to support each section you're writing.\n\nUse it **only** when the provided outline lacks enough information, or if the section requires factual grounding.\n\nAlways cite the original link or indicate source where possible.\n\n\n# Goals\n\n1. Write each section (based on H2/H3 structure) as a complete and natural blog paragraph.\n\n2. Integrate the suggested long-tail keywords naturally into each section.\n\n3. When appropriate, use the `Tavily Search` tool to enrich your writing with relevant facts, examples, or quotes.\n\n4. Ensure each section is clear, engaging, and informative, suitable for both human readers and search engines.\n\n\n# Style Guidelines\n\n- Write in a tone appropriate to the audience. Be explanatory, not promotional, unless it's a marketing blog.\n\n- Avoid generic filler content. Prioritize clarity, structure, and value.\n\n- Ensure SEO keywords are embedded seamlessly, not forcefully.\n\n\n\n- Maintain writing rhythm. Vary sentence lengths. Use transitions between ideas.\n\n\n# Input\n\n\nYou will receive:\n\n- Blog title\n\n- Structured outline (including section titles, keywords, and descriptions)\n\n- Target audience\n\n- Blog type and user intent\n\nYou must **follow the outline strictly**. Write content **section-by-section**, based on the structure.\n\n\n# Output Format\n\n```markdown\n\n## H2: [Section Title]\n\n[Your generated content for this section \u2014 500-600 words, using keywords naturally.]\n\n",
"temperature": 0.2,
"temperatureEnabled": true,
"tools": [
{
"component_name": "TavilySearch",
"name": "TavilySearch",
"params": {
"api_key": "",
"days": 7,
"exclude_domains": [],
"include_answer": false,
"include_domains": [],
"include_image_descriptions": false,
"include_images": false,
"include_raw_content": true,
"max_results": 5,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
},
"json": {
"type": "Array<Object>",
"value": []
}
},
"query": "sys.query",
"search_depth": "basic",
"topic": "general"
}
}
],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "This is the order you need to send to the agent.",
"visual_files_var": ""
},
"label": "Agent",
"name": "Body Agent"
},
"dragging": false,
"id": "Agent:IcyPawsRescue",
"measured": {
"height": 84,
"width": 200
},
"position": {
"x": 439.3374395738501,
"y": 366.1408588516909
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"delay_after_error": 1,
"description": "Polishes and finalizes the entire blog post. Enhances clarity, checks keyword usage, improves flow, and generates a meta title and description for SEO. Operates after all sections are completed.\n\n",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "deepseek-chat@DeepSeek",
"maxTokensEnabled": false,
"max_retries": 3,
"max_rounds": 2,
"max_tokens": 4096,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "{sys.query}",
"role": "user"
}
],
"sys_prompt": "# Role\n\nYou are the **Editor Agent**, the final agent in a multi-agent SEO blog writing workflow. You are responsible for finalizing the blog post for both human readability and SEO effectiveness.\n\n# Goals\n\n1. Polish the entire blog content for clarity, coherence, and style.\n\n2. Improve transitions between sections, ensure logical flow.\n\n3. Verify that keywords are used appropriately and effectively.\n\n4. Conduct a lightweight SEO audit \u2014 checking keyword density, structure (H1/H2/H3), and overall searchability.\n\n\n\n## Integration Responsibilities\n\n- Maintain alignment with Lead Agent's original intent and audience\n\n- Preserve the structure and keyword strategy from Outline Agent\n\n- Enhance and polish Body Agent's content without altering core information\n\n# Style Guidelines\n\n- Be precise. Avoid bloated or vague language.\n\n- Maintain an informative and engaging tone, suitable to the target audience.\n\n- Do not remove keywords unless absolutely necessary for clarity.\n\n- Ensure paragraph flow and section continuity.\n\n\n\n# Input\n\nYou will receive:\n\n- Full blog content, written section-by-section\n\n- Original outline with suggested keywords\n\n- Target audience and writing type\n\n# Output Format\n\n```markdown\n\n[The revised, fully polished blog post content goes here.]\n",
"temperature": 0.2,
"temperatureEnabled": true,
"tools": [],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "This is the order you need to send to the agent.",
"visual_files_var": ""
},
"label": "Agent",
"name": "Editor Agent"
},
"dragging": false,
"id": "Agent:TenderAdsAllow",
"measured": {
"height": 84,
"width": 200
},
"position": {
"x": 730.8513124709204,
"y": 327.351197329827
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"description": "This is an agent for a specific task.",
"user_prompt": "This is the order you need to send to the agent."
},
"label": "Tool",
"name": "flow.tool_0"
},
"dragging": false,
"id": "Tool:ThreeWallsRing",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": -26.93431957115564,
"y": 531.4384641920368
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "toolNode"
},
{
"data": {
"form": {
"description": "This is an agent for a specific task.",
"user_prompt": "This is the order you need to send to the agent."
},
"label": "Tool",
"name": "flow.tool_1"
},
"dragging": false,
"id": "Tool:FloppyJokesItch",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": 414.6786783453011,
"y": 499.39483076093194
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "toolNode"
},
{
"data": {
"form": {
"text": "This is a multi-agent version of the SEO blog generation workflow. It simulates a small team of AI \u201cwriters\u201d, where each agent plays a specialized role \u2014 just like a real editorial team.\n\nInstead of one AI doing everything in order, this version uses a **Lead Agent** to assign tasks to different sub-agents, who then write and edit the blog in parallel. The Lead Agent manages everything and produces the final output.\n\n### Why use multi-agent format?\n\n- Better control over each stage of writing \n- Easier to reuse agents across tasks \n- More human-like workflow (planning \u2192 writing \u2192 editing \u2192 publishing) \n- Easier to scale and customize for advanced users\n\n### Flow Summary:\n\n1. `LeadWriter_Agent` takes your input and creates a plan\n2. It sends that plan to:\n - `OutlineWriter_Agent`: build blog structure\n - `BodyWriter_Agent`: write full content\n - `FinalEditor_Agent`: polish and finalize\n3. `LeadWriter_Agent` collects all results and outputs the final blog post\n"
},
"label": "Note",
"name": "Workflow Overall Description"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 208,
"id": "Note:ElevenVansInvent",
"measured": {
"height": 208,
"width": 518
},
"position": {
"x": -336.6586460874556,
"y": 113.43253511344867
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "noteNode",
"width": 518
},
{
"data": {
"form": {
"text": "**Purpose**: \nThis is the central agent that controls the entire writing process.\n\n**What it does**:\n- Reads your blog topic and intent\n- Generates a clear writing plan (topic, audience, goal, keywords)\n- Sends instructions to all sub-agents\n- Waits for their responses and checks quality\n- If any section is missing or weak, it can request a rewrite\n- Finally, it assembles all parts into a complete blog and sends it back to you\n"
},
"label": "Note",
"name": "Lead Agent"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 146,
"id": "Note:EmptyClubsGreet",
"measured": {
"height": 146,
"width": 334
},
"position": {
"x": 390.1408623279084,
"y": 2.6521144030202493
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "noteNode",
"width": 334
},
{
"data": {
"form": {
"text": "**Purpose**: \nThis agent is responsible for building the blog's structure. It creates an outline that shows what the article will cover and how it's organized.\n\n**What it does**:\n- Suggests a blog title that matches the topic and keywords \n- Breaks the article into sections using H2 and H3 headers \n- Adds a short description of what each section should include \n- Assigns SEO keywords to each section for better search visibility \n- Uses search data (via Tavily Search) to find how similar blogs are structured"
},
"label": "Note",
"name": "Outline Agent"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 157,
"id": "Note:CurlyTigersDouble",
"measured": {
"height": 157,
"width": 394
},
"position": {
"x": -60.03139680691618,
"y": 595.8208080534818
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "noteNode",
"width": 394
},
{
"data": {
"form": {
"text": "**Purpose**: \nThis agent is in charge of writing the full blog content, section by section, based on the outline it receives.\n\n**What it does**:\n- Takes each section heading from the outline (H2 / H3)\n- Writes a complete paragraph (150\u2013220 words) under each section\n- Naturally includes the keywords provided for that section\n- Uses the Tavily Search tool to add real-world examples, definitions, or facts if needed\n- Makes sure each section is clear, useful, and easy to read\n"
},
"label": "Note",
"name": "Body Agent"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 164,
"id": "Note:StrongKingsCamp",
"measured": {
"height": 164,
"width": 408
},
"position": {
"x": 446.54943226110845,
"y": 590.9443887062529
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "noteNode",
"width": 408
},
{
"data": {
"form": {
"text": "**Purpose**: \nThis agent reviews, polishes, and finalizes the blog post written by the BodyWriter_Agent. It ensures everything is clean, smooth, and SEO-compliant.\n\n**What it does**:\n- Improves grammar, sentence flow, and transitions \n- Makes sure the content reads naturally and professionally \n- Checks whether keywords are present and well integrated (but not overused) \n- Verifies that the structure follows the correct H1/H2/H3 format \n"
},
"label": "Note",
"name": "Editor Agent"
},
"dragHandle": ".note-drag-handle",
"dragging": false,
"height": 147,
"id": "Note:OpenOttersShow",
"measured": {
"height": 147,
"width": 357
},
"position": {
"x": 976.6858726228803,
"y": 422.7404806291804
},
"resizing": false,
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "noteNode",
"width": 357
}
]
},
"history": [],
"messages": [],
"path": [],
"retrieval": []
},
"avatar": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAAwADADASIAAhEBAxEB/8QAGQAAAwEBAQAAAAAAAAAAAAAABgkKBwUI/8QAMBAAAAYCAQIEBQQCAwAAAAAAAQIDBAUGBxEhCAkAEjFBFFFhcaETFiKRFyOx8PH/xAAaAQACAwEBAAAAAAAAAAAAAAACAwABBgQF/8QALBEAAgIBAgUCBAcAAAAAAAAAAQIDBBEFEgATITFRIkEGIzJhFBUWgaGx8P/aAAwDAQACEQMRAD8AfF2hez9089t7pvxgQMa1Gb6qZ6oQE9m/NEvCIStyPfJSOF/M1epzMugo/qtMqbiRc1mJjoJKCLMNIxKcsLJedfO1Ct9cI63x9fx6CA/19t+oh4LFA5HfuAgP/A8eOIsnsTBrkBHXA7+v53+Q+ficTgJft9gIgA+/P9/1r342O/YA8A8k3/if+IbAN7+2/f8AAiI6H19PGoPyESTMZQPKUAHkQEN+3r9dh78/YPGUTk2wb/qAZZIugH1OHH5DjkdfbnWw2DsOxPj+xjrnx2H39unBopJGBn9s+PHv1HXjPJtH+J+B40O9a16h/wB/92j/ALrPa/wR104UyAobHlXhuo2HrEtK4qy3CwjKOuJLRHJLSkXWrFKs/gVrJVrE8TUiH8bPrP20UEu8m4hNpMJJuTOfnbUw/kUqyZgMHGjAO9+mtDsQ53sdcB6eMhnpEjhNQxRKICAgHy5+/roOdjr7c+J6O4x07dx484/n7nzw1gexBGfIPkZ/3t39uGpqc6+fP5/Ht8vGFZCzJjWpWuBxvO2yPjrtclUUK7BqmUI4fuASeyhG5FzFI0Bw4aQ0iZNoDgzvRW4qtyFkI4XmwyEk2YNnDp0sVBu3IUyy5iqH8gqKERSIRNIii67hddRJs1at01Xbx2sgzZoLu10UFJR+4V1A5cxF3FqNcLvjwcno43uuLrOxZYjujaClcb4QQfxEizpFiQyM9olcueRnjC2ZMt9iY06zL0qytrMSqSOVGsfHMaGhZ3l4lSRI2MqE74zJvRTveNFWWIh3RWw+XCAM5icKQLrCH57T17FhErSlRXnWvyZXKQwWJ3eraD14p5YuZCFgacskK2oGkVuKO5GYTHzf7DaD12cBD3DgPOIDrWw9PnrXPgDkpVsUDGMG+DD6E9gHXIjrYjwUPQTCXYgHPhIV974+F6E1hpC14Yzmzj56YaQEeZhXsayD1zLPW7pygxaMf81Nzu1iJsnIuDIKnaJAkPldqrHaoORZ73tMVEbFdSXT9nVgRQgnBq6j8e/HCIEATpAnH5KlmRVkFRFJwks/bqImSXJ5VFyA3N6Ikh3bCW3YHp5cowOmCfTgA+xJCnrjtwHKcLvJj2ZGcTRFj19kEhckdzgEjKnABGSSzdc1Fe5byXXGNjKdvRcw5NxvLidNZFFCxUa62KrzMaChw8hhYScFJtROAgmuLByq1MsgkZYPaVVuDe0wraRaqAdJwgRQo+YR8xTlAQNx6b49w41vXiJpCalLh1jZhyrTqRM4+jstdRmYryNkydLQRWg1LNGcWd5jIFFvCythlIySa0mNu74sKRQtaWsTmupqPItw0lE52ufpyYzrSkx6cw5bLmBEpkTsz+dt8P5QFuCRtAIkBH9MuwKHICIaDQhnojMs9mKaeGcrMxXlQtAYkdVljimRrE5MqI4zL8oSqQ6wxjodBqK05qdK3Vo3aCSVkBW7bjuC1NFJJBPaqyx6fp6pWkliYLXK2XrukkRu2CCVoSWMgsdMyySKwoLFcIGWSTUMg4IBgTcICoBhRcplMcpFkhIqQp1ClMBTmA0Zfe1zpjvHfXff65bZlzXpB3jjGTgiirmPjAfs16PHqHeQ75Wbj3xxZpOEkV3LRJJSPdomUBZISJLncV2k+8D07dxXp7xsYuTapA9UkJUYWIzNhadnWEZeCXGLQQiJi1ViHfhHL2unWh+mlORsrW0JFpEFnGVfm1mU4kq0FY3eD6corJncv6dr5NLSMNXVaTUksjTiMnaq8uFfSVuDyiJ1iZpy0LOJtpa3YfkcQ5fdozyxI2m5qqcrHN61YYmHsh6v3o9ParYmYJEtlhIx6+gUbjgD23M6oqg92YL0JyF6Bps+qDValVA9h9Lj5SZI3SHXdEQlj1wiQtLLIe6pGzjO3BlBkK1hxpblLVH5wdW0BcFKf/JwRtjsot2z8omaSdxbzzk1iEjsE0AM9rrRZNRIrVyo7dGO6E+oh8axLlJ5H5VaJKx7ePRGFbW6vUeFfHQIWPTI9Tm7HHfuhqY7E6C7JFqUzM6iZXIoncNxX7+bIVdJnTT48x3OQU1krIDW3UeixVhyISzYz6cadY5Xph6TseRNTRsTElzzBn9Vlly0TAERsdgnMYyLROjyFbg5R4ZlsGaMT4yNi2Zlq1GwjZB3jq0PsaJfA3t0jL0W0Y9xf1V41lpWckXMLaZiwxuKYPqc6LlHdkeRF+Qxswx5ASDqBVrsL+2A/N6SiCbYymV2BywJiMZj3GRRMTnL+lVyHCll3R7Szv0vqXMtQ74T+HijljIScLaEpkKCB3rqMBIi0jPs5JeOKTZMZEi5VVnouzy0k3jXjWSMlY6UcVGDxlKMVDqx91SILWSi3D2KdgYy3kP8E9X/AE1SnRXBNdNRMlefT6g7aY6giK+cPLGNg0bY68rcnpsNh9PqIBve/EcPQ3WIq2dR9
3xpSgk5SAZ9R6MLAOZFUkpLSUDXp6/KPpGUkmTdswlnKnwbl5ITMdGwcXJi7LKsqzUmT5tWYmkXuF9wjBvb76b7dHheazJ9RElUJOCxViuMlUJC0Gtz6PKyjLBY4qMWUe12r1xZ6lOyT6XPEBKN2CkTDOlZd02TBdTMt7Upx2knrkdCv1UKjDKn1A7XBYH6SCOOrWn5Oi/DtRiu+GleRthDL8rXdVjZlcfWrSIxVlGGGCOnH//Z"
}
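The template above is easier to read once you notice that the edge handles encode two distinct relationships: `start` to `end` edges carry the conversational data flow (begin to Lead Agent to Message), `agentBottom` to `agentTop` edges attach the Outline, Body, and Editor sub-agents to the Lead Agent, and `tool` edges bind TavilySearch tool nodes to the agents allowed to call them. A minimal sketch of recovering that hierarchy from the DSL (the file path below is hypothetical; the edge-handle names are taken from the JSON above):

```python
import json
from collections import defaultdict

def agent_hierarchy(dsl: dict) -> dict:
    """Group sub-agents and tool nodes under their parent agent,
    keyed on the sourceHandle of each edge in the DSL graph."""
    tree = defaultdict(lambda: {"sub_agents": [], "tools": []})
    for edge in dsl["graph"]["edges"]:
        handle = edge.get("sourceHandle")
        if handle == "agentBottom":      # parent agent -> sub-agent
            tree[edge["source"]]["sub_agents"].append(edge["target"])
        elif handle == "tool":           # agent -> attached tool node
            tree[edge["source"]]["tools"].append(edge["target"])
    return dict(tree)

# Hypothetical path; the template's actual filename is not shown in this hunk.
with open("agent/templates/seo_blog.json") as f:
    template = json.load(f)

print(agent_hierarchy(template["dsl"]))
# Expected for this template:
#   Agent:LuckyApplesGrab  -> sub_agents: SlickSpidersTurn, IcyPawsRescue, TenderAdsAllow
#   Agent:SlickSpidersTurn -> tools: Tool:ThreeWallsRing
#   Agent:IcyPawsRescue    -> tools: Tool:FloppyJokesItch
```

Note that Agent:TenderAdsAllow (the Editor Agent) has no `tool` edge, which matches its empty "tools": [] list in the form above.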
268 agent/templates/image_lingo.json Normal file
File diff suppressed because one or more lines are too long
331 agent/templates/knowledge_base_report.json Normal file
@@ -0,0 +1,331 @@
{
"id": 20,
"title": {
"en": "Report Agent Using Knowledge Base",
"zh": "知识库检索智能体"},
"description": {
"en": "A report generation assistant using local knowledge base, with advanced capabilities in task planning, reasoning, and reflective analysis. Recommended for academic research paper Q&A",
"zh": "一个使用本地知识库的报告生成助手,具备高级能力,包括任务规划、推理和反思性分析。推荐用于学术研究论文问答。"},
"canvas_type": "Agent",
"dsl": {
"components": {
"Agent:NewPumasLick": {
"downstream": [
"Message:OrangeYearsShine"
],
"obj": {
"component_name": "Agent",
"params": {
"delay_after_error": 1,
"description": "",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
"maxTokensEnabled": true,
"max_retries": 3,
"max_rounds": 3,
"max_tokens": 128000,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "# User Query\n {sys.query}",
"role": "user"
}
],
"sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. 
**LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
"temperature": "0.1",
"temperatureEnabled": true,
"tools": [
{
"component_name": "Retrieval",
"name": "Retrieval",
"params": {
"cross_languages": [],
"description": "",
"empty_response": "",
"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
}
},
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
"top_n": 8,
"use_kg": false
}
}
],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "",
"visual_files_var": ""
}
},
"upstream": [
"begin"
]
},
"Message:OrangeYearsShine": {
"downstream": [],
"obj": {
"component_name": "Message",
"params": {
"content": [
"{Agent:NewPumasLick@content}"
]
}
},
"upstream": [
"Agent:NewPumasLick"
]
},
"begin": {
"downstream": [
"Agent:NewPumasLick"
],
"obj": {
"component_name": "Begin",
"params": {
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
"prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
}
},
"upstream": []
}
},
"globals": {
"sys.conversation_turns": 0,
"sys.files": [],
"sys.query": "",
"sys.user_id": ""
},
"graph": {
"edges": [
{
"data": {
"isHovered": false
},
"id": "xy-edge__beginstart-Agent:NewPumasLickend",
"source": "begin",
"sourceHandle": "start",
"target": "Agent:NewPumasLick",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:NewPumasLickstart-Message:OrangeYearsShineend",
"markerEnd": "logo",
"source": "Agent:NewPumasLick",
"sourceHandle": "start",
"style": {
"stroke": "rgba(91, 93, 106, 1)",
"strokeWidth": 1
},
"target": "Message:OrangeYearsShine",
"targetHandle": "end",
"type": "buttonEdge",
"zIndex": 1001
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:NewPumasLicktool-Tool:AllBirdsNailend",
"selected": false,
"source": "Agent:NewPumasLick",
"sourceHandle": "tool",
"target": "Tool:AllBirdsNail",
"targetHandle": "end"
}
],
"nodes": [
{
"data": {
"form": {
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
"prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
},
"label": "Begin",
"name": "begin"
},
"dragging": false,
"id": "begin",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": -9.569875358221438,
"y": 205.84018385864917
},
"selected": false,
"sourcePosition": "left",
"targetPosition": "right",
"type": "beginNode"
},
{
"data": {
"form": {
"content": [
"{Agent:NewPumasLick@content}"
]
},
"label": "Message",
"name": "Response"
},
"dragging": false,
"id": "Message:OrangeYearsShine",
"measured": {
"height": 56,
"width": 200
},
"position": {
"x": 734.4061285881053,
"y": 199.9706031723009
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "messageNode"
},
{
"data": {
"form": {
"delay_after_error": 1,
"description": "",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
"maxTokensEnabled": true,
"max_retries": 3,
"max_rounds": 3,
"max_tokens": 128000,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "# User Query\n {sys.query}",
"role": "user"
}
],
"sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. 
**LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
"temperature": "0.1",
"temperatureEnabled": true,
"tools": [
{
"component_name": "Retrieval",
"name": "Retrieval",
"params": {
"cross_languages": [],
"description": "",
"empty_response": "",
"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
}
},
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
"top_n": 8,
"use_kg": false
}
}
],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "",
"visual_files_var": ""
},
"label": "Agent",
"name": "Knowledge Base Agent"
},
"dragging": false,
"id": "Agent:NewPumasLick",
"measured": {
"height": 84,
"width": 200
},
"position": {
"x": 347.00048227952215,
"y": 186.49109364794631
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "agentNode"
},
{
"data": {
"form": {
"description": "This is an agent for a specific task.",
"user_prompt": "This is the order you need to send to the agent."
},
"label": "Tool",
"name": "flow.tool_10"
},
"dragging": false,
"id": "Tool:AllBirdsNail",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": 220.24819746977118,
"y": 403.31576836482583
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "toolNode"
}
]
},
"history": [],
"memory": [],
"messages": [],
"path": [],
"retrieval": []
},
"avatar": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAH0klEQVR4nO2ZC1BU1wGG/3uRp/IygG+DGK0GOjE1U6cxI4tT03Y0E+kENbaJbKpj60wzgNMwnTjuEtu0miGasY+0krI202kMVEnVxtoOLG00oVa0LajVBDcSEI0REFBgkZv/3GWXfdzdvctuHs7kmzmec9//d+45914XCXc4Xwjk1+59VJGGF7C5QAFSWBvgyWmWLl7IKiny6QNL173B5YjB84bOyrpKA4B1DLySdQpLKAiZGtZ7a/KMVoQJz6UfEZyhTWwaEBmssiLvCueu6BJg8EwFqGTTAC+uvNWC9w82sRWcux/JwaSHstjywcogRt4RG0KExwWG4QsVYCebKSwe3L5lR9OOWjyzfg2WL/0a1/jncO3b2FHxGnKeWYqo+Giu8UEMrWJKWBACPMY/DG+63txhvnKshUu+DF2/hayMDFRsL+VScDb++AVc6OjAuInxXPJl2tfnIikrzUyJMi7qQmLRhOEr2fOFbX/7P6STF7BqoWevfdij4NWGQfx+57OYO2sG1wSnsek8Nm15EU8sikF6ouelXz9ph7JwDqYt+5IIZaGEkauDIrH4wPBmhjexCSEws+VdVG1M4NIoj+2xYzBuJtavWcEl/VS8dggx/ZdQvcGzQwp+cxOXsu5RBQQMVkYJM4LA/Txh+ELFMWFVPARS5kFiabZdx8Olh7l17BzdvhzZmROhdJ3j6D/nIyBgOCMlLAgA9xmF4TMV4BSbrgnrLiBl5rOsRCRRbDUsBzQFiJjY91PCBj9w+yiP1lXWsTLAjc9YQGB9I8+Yx1oTiUWFvW9QgDo2PdASaDp/EQ8/sRnhcPTVcuTMncXwQQVESL9DidscaPW+QEtAICRu9PSxFTpJiePV8AI9AsTvXZBY/Pa+wJ9ApNApIILm8S5Y4QXXQwhYFH6csemDP4G3G5v579i5d04mknknQhDYS4HCrCVr/mC3D305KnbCEpvVIia5Onw6WaWw+KAl0Np+FUXbdiMcyoqfUoeRHoFrJ1uRtnBG1/9Mf/3LtElp+VwF2wcd7woJib1vUPwMH4GWQCQJJtBa/V9cPmFD8uQUpMdNGDhY8bNYrobh8acHu270/l0ImJWRt64Wn6WACN9z5gq2lXwPW8pfweT0icP/fH23vO9QLYq3/QKyLBmFQI3CUcT9NdESEEPItKsSN3r7MBaSJoxHWZERM6ZmMLy2gDP8/pd/og418dTL37hFSUpMUC5f+UiWZcnY9s5+ixCwUiCXx2iiJdDNx6f4pgkH8Q3lbxK7h8+enoHha1cRNdMp8axiHxo6+/5bVdk8DSROYIW1X7QEIom3wHD3gEf4vu1bVYEJZeWQ0zJQvmcfyiv2QZak6raG/QWfK4Ez9mTc5v8xPMJfuojoxXmIX/9DOMe+FCWbcHu4BJJ0YEwCx0824bFNW9HesB+CqYu+jepfPYcHF+aoPXS8sQl/+vU2bgmOU2C+qRc9/YrrPPbGBtzavd0nvCxLxui4pJrBm911PFwak4CYA80cj+JCAiGUzYkmxrSY4N2c3GLi6UEIFL/wRxxqkhmHnTEpDQcrfq6ea+hcE8bNy3GFzyq4H22HW1Kd4WMSkg1jmsSRpKj0Rzhy4gNUv/y8Gjrv8SJK3OWScA+fMn/ysVPPvTmeh6nh1TcxBUJ+jEaKYr7N36x7h+Edj0pB6+WrLokn87+BrTt/p4ZPzZ6MM7/8R2//h33vOcNzdwgBMwVMbGvySQmo4a0NqOZccU7YmGXLEfPQUlUid/XT6B8YdIU/99vjsPcOdEhDsfOd4QVCwKB8yp8SWuG1njbTl83DpMWz1PCKAswuWPDI0e8WebyAJBbxNdrF7cls+hBpAb3h3XtehL/3+4u7D35rQwpP4YFTwMJ91rHpQyQFQgmf9sAMNL9Ur4afv/FBjIuPVj+n4YVTwMD96tj0IVICoYYXv/q1VJ1Sl8UveQyaRwErvOB6B5SwKhqP00gI6A0vhsycJ7/KIzxhyHqGN0ADbnNAAYOicRfCFdAb/p50Gbfuc/wy5w1D5lOghk0fuG0USlgVr7sQjoDe8C8WxKGKPy2KjzlvAQb02/sCbh+FApngX1QUtyeSuwDi0hxFByV7L+LIf3r5kvpp4PBr07Hqvn71Y85bgOG6WS2ggA1+4D6eUKKQApVsqngI6KSkqh9HzsoM/3zg8Oz5VQ9E8wjf30YFDGdkeAsCwH18oYRZGXk7C4HuYxcwe6rjQsFovzaEvoFxqNkTOPzMjGikJso8wsF77XYkLx6dAwxWxvBmBIH7aUMJi8J3w0DnTVz7dyvX6KPzVBt+kL8cmzesRq9ps2Z48bRJmOIapS7E4zM2lXNt5CcU6ID7+ocSZkqY2NRN6ysnsHbJEpR8ZwV6t5Yg+iuLELf2KVd48VwXQf3BQGUMb4ZOuH9gKFEIYJfiNrEDcXZHHV4q3YRv5i7ikgM94RlETNgihrcgBHhccCiRCf7VhBK5rAPyr9I/Y/WKPEyfksH/9NjQ2dODhsYzwcLXsypkeBtCRGLRDUUMAMyKHxEx4dtrzyP97nQMygripiQiKi4aSbPvQmKW7+OXF69ntYvBa1iPCYklZEZECsGm4ja0Ops7EJsaj4SprlU+8IJiqIjAFga3Ikx4vvAYkTGALxyWFArlsnbBC9Sz6mI5zWKNRGh3JJY7mjte4GOz+r4tkRbxQQAAAABJRU5ErkJggg=="
}
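Both report templates wire a single Retrieval tool with the same scoring knobs: up to top_k candidates are recalled, anything below similarity_threshold is dropped, keyword and vector similarity are blended by keywords_similarity_weight, and the best top_n chunks are handed to the agent. A minimal sketch of that ranking logic under the conventional weighted-sum assumption (an illustration only; RAGFlow's actual scorer may differ in detail):

```python
from dataclasses import dataclass

@dataclass
class Chunk:
    text: str
    keyword_sim: float   # term-matching similarity in [0, 1]
    vector_sim: float    # embedding cosine similarity in [0, 1]

def rank_chunks(chunks, keywords_similarity_weight=0.7,
                similarity_threshold=0.2, top_n=8):
    """Blend keyword and vector similarity, drop weak candidates,
    and keep the top_n chunks, mirroring the Retrieval params above.
    The weighted-sum blend is an assumption, not RAGFlow's exact code."""
    w = keywords_similarity_weight
    scored = [(w * c.keyword_sim + (1 - w) * c.vector_sim, c) for c in chunks]
    kept = [(s, c) for s, c in scored if s >= similarity_threshold]
    kept.sort(key=lambda sc: sc[0], reverse=True)
    return [c for _, c in kept[:top_n]]

# With the template's defaults, a chunk scoring 0.1 keyword / 0.4 vector
# blends to 0.7*0.1 + 0.3*0.4 = 0.19 and is filtered out by the 0.2 threshold.
```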
331 agent/templates/knowledge_base_report_r.json Normal file
@@ -0,0 +1,331 @@
{
"id": 21,
"title": {
"en": "Report Agent Using Knowledge Base",
"zh": "知识库检索智能体"},
"description": {
"en": "A report generation assistant using local knowledge base, with advanced capabilities in task planning, reasoning, and reflective analysis. Recommended for academic research paper Q&A",
"zh": "一个使用本地知识库的报告生成助手,具备高级能力,包括任务规划、推理和反思性分析。推荐用于学术研究论文问答。"},
"canvas_type": "Recommended",
"dsl": {
"components": {
"Agent:NewPumasLick": {
"downstream": [
"Message:OrangeYearsShine"
],
"obj": {
"component_name": "Agent",
"params": {
"delay_after_error": 1,
"description": "",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
"maxTokensEnabled": true,
"max_retries": 3,
"max_rounds": 3,
"max_tokens": 128000,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "# User Query\n {sys.query}",
"role": "user"
}
],
"sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. 
**LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
"temperature": "0.1",
"temperatureEnabled": true,
"tools": [
{
"component_name": "Retrieval",
"name": "Retrieval",
"params": {
"cross_languages": [],
"description": "",
"empty_response": "",
"kb_ids": [],
"keywords_similarity_weight": 0.7,
"outputs": {
"formalized_content": {
"type": "string",
"value": ""
}
},
"rerank_id": "",
"similarity_threshold": 0.2,
"top_k": 1024,
"top_n": 8,
"use_kg": false
}
}
],
"topPEnabled": false,
"top_p": 0.75,
"user_prompt": "",
"visual_files_var": ""
}
},
"upstream": [
"begin"
]
},
"Message:OrangeYearsShine": {
"downstream": [],
"obj": {
"component_name": "Message",
"params": {
"content": [
"{Agent:NewPumasLick@content}"
]
}
},
"upstream": [
"Agent:NewPumasLick"
]
},
"begin": {
"downstream": [
"Agent:NewPumasLick"
],
"obj": {
"component_name": "Begin",
"params": {
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
"prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
}
},
"upstream": []
}
},
"globals": {
"sys.conversation_turns": 0,
"sys.files": [],
"sys.query": "",
"sys.user_id": ""
},
"graph": {
"edges": [
{
"data": {
"isHovered": false
},
"id": "xy-edge__beginstart-Agent:NewPumasLickend",
"source": "begin",
"sourceHandle": "start",
"target": "Agent:NewPumasLick",
"targetHandle": "end"
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:NewPumasLickstart-Message:OrangeYearsShineend",
"markerEnd": "logo",
"source": "Agent:NewPumasLick",
"sourceHandle": "start",
"style": {
"stroke": "rgba(91, 93, 106, 1)",
"strokeWidth": 1
},
"target": "Message:OrangeYearsShine",
"targetHandle": "end",
"type": "buttonEdge",
"zIndex": 1001
},
{
"data": {
"isHovered": false
},
"id": "xy-edge__Agent:NewPumasLicktool-Tool:AllBirdsNailend",
"selected": false,
"source": "Agent:NewPumasLick",
"sourceHandle": "tool",
"target": "Tool:AllBirdsNail",
"targetHandle": "end"
}
],
"nodes": [
{
"data": {
"form": {
"enablePrologue": true,
"inputs": {},
"mode": "conversational",
"prologue": "\u4f60\u597d\uff01 \u6211\u662f\u4f60\u7684\u52a9\u7406\uff0c\u6709\u4ec0\u4e48\u53ef\u4ee5\u5e2e\u5230\u4f60\u7684\u5417\uff1f"
},
"label": "Begin",
"name": "begin"
},
"dragging": false,
"id": "begin",
"measured": {
"height": 48,
"width": 200
},
"position": {
"x": -9.569875358221438,
"y": 205.84018385864917
},
"selected": false,
"sourcePosition": "left",
"targetPosition": "right",
"type": "beginNode"
},
{
"data": {
"form": {
"content": [
"{Agent:NewPumasLick@content}"
]
},
"label": "Message",
"name": "Response"
},
"dragging": false,
"id": "Message:OrangeYearsShine",
"measured": {
"height": 56,
"width": 200
},
"position": {
"x": 734.4061285881053,
"y": 199.9706031723009
},
"selected": false,
"sourcePosition": "right",
"targetPosition": "left",
"type": "messageNode"
},
{
"data": {
"form": {
"delay_after_error": 1,
"description": "",
"exception_comment": "",
"exception_default_value": "",
"exception_goto": [],
"exception_method": null,
"frequencyPenaltyEnabled": false,
"frequency_penalty": 0.5,
"llm_id": "qwen3-235b-a22b-instruct-2507@Tongyi-Qianwen",
"maxTokensEnabled": true,
"max_retries": 3,
"max_rounds": 3,
"max_tokens": 128000,
"mcp": [],
"message_history_window_size": 12,
"outputs": {
"content": {
"type": "string",
"value": ""
}
},
"parameter": "Precise",
"presencePenaltyEnabled": false,
"presence_penalty": 0.5,
"prompts": [
{
"content": "# User Query\n {sys.query}",
"role": "user"
}
],
"sys_prompt": "## Role & Task\nYou are a **\u201cKnowledge Base Retrieval Q\\&A Agent\u201d** whose goal is to break down the user\u2019s question into retrievable subtasks, and then produce a multi-source-verified, structured, and actionable research report using the internal knowledge base.\n## Execution Framework (Detailed Steps & Key Points)\n1. **Assessment & Decomposition**\n * Actions:\n * Automatically extract: main topic, subtopics, entities (people/organizations/products/technologies), time window, geographic/business scope.\n * Output as a list: N facts/data points that must be collected (*N* ranges from 5\u201320 depending on question complexity).\n2. **Query Type Determination (Rule-Based)**\n * Example rules:\n * If the question involves a single issue but requests \u201cmethod comparison/multiple explanations\u201d \u2192 use **depth-first**.\n * If the question can naturally be split into \u22653 independent sub-questions \u2192 use **breadth-first**.\n * If the question can be answered by a single fact/specification/definition \u2192 use **simple query**.\n3. **Research Plan Formulation**\n * Depth-first: define 3\u20135 perspectives (methodology/stakeholders/time dimension/technical route, etc.), assign search keywords, target document types, and output format for each perspective.\n * Breadth-first: list subtasks, prioritize them, and assign search terms.\n * Simple query: directly provide the search sentence and required fields.\n4. **Retrieval Execution**\n * After retrieval: perform coverage check (does it contain the key facts?) and quality check (source diversity, authority, latest update time).\n * If standards are not met, automatically loop: rewrite queries (synonyms/cross-domain terms) and retry \u22643 times, or flag as requiring external search.\n5. **Integration & Reasoning**\n * Build the answer using a **fact\u2013evidence\u2013reasoning** chain. For each conclusion, attach 1\u20132 strongest pieces of evidence.\n---\n## Quality Gate Checklist (Verify at Each Stage)\n* **Stage 1 (Decomposition)**:\n * [ ] Key concepts and expected outputs identified\n * [ ] Required facts/data points listed\n* **Stage 2 (Retrieval)**:\n * [ ] Meets quality standards (see above)\n * [ ] If not met: execute query iteration\n* **Stage 3 (Generation)**:\n * [ ] Each conclusion has at least one direct evidence source\n * [ ] State assumptions/uncertainties\n * [ ] Provide next-step suggestions or experiment/retrieval plans\n * [ ] Final length and depth match user expectations (comply with word count/format if specified)\n---\n## Core Principles\n1. **Strict reliance on the knowledge base**: answers must be **fully bounded** by the content retrieved from the knowledge base.\n2. **No fabrication**: do not generate, infer, or create information that is not explicitly present in the knowledge base.\n3. **Accuracy first**: prefer incompleteness over inaccurate content.\n4. **Output format**:\n * Hierarchically clear modular structure\n * Logical grouping according to the MECE principle\n * Professionally presented formatting\n * Step-by-step cognitive guidance\n * Reasonable use of headings and dividers for clarity\n * *Italicize* key parameters\n * **Bold** critical information\n5. 
**LaTeX formula requirements**:\n * Inline formulas: start and end with `$`\n * Block formulas: start and end with `$$`, each `$$` on its own line\n * Block formula content must comply with LaTeX math syntax\n * Verify formula correctness\n---\n## Additional Notes (Interaction & Failure Strategy)\n* If the knowledge base does not cover critical facts: explicitly inform the user (with sample wording)\n* For time-sensitive issues: enforce time filtering in the search request, and indicate the latest retrieval date in the answer.\n* Language requirement: answer in the user\u2019s preferred language\n",
|
||||
"temperature": "0.1",
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "Retrieval",
|
||||
"name": "Retrieval",
|
||||
"params": {
|
||||
"cross_languages": [],
|
||||
"description": "",
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
},
|
||||
"label": "Agent",
|
||||
"name": "Knowledge Base Agent"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Agent:NewPumasLick",
|
||||
"measured": {
|
||||
"height": 84,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 347.00048227952215,
|
||||
"y": 186.49109364794631
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "agentNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"description": "This is an agent for a specific task.",
|
||||
"user_prompt": "This is the order you need to send to the agent."
|
||||
},
|
||||
"label": "Tool",
|
||||
"name": "flow.tool_10"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Tool:AllBirdsNail",
|
||||
"measured": {
|
||||
"height": 48,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 220.24819746977118,
|
||||
"y": 403.31576836482583
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "toolNode"
|
||||
}
|
||||
]
|
||||
},
|
||||
"history": [],
|
||||
"memory": [],
|
||||
"messages": [],
|
||||
"path": [],
|
||||
"retrieval": []
|
||||
},
|
||||
"avatar": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAH0klEQVR4nO2ZC1BU1wGG/3uRp/IygG+DGK0GOjE1U6cxI4tT03Y0E+kENbaJbKpj60wzgNMwnTjuEtu0miGasY+0krI202kMVEnVxtoOLG00oVa0LajVBDcSEI0REFBgkZv/3GWXfdzdvctuHs7kmzmec9//d+45914XCXc4Xwjk1+59VJGGF7C5QAFSWBvgyWmWLl7IKiny6QNL173B5YjB84bOyrpKA4B1DLySdQpLKAiZGtZ7a/KMVoQJz6UfEZyhTWwaEBmssiLvCueu6BJg8EwFqGTTAC+uvNWC9w82sRWcux/JwaSHstjywcogRt4RG0KExwWG4QsVYCebKSwe3L5lR9OOWjyzfg2WL/0a1/jncO3b2FHxGnKeWYqo+Giu8UEMrWJKWBACPMY/DG+63txhvnKshUu+DF2/hayMDFRsL+VScDb++AVc6OjAuInxXPJl2tfnIikrzUyJMi7qQmLRhOEr2fOFbX/7P6STF7BqoWevfdij4NWGQfx+57OYO2sG1wSnsek8Nm15EU8sikF6ouelXz9ph7JwDqYt+5IIZaGEkauDIrH4wPBmhjexCSEws+VdVG1M4NIoj+2xYzBuJtavWcEl/VS8dggx/ZdQvcGzQwp+cxOXsu5RBQQMVkYJM4LA/Txh+ELFMWFVPARS5kFiabZdx8Olh7l17BzdvhzZmROhdJ3j6D/nIyBgOCMlLAgA9xmF4TMV4BSbrgnrLiBl5rOsRCRRbDUsBzQFiJjY91PCBj9w+yiP1lXWsTLAjc9YQGB9I8+Yx1oTiUWFvW9QgDo2PdASaDp/EQ8/sRnhcPTVcuTMncXwQQVESL9DidscaPW+QEtAICRu9PSxFTpJiePV8AI9AsTvXZBY/Pa+wJ9ApNApIILm8S5Y4QXXQwhYFH6csemDP4G3G5v579i5d04mknknQhDYS4HCrCVr/mC3D305KnbCEpvVIia5Onw6WaWw+KAl0Np+FUXbdiMcyoqfUoeRHoFrJ1uRtnBG1/9Mf/3LtElp+VwF2wcd7woJib1vUPwMH4GWQCQJJtBa/V9cPmFD8uQUpMdNGDhY8bNYrobh8acHu270/l0ImJWRt64Wn6WACN9z5gq2lXwPW8pfweT0icP/fH23vO9QLYq3/QKyLBmFQI3CUcT9NdESEEPItKsSN3r7MBaSJoxHWZERM6ZmMLy2gDP8/pd/og418dTL37hFSUpMUC5f+UiWZcnY9s5+ixCwUiCXx2iiJdDNx6f4pgkH8Q3lbxK7h8+enoHha1cRNdMp8axiHxo6+/5bVdk8DSROYIW1X7QEIom3wHD3gEf4vu1bVYEJZeWQ0zJQvmcfyiv2QZak6raG/QWfK4Ez9mTc5v8xPMJfuojoxXmIX/9DOMe+FCWbcHu4BJJ0YEwCx0824bFNW9HesB+CqYu+jepfPYcHF+aoPXS8sQl/+vU2bgmOU2C+qRc9/YrrPPbGBtzavd0nvCxLxui4pJrBm911PFwak4CYA80cj+JCAiGUzYkmxrSY4N2c3GLi6UEIFL/wRxxqkhmHnTEpDQcrfq6ea+hcE8bNy3GFzyq4H22HW1Kd4WMSkg1jmsSRpKj0Rzhy4gNUv/y8Gjrv8SJK3OWScA+fMn/ysVPPvTmeh6nh1TcxBUJ+jEaKYr7N36x7h+Edj0pB6+WrLokn87+BrTt/p4ZPzZ6MM7/8R2//h33vOcNzdwgBMwVMbGvySQmo4a0NqOZccU7YmGXLEfPQUlUid/XT6B8YdIU/99vjsPcOdEhDsfOd4QVCwKB8yp8SWuG1njbTl83DpMWz1PCKAswuWPDI0e8WebyAJBbxNdrF7cls+hBpAb3h3XtehL/3+4u7D35rQwpP4YFTwMJ91rHpQyQFQgmf9sAMNL9Ur4afv/FBjIuPVj+n4YVTwMD96tj0IVICoYYXv/q1VJ1Sl8UveQyaRwErvOB6B5SwKhqP00gI6A0vhsycJ7/KIzxhyHqGN0ADbnNAAYOicRfCFdAb/p50Gbfuc/wy5w1D5lOghk0fuG0USlgVr7sQjoDe8C8WxKGKPy2KjzlvAQb02/sCbh+FApngX1QUtyeSuwDi0hxFByV7L+LIf3r5kvpp4PBr07Hqvn71Y85bgOG6WS2ggA1+4D6eUKKQApVsqngI6KSkqh9HzsoM/3zg8Oz5VQ9E8wjf30YFDGdkeAsCwH18oYRZGXk7C4HuYxcwe6rjQsFovzaEvoFxqNkTOPzMjGikJso8wsF77XYkLx6dAwxWxvBmBIH7aUMJi8J3w0DnTVz7dyvX6KPzVBt+kL8cmzesRq9ps2Z48bRJmOIapS7E4zM2lXNt5CcU6ID7+ocSZkqY2NRN6ysnsHbJEpR8ZwV6t5Yg+iuLELf2KVd48VwXQf3BQGUMb4ZOuH9gKFEIYJfiNrEDcXZHHV4q3YRv5i7ikgM94RlETNgihrcgBHhccCiRCf7VhBK5rAPyr9I/Y/WKPEyfksH/9NjQ2dODhsYzwcLXsypkeBtCRGLRDUUMAMyKHxEx4dtrzyP97nQMygripiQiKi4aSbPvQmKW7+OXF69ntYvBa1iPCYklZEZECsGm4ja0Ops7EJsaj4SprlU+8IJiqIjAFga3Ikx4vvAYkTGALxyWFArlsnbBC9Sz6mI5zWKNRGh3JJY7mjte4GOz+r4tkRbxQQAAAABJRU5ErkJggg=="
|
||||
}
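The template above declares the same pipeline twice: once in the `components` map through explicit `upstream`/`downstream` lists, and once as a React-Flow-style `graph` of nodes and edges. The two views have to stay in sync for the canvas to execute. Below is a minimal consistency check — a sketch assuming the template has been saved locally as `kb_agent.json` (a hypothetical filename), using only the standard library; it is not part of RAGFlow itself.

```python
import json

# Load a template exported from the canvas (hypothetical local copy).
with open("kb_agent.json", encoding="utf-8") as f:
    dsl = json.load(f)["dsl"]

# Edges implied by each component's upstream/downstream declarations.
declared = set()
for name, comp in dsl["components"].items():
    declared |= {(name, d) for d in comp.get("downstream", [])}
    declared |= {(u, name) for u in comp.get("upstream", [])}

# Edges actually drawn on the canvas; "tool" handles attach tool nodes
# rather than data flow, and tool nodes do not appear in the components map.
drawn = {
    (e["source"], e["target"])
    for e in dsl["graph"]["edges"]
    if e.get("sourceHandle") != "tool"
}

print("declared but not drawn:", declared - drawn or "none")
print("drawn but not declared:", drawn - declared or "none")
```

For this template both differences should come out empty: `begin → Agent:NewPumasLick → Message:OrangeYearsShine` is the only data path, with the tool node attached through the separate `tool` handle.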
919 agent/templates/market_generate_seo_blog.json Normal file
@@ -0,0 +1,919 @@
{
|
||||
"id": 12,
|
||||
"title": {
|
||||
"en": "Generate SEO Blog",
|
||||
"zh": "生成SEO博客"},
|
||||
"description": {
|
||||
"en": "This workflow automatically generates a complete SEO-optimized blog article based on a simple user input. You don’t need any writing experience. Just provide a topic or short request — the system will handle the rest.",
|
||||
"zh": "此工作流根据简单的用户输入自动生成完整的SEO博客文章。你无需任何写作经验,只需提供一个主题或简短请求,系统将处理其余部分。"},
|
||||
"canvas_type": "Marketing",
|
||||
"dsl": {
|
||||
"components": {
|
||||
"Agent:BetterSitesSend": {
|
||||
"downstream": [
|
||||
"Agent:EagerNailsRemain"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.3,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 3,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Balance",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.2,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The parse and keyword agent output is {Agent:ClearRabbitsScream@content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Outline_Agent**, responsible for generating a clear and SEO-optimized blog outline based on the user's parsed writing intent and keyword strategy.\n\n# Tool Access:\n\n- You have access to a search tool called `Tavily Search`.\n\n- If you are unsure how to structure a section, you may call this tool to search for related blog outlines or content from Google.\n\n- Do not overuse it. Your job is to extract **structure**, not to write paragraphs.\n\n\n# Goals\n\n1. Create a well-structured outline with appropriate H2 and H3 headings.\n\n2. Ensure logical flow from introduction to conclusion.\n\n3. Assign 1\u20132 suggested long-tail keywords to each major section for SEO alignment.\n\n4. Make the structure suitable for downstream paragraph writing.\n\n\n\n\n#Note\n\n- Use concise, scannable section titles.\n\n- Do not write full paragraphs.\n\n- Prioritize clarity, logical progression, and SEO alignment.\n\n\n\n- If the blog type is \u201cTutorial\u201d or \u201cHow-to\u201d, include step-based sections.\n\n\n# Input\n\nYou will receive:\n\n- Writing Type (e.g., Tutorial, Informative Guide)\n\n- Target Audience\n\n- User Intent Summary\n\n- 3\u20135 long-tail keywords\n\n\nUse this information to design a structure that both informs readers and maximizes search engine visibility.\n\n# Output Format\n\n```markdown\n\n## Blog Title (suggested)\n\n[Give a short, SEO-friendly title suggestion]\n\n## Outline\n\n### Introduction\n\n- Purpose of the article\n\n- Brief context\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 1]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 2]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 3]\n\n- [Optional H3 Subsection Title A]\n\n - [Explanation of sub-point]\n\n- [Optional H3 Subsection Title B]\n\n - [Explanation of sub-point]\n\n- **Suggested keywords**: [keyword1]\n\n### Conclusion\n\n- Recap key takeaways\n\n- Optional CTA (Call to Action)\n\n- **Suggested keywords**: [keyword3]\n\n",
|
||||
"temperature": 0.5,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "TavilySearch",
|
||||
"name": "TavilySearch",
|
||||
"params": {
|
||||
"api_key": "",
|
||||
"days": 7,
|
||||
"exclude_domains": [],
|
||||
"include_answer": false,
|
||||
"include_domains": [],
|
||||
"include_image_descriptions": false,
|
||||
"include_images": false,
|
||||
"include_raw_content": true,
|
||||
"max_results": 5,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"query": "sys.query",
|
||||
"search_depth": "basic",
|
||||
"topic": "general"
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.85,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"Agent:ClearRabbitsScream"
|
||||
]
|
||||
},
|
||||
"Agent:ClearRabbitsScream": {
|
||||
"downstream": [
|
||||
"Agent:BetterSitesSend"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 1,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The user query is {sys.query}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Parse_And_Keyword_Agent**, responsible for interpreting a user's blog writing request and generating a structured writing intent summary and keyword strategy for SEO-optimized content generation.\n\n# Goals\n\n1. Extract and infer the user's true writing intent, even if the input is informal or vague.\n\n2. Identify the writing type, target audience, and implied goal.\n\n3. Suggest 3\u20135 long-tail keywords based on the input and context.\n\n4. Output all data in a Markdown format for downstream agents.\n\n# Operating Guidelines\n\n\n- If the user's input lacks clarity, make reasonable and **conservative** assumptions based on SEO best practices.\n\n- Always choose one clear \"Writing Type\" from the list below.\n\n- Your job is not to write the blog \u2014 only to structure the brief.\n\n# Output Format\n\n```markdown\n## Writing Type\n\n[Choose one: Tutorial / Informative Guide / Marketing Content / Case Study / Opinion Piece / How-to / Comparison Article]\n\n## Target Audience\n\n[Try to be specific based on clues in the input: e.g., marketing managers, junior developers, SEO beginners]\n\n## User Intent Summary\n\n[A 1\u20132 sentence summary of what the user wants to achieve with the blog post]\n\n## Suggested Long-tail Keywords\n\n- keyword 1\n\n- keyword 2\n\n- keyword 3\n\n- keyword 4 (optional)\n\n- keyword 5 (optional)\n\n\n\n\n## Input Examples (and how to handle them)\n\nInput: \"I want to write about RAGFlow.\"\n\u2192 Output: Informative Guide, Audience: AI developers, Intent: explain what RAGFlow is and its use cases\n\nInput: \"Need a blog to promote our prompt design tool.\"\n\u2192 Output: Marketing Content, Audience: product managers or tool adopters, Intent: raise awareness and interest in the product\n\n\n\nInput: \"How to get more Google traffic using AI\"\n\u2192 Output: How-to, Audience: SEO marketers, Intent: guide readers on applying AI for SEO growth",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"begin"
|
||||
]
|
||||
},
|
||||
"Agent:EagerNailsRemain": {
|
||||
"downstream": [
|
||||
"Agent:LovelyHeadsOwn"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 5,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The parse and keyword agent output is {Agent:ClearRabbitsScream@content}\n\n\n\nThe Outline agent output is {Agent:BetterSitesSend@content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Body_Agent**, responsible for generating the full content of each section of an SEO-optimized blog based on the provided outline and keyword strategy.\n\n# Tool Access:\n\nYou can use the `Tavily Search` tool to retrieve relevant content, statistics, or examples to support each section you're writing.\n\nUse it **only** when the provided outline lacks enough information, or if the section requires factual grounding.\n\nAlways cite the original link or indicate source where possible.\n\n\n# Goals\n\n1. Write each section (based on H2/H3 structure) as a complete and natural blog paragraph.\n\n2. Integrate the suggested long-tail keywords naturally into each section.\n\n3. When appropriate, use the `Tavily Search` tool to enrich your writing with relevant facts, examples, or quotes.\n\n4. Ensure each section is clear, engaging, and informative, suitable for both human readers and search engines.\n\n\n# Style Guidelines\n\n- Write in a tone appropriate to the audience. Be explanatory, not promotional, unless it's a marketing blog.\n\n- Avoid generic filler content. Prioritize clarity, structure, and value.\n\n- Ensure SEO keywords are embedded seamlessly, not forcefully.\n\n\n\n- Maintain writing rhythm. Vary sentence lengths. Use transitions between ideas.\n\n\n# Input\n\n\nYou will receive:\n\n- Blog title\n\n- Structured outline (including section titles, keywords, and descriptions)\n\n- Target audience\n\n- Blog type and user intent\n\nYou must **follow the outline strictly**. Write content **section-by-section**, based on the structure.\n\n\n# Output Format\n\n```markdown\n\n## H2: [Section Title]\n\n[Your generated content for this section \u2014 500-600 words, using keywords naturally.]\n\n",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "TavilySearch",
|
||||
"name": "TavilySearch",
|
||||
"params": {
|
||||
"api_key": "",
|
||||
"days": 7,
|
||||
"exclude_domains": [],
|
||||
"include_answer": false,
|
||||
"include_domains": [],
|
||||
"include_image_descriptions": false,
|
||||
"include_images": false,
|
||||
"include_raw_content": true,
|
||||
"max_results": 5,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"query": "sys.query",
|
||||
"search_depth": "basic",
|
||||
"topic": "general"
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"Agent:BetterSitesSend"
|
||||
]
|
||||
},
|
||||
"Agent:LovelyHeadsOwn": {
|
||||
"downstream": [
|
||||
"Message:LegalBeansBet"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 5,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The parse and keyword agent output is {Agent:ClearRabbitsScream@content}\n\nThe Outline agent output is {Agent:BetterSitesSend@content}\n\nThe Body agent output is {Agent:EagerNailsRemain@content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Editor_Agent**, responsible for finalizing the blog post for both human readability and SEO effectiveness.\n\n# Goals\n\n1. Polish the entire blog content for clarity, coherence, and style.\n\n2. Improve transitions between sections, ensure logical flow.\n\n3. Verify that keywords are used appropriately and effectively.\n\n4. Conduct a lightweight SEO audit \u2014 checking keyword density, structure (H1/H2/H3), and overall searchability.\n\n\n\n# Style Guidelines\n\n- Be precise. Avoid bloated or vague language.\n\n- Maintain an informative and engaging tone, suitable to the target audience.\n\n- Do not remove keywords unless absolutely necessary for clarity.\n\n- Ensure paragraph flow and section continuity.\n\n\n# Input\n\nYou will receive:\n\n- Full blog content, written section-by-section\n\n- Original outline with suggested keywords\n\n- Target audience and writing type\n\n# Output Format\n\n```markdown\n\n[The revised, fully polished blog post content goes here.]\n\n",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"Agent:EagerNailsRemain"
|
||||
]
|
||||
},
|
||||
"Message:LegalBeansBet": {
|
||||
"downstream": [],
|
||||
"obj": {
|
||||
"component_name": "Message",
|
||||
"params": {
|
||||
"content": [
|
||||
"{Agent:LovelyHeadsOwn@content}"
|
||||
]
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"Agent:LovelyHeadsOwn"
|
||||
]
|
||||
},
|
||||
"begin": {
|
||||
"downstream": [
|
||||
"Agent:ClearRabbitsScream"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Begin",
|
||||
"params": {
|
||||
"enablePrologue": true,
|
||||
"inputs": {},
|
||||
"mode": "conversational",
|
||||
"prologue": "Hi! I'm your SEO blog assistant.\n\nTo get started, please tell me:\n1. What topic you want the blog to cover\n2. Who is the target audience\n3. What you hope to achieve with this blog (e.g., SEO traffic, teaching beginners, promoting a product)\n"
|
||||
}
|
||||
},
|
||||
"upstream": []
|
||||
}
|
||||
},
|
||||
"globals": {
|
||||
"sys.conversation_turns": 0,
|
||||
"sys.files": [],
|
||||
"sys.query": "",
|
||||
"sys.user_id": ""
|
||||
},
|
||||
"graph": {
|
||||
"edges": [
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__beginstart-Agent:ClearRabbitsScreamend",
|
||||
"source": "begin",
|
||||
"sourceHandle": "start",
|
||||
"target": "Agent:ClearRabbitsScream",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Agent:ClearRabbitsScreamstart-Agent:BetterSitesSendend",
|
||||
"source": "Agent:ClearRabbitsScream",
|
||||
"sourceHandle": "start",
|
||||
"target": "Agent:BetterSitesSend",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Agent:BetterSitesSendtool-Tool:SharpPensBurnend",
|
||||
"source": "Agent:BetterSitesSend",
|
||||
"sourceHandle": "tool",
|
||||
"target": "Tool:SharpPensBurn",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Agent:BetterSitesSendstart-Agent:EagerNailsRemainend",
|
||||
"source": "Agent:BetterSitesSend",
|
||||
"sourceHandle": "start",
|
||||
"target": "Agent:EagerNailsRemain",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"id": "xy-edge__Agent:EagerNailsRemaintool-Tool:WickedDeerHealend",
|
||||
"source": "Agent:EagerNailsRemain",
|
||||
"sourceHandle": "tool",
|
||||
"target": "Tool:WickedDeerHeal",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Agent:EagerNailsRemainstart-Agent:LovelyHeadsOwnend",
|
||||
"source": "Agent:EagerNailsRemain",
|
||||
"sourceHandle": "start",
|
||||
"target": "Agent:LovelyHeadsOwn",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Agent:LovelyHeadsOwnstart-Message:LegalBeansBetend",
|
||||
"source": "Agent:LovelyHeadsOwn",
|
||||
"sourceHandle": "start",
|
||||
"target": "Message:LegalBeansBet",
|
||||
"targetHandle": "end"
|
||||
}
|
||||
],
|
||||
"nodes": [
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"enablePrologue": true,
|
||||
"inputs": {},
|
||||
"mode": "conversational",
|
||||
"prologue": "Hi! I'm your SEO blog assistant.\n\nTo get started, please tell me:\n1. What topic you want the blog to cover\n2. Who is the target audience\n3. What you hope to achieve with this blog (e.g., SEO traffic, teaching beginners, promoting a product)\n"
|
||||
},
|
||||
"label": "Begin",
|
||||
"name": "begin"
|
||||
},
|
||||
"id": "begin",
|
||||
"measured": {
|
||||
"height": 48,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 50,
|
||||
"y": 200
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "left",
|
||||
"targetPosition": "right",
|
||||
"type": "beginNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 1,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The user query is {sys.query}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Parse_And_Keyword_Agent**, responsible for interpreting a user's blog writing request and generating a structured writing intent summary and keyword strategy for SEO-optimized content generation.\n\n# Goals\n\n1. Extract and infer the user's true writing intent, even if the input is informal or vague.\n\n2. Identify the writing type, target audience, and implied goal.\n\n3. Suggest 3\u20135 long-tail keywords based on the input and context.\n\n4. Output all data in a Markdown format for downstream agents.\n\n# Operating Guidelines\n\n\n- If the user's input lacks clarity, make reasonable and **conservative** assumptions based on SEO best practices.\n\n- Always choose one clear \"Writing Type\" from the list below.\n\n- Your job is not to write the blog \u2014 only to structure the brief.\n\n# Output Format\n\n```markdown\n## Writing Type\n\n[Choose one: Tutorial / Informative Guide / Marketing Content / Case Study / Opinion Piece / How-to / Comparison Article]\n\n## Target Audience\n\n[Try to be specific based on clues in the input: e.g., marketing managers, junior developers, SEO beginners]\n\n## User Intent Summary\n\n[A 1\u20132 sentence summary of what the user wants to achieve with the blog post]\n\n## Suggested Long-tail Keywords\n\n- keyword 1\n\n- keyword 2\n\n- keyword 3\n\n- keyword 4 (optional)\n\n- keyword 5 (optional)\n\n\n\n\n## Input Examples (and how to handle them)\n\nInput: \"I want to write about RAGFlow.\"\n\u2192 Output: Informative Guide, Audience: AI developers, Intent: explain what RAGFlow is and its use cases\n\nInput: \"Need a blog to promote our prompt design tool.\"\n\u2192 Output: Marketing Content, Audience: product managers or tool adopters, Intent: raise awareness and interest in the product\n\n\n\nInput: \"How to get more Google traffic using AI\"\n\u2192 Output: How-to, Audience: SEO marketers, Intent: guide readers on applying AI for SEO growth",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
},
|
||||
"label": "Agent",
|
||||
"name": "Parse And Keyword Agent"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Agent:ClearRabbitsScream",
|
||||
"measured": {
|
||||
"height": 84,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 344.7766966202233,
|
||||
"y": 234.82202253184496
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "agentNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.3,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 3,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Balance",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.2,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The parse and keyword agent output is {Agent:ClearRabbitsScream@content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Outline_Agent**, responsible for generating a clear and SEO-optimized blog outline based on the user's parsed writing intent and keyword strategy.\n\n# Tool Access:\n\n- You have access to a search tool called `Tavily Search`.\n\n- If you are unsure how to structure a section, you may call this tool to search for related blog outlines or content from Google.\n\n- Do not overuse it. Your job is to extract **structure**, not to write paragraphs.\n\n\n# Goals\n\n1. Create a well-structured outline with appropriate H2 and H3 headings.\n\n2. Ensure logical flow from introduction to conclusion.\n\n3. Assign 1\u20132 suggested long-tail keywords to each major section for SEO alignment.\n\n4. Make the structure suitable for downstream paragraph writing.\n\n\n\n\n#Note\n\n- Use concise, scannable section titles.\n\n- Do not write full paragraphs.\n\n- Prioritize clarity, logical progression, and SEO alignment.\n\n\n\n- If the blog type is \u201cTutorial\u201d or \u201cHow-to\u201d, include step-based sections.\n\n\n# Input\n\nYou will receive:\n\n- Writing Type (e.g., Tutorial, Informative Guide)\n\n- Target Audience\n\n- User Intent Summary\n\n- 3\u20135 long-tail keywords\n\n\nUse this information to design a structure that both informs readers and maximizes search engine visibility.\n\n# Output Format\n\n```markdown\n\n## Blog Title (suggested)\n\n[Give a short, SEO-friendly title suggestion]\n\n## Outline\n\n### Introduction\n\n- Purpose of the article\n\n- Brief context\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 1]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 2]\n\n- [Short description of what this section will cover]\n\n- **Suggested keywords**: [keyword1, keyword2]\n\n### H2: [Section Title 3]\n\n- [Optional H3 Subsection Title A]\n\n - [Explanation of sub-point]\n\n- [Optional H3 Subsection Title B]\n\n - [Explanation of sub-point]\n\n- **Suggested keywords**: [keyword1]\n\n### Conclusion\n\n- Recap key takeaways\n\n- Optional CTA (Call to Action)\n\n- **Suggested keywords**: [keyword3]\n\n",
|
||||
"temperature": 0.5,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "TavilySearch",
|
||||
"name": "TavilySearch",
|
||||
"params": {
|
||||
"api_key": "",
|
||||
"days": 7,
|
||||
"exclude_domains": [],
|
||||
"include_answer": false,
|
||||
"include_domains": [],
|
||||
"include_image_descriptions": false,
|
||||
"include_images": false,
|
||||
"include_raw_content": true,
|
||||
"max_results": 5,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"query": "sys.query",
|
||||
"search_depth": "basic",
|
||||
"topic": "general"
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.85,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
},
|
||||
"label": "Agent",
|
||||
"name": "Outline Agent"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Agent:BetterSitesSend",
|
||||
"measured": {
|
||||
"height": 84,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 613.4368763415628,
|
||||
"y": 164.3074269048589
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "agentNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"description": "This is an agent for a specific task.",
|
||||
"user_prompt": "This is the order you need to send to the agent."
|
||||
},
|
||||
"label": "Tool",
|
||||
"name": "flow.tool_0"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Tool:SharpPensBurn",
|
||||
"measured": {
|
||||
"height": 44,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 580.1877078861457,
|
||||
"y": 287.7669662022325
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "toolNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 5,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The parse and keyword agent output is {Agent:ClearRabbitsScream@content}\n\n\n\nThe Outline agent output is {Agent:BetterSitesSend@content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Body_Agent**, responsible for generating the full content of each section of an SEO-optimized blog based on the provided outline and keyword strategy.\n\n# Tool Access:\n\nYou can use the `Tavily Search` tool to retrieve relevant content, statistics, or examples to support each section you're writing.\n\nUse it **only** when the provided outline lacks enough information, or if the section requires factual grounding.\n\nAlways cite the original link or indicate source where possible.\n\n\n# Goals\n\n1. Write each section (based on H2/H3 structure) as a complete and natural blog paragraph.\n\n2. Integrate the suggested long-tail keywords naturally into each section.\n\n3. When appropriate, use the `Tavily Search` tool to enrich your writing with relevant facts, examples, or quotes.\n\n4. Ensure each section is clear, engaging, and informative, suitable for both human readers and search engines.\n\n\n# Style Guidelines\n\n- Write in a tone appropriate to the audience. Be explanatory, not promotional, unless it's a marketing blog.\n\n- Avoid generic filler content. Prioritize clarity, structure, and value.\n\n- Ensure SEO keywords are embedded seamlessly, not forcefully.\n\n\n\n- Maintain writing rhythm. Vary sentence lengths. Use transitions between ideas.\n\n\n# Input\n\n\nYou will receive:\n\n- Blog title\n\n- Structured outline (including section titles, keywords, and descriptions)\n\n- Target audience\n\n- Blog type and user intent\n\nYou must **follow the outline strictly**. Write content **section-by-section**, based on the structure.\n\n\n# Output Format\n\n```markdown\n\n## H2: [Section Title]\n\n[Your generated content for this section \u2014 500-600 words, using keywords naturally.]\n\n",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [
|
||||
{
|
||||
"component_name": "TavilySearch",
|
||||
"name": "TavilySearch",
|
||||
"params": {
|
||||
"api_key": "",
|
||||
"days": 7,
|
||||
"exclude_domains": [],
|
||||
"include_answer": false,
|
||||
"include_domains": [],
|
||||
"include_image_descriptions": false,
|
||||
"include_images": false,
|
||||
"include_raw_content": true,
|
||||
"max_results": 5,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"query": "sys.query",
|
||||
"search_depth": "basic",
|
||||
"topic": "general"
|
||||
}
|
||||
}
|
||||
],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
},
|
||||
"label": "Agent",
|
||||
"name": "Body Agent"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Agent:EagerNailsRemain",
|
||||
"measured": {
|
||||
"height": 84,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 889.0614605692713,
|
||||
"y": 247.00973041799065
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "agentNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"description": "This is an agent for a specific task.",
|
||||
"user_prompt": "This is the order you need to send to the agent."
|
||||
},
|
||||
"label": "Tool",
|
||||
"name": "flow.tool_1"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Tool:WickedDeerHeal",
|
||||
"measured": {
|
||||
"height": 44,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 853.2006404239659,
|
||||
"y": 364.37541577229143
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "toolNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_comment": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": null,
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.5,
|
||||
"llm_id": "deepseek-chat@DeepSeek",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 5,
|
||||
"max_tokens": 4096,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"parameter": "Precise",
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.5,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "The parse and keyword agent output is {Agent:ClearRabbitsScream@content}\n\nThe Outline agent output is {Agent:BetterSitesSend@content}\n\nThe Body agent output is {Agent:EagerNailsRemain@content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "# Role\n\nYou are the **Editor_Agent**, responsible for finalizing the blog post for both human readability and SEO effectiveness.\n\n# Goals\n\n1. Polish the entire blog content for clarity, coherence, and style.\n\n2. Improve transitions between sections, ensure logical flow.\n\n3. Verify that keywords are used appropriately and effectively.\n\n4. Conduct a lightweight SEO audit \u2014 checking keyword density, structure (H1/H2/H3), and overall searchability.\n\n\n\n# Style Guidelines\n\n- Be precise. Avoid bloated or vague language.\n\n- Maintain an informative and engaging tone, suitable to the target audience.\n\n- Do not remove keywords unless absolutely necessary for clarity.\n\n- Ensure paragraph flow and section continuity.\n\n\n# Input\n\nYou will receive:\n\n- Full blog content, written section-by-section\n\n- Original outline with suggested keywords\n\n- Target audience and writing type\n\n# Output Format\n\n```markdown\n\n[The revised, fully polished blog post content goes here.]\n\n",
|
||||
"temperature": 0.2,
|
||||
"temperatureEnabled": true,
|
||||
"tools": [],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.75,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
},
|
||||
"label": "Agent",
|
||||
"name": "Editor Agent"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Agent:LovelyHeadsOwn",
|
||||
"measured": {
|
||||
"height": 84,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 1160.3332919804993,
|
||||
"y": 149.50806732882472
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "agentNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"content": [
|
||||
"{Agent:LovelyHeadsOwn@content}"
|
||||
]
|
||||
},
|
||||
"label": "Message",
|
||||
"name": "Response"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Message:LegalBeansBet",
|
||||
"measured": {
|
||||
"height": 56,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 1370.6665839609984,
|
||||
"y": 267.0323933738015
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "messageNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "This workflow automatically generates a complete SEO-optimized blog article based on a simple user input. You don\u2019t need any writing experience. Just provide a topic or short request \u2014 the system will handle the rest.\n\nThe process includes the following key stages:\n\n1. **Understanding your topic and goals**\n2. **Designing the blog structure**\n3. **Writing high-quality content**\n\n\n"
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Workflow Overall Description"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 205,
|
||||
"id": "Note:SlimyGhostsWear",
|
||||
"measured": {
|
||||
"height": 205,
|
||||
"width": 415
|
||||
},
|
||||
"position": {
|
||||
"x": -284.3143151688742,
|
||||
"y": 150.47632147913419
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 415
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "**Purpose**: \nThis agent reads the user\u2019s input and figures out what kind of blog needs to be written.\n\n**What it does**:\n- Understands the main topic you want to write about \n- Identifies who the blog is for (e.g., beginners, marketers, developers) \n- Determines the writing purpose (e.g., SEO traffic, product promotion, education) \n- Suggests 3\u20135 long-tail SEO keywords related to the topic"
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Parse And Keyword Agent"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 152,
|
||||
"id": "Note:EmptyChairsShake",
|
||||
"measured": {
|
||||
"height": 152,
|
||||
"width": 340
|
||||
},
|
||||
"position": {
|
||||
"x": 295.04147626768133,
|
||||
"y": 372.2755718118446
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 340
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "**Purpose**: \nThis agent builds the blog structure \u2014 just like writing a table of contents before you start writing the full article.\n\n**What it does**:\n- Suggests a clear blog title that includes important keywords \n- Breaks the article into sections using H2 and H3 headings (like a professional blog layout) \n- Assigns 1\u20132 recommended keywords to each section to help with SEO \n- Follows the writing goal and target audience set in the previous step"
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Outline Agent"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 146,
|
||||
"id": "Note:TallMelonsNotice",
|
||||
"measured": {
|
||||
"height": 146,
|
||||
"width": 343
|
||||
},
|
||||
"position": {
|
||||
"x": 598.5644991893463,
|
||||
"y": 5.801054564756448
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 343
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "**Purpose**: \nThis agent is responsible for writing the actual content of the blog \u2014 paragraph by paragraph \u2014 based on the outline created earlier.\n\n**What it does**:\n- Looks at each H2/H3 section in the outline \n- Writes 150\u2013220 words of clear, helpful, and well-structured content per section \n- Includes the suggested SEO keywords naturally (not keyword stuffing) \n- Uses real examples or facts if needed (by calling a web search tool like Tavily)"
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Body Agent"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 137,
|
||||
"id": "Note:RipeCougarsBuild",
|
||||
"measured": {
|
||||
"height": 137,
|
||||
"width": 319
|
||||
},
|
||||
"position": {
|
||||
"x": 860.4854129814981,
|
||||
"y": 427.2196835690842
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 319
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "**Purpose**: \nThis agent reviews the entire blog draft to make sure it is smooth, professional, and SEO-friendly. It acts like a human editor before publishing.\n\n**What it does**:\n- Polishes the writing: improves sentence clarity, fixes awkward phrasing \n- Makes sure the content flows well from one section to the next \n- Double-checks keyword usage: are they present, natural, and not overused? \n- Verifies the blog structure (H1, H2, H3 headings) is correct \n- Adds two key SEO elements:\n - **Meta Title** (shows up in search results)\n - **Meta Description** (summary for Google and social sharing)"
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Editor Agent"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"height": 146,
|
||||
"id": "Note:OpenTurkeysSell",
|
||||
"measured": {
|
||||
"height": 146,
|
||||
"width": 320
|
||||
},
|
||||
"position": {
|
||||
"x": 1129,
|
||||
"y": -30
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 320
|
||||
}
|
||||
]
|
||||
},
|
||||
"history": [],
|
||||
"messages": [],
|
||||
"path": [],
|
||||
"retrieval": []
|
||||
},
|
||||
"avatar": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAAwADADASIAAhEBAxEB/8QAGQAAAwEBAQAAAAAAAAAAAAAABgkKBwUI/8QAMBAAAAYCAQIEBQQCAwAAAAAAAQIDBAUGBxEhCAkAEjFBFFFhcaETFiKRFyOx8PH/xAAaAQACAwEBAAAAAAAAAAAAAAACAwABBgQF/8QALBEAAgIBAgUCBAcAAAAAAAAAAQIDBBEFEgATITFRIkEGIzJhFBUWgaGx8P/aAAwDAQACEQMRAD8AfF2hez9089t7pvxgQMa1Gb6qZ6oQE9m/NEvCIStyPfJSOF/M1epzMugo/qtMqbiRc1mJjoJKCLMNIxKcsLJedfO1Ct9cI63x9fx6CA/19t+oh4LFA5HfuAgP/A8eOIsnsTBrkBHXA7+v53+Q+ficTgJft9gIgA+/P9/1r342O/YA8A8k3/if+IbAN7+2/f8AAiI6H19PGoPyESTMZQPKUAHkQEN+3r9dh78/YPGUTk2wb/qAZZIugH1OHH5DjkdfbnWw2DsOxPj+xjrnx2H39unBopJGBn9s+PHv1HXjPJtH+J+B40O9a16h/wB/92j/ALrPa/wR104UyAobHlXhuo2HrEtK4qy3CwjKOuJLRHJLSkXWrFKs/gVrJVrE8TUiH8bPrP20UEu8m4hNpMJJuTOfnbUw/kUqyZgMHGjAO9+mtDsQ53sdcB6eMhnpEjhNQxRKICAgHy5+/roOdjr7c+J6O4x07dx484/n7nzw1gexBGfIPkZ/3t39uGpqc6+fP5/Ht8vGFZCzJjWpWuBxvO2yPjrtclUUK7BqmUI4fuASeyhG5FzFI0Bw4aQ0iZNoDgzvRW4qtyFkI4XmwyEk2YNnDp0sVBu3IUyy5iqH8gqKERSIRNIii67hddRJs1at01Xbx2sgzZoLu10UFJR+4V1A5cxF3FqNcLvjwcno43uuLrOxZYjujaClcb4QQfxEizpFiQyM9olcueRnjC2ZMt9iY06zL0qytrMSqSOVGsfHMaGhZ3l4lSRI2MqE74zJvRTveNFWWIh3RWw+XCAM5icKQLrCH57T17FhErSlRXnWvyZXKQwWJ3eraD14p5YuZCFgacskK2oGkVuKO5GYTHzf7DaD12cBD3DgPOIDrWw9PnrXPgDkpVsUDGMG+DD6E9gHXIjrYjwUPQTCXYgHPhIV974+F6E1hpC14Yzmzj56YaQEeZhXsayD1zLPW7pygxaMf81Nzu1iJsnIuDIKnaJAkPldqrHaoORZ73tMVEbFdSXT9nVgRQgnBq6j8e/HCIEATpAnH5KlmRVkFRFJwks/bqImSXJ5VFyA3N6Ikh3bCW3YHp5cowOmCfTgA+xJCnrjtwHKcLvJj2ZGcTRFj19kEhckdzgEjKnABGSSzdc1Fe5byXXGNjKdvRcw5NxvLidNZFFCxUa62KrzMaChw8hhYScFJtROAgmuLByq1MsgkZYPaVVuDe0wraRaqAdJwgRQo+YR8xTlAQNx6b49w41vXiJpCalLh1jZhyrTqRM4+jstdRmYryNkydLQRWg1LNGcWd5jIFFvCythlIySa0mNu74sKRQtaWsTmupqPItw0lE52ufpyYzrSkx6cw5bLmBEpkTsz+dt8P5QFuCRtAIkBH9MuwKHICIaDQhnojMs9mKaeGcrMxXlQtAYkdVljimRrE5MqI4zL8oSqQ6wxjodBqK05qdK3Vo3aCSVkBW7bjuC1NFJJBPaqyx6fp6pWkliYLXK2XrukkRu2CCVoSWMgsdMyySKwoLFcIGWSTUMg4IBgTcICoBhRcplMcpFkhIqQp1ClMBTmA0Zfe1zpjvHfXff65bZlzXpB3jjGTgiirmPjAfs16PHqHeQ75Wbj3xxZpOEkV3LRJJSPdomUBZISJLncV2k+8D07dxXp7xsYuTapA9UkJUYWIzNhadnWEZeCXGLQQiJi1ViHfhHL2unWh+mlORsrW0JFpEFnGVfm1mU4kq0FY3eD6corJncv6dr5NLSMNXVaTUksjTiMnaq8uFfSVuDyiJ1iZpy0LOJtpa3YfkcQ5fdozyxI2m5qqcrHN61YYmHsh6v3o9ParYmYJEtlhIx6+gUbjgD23M6oqg92YL0JyF6Bps+qDValVA9h9Lj5SZI3SHXdEQlj1wiQtLLIe6pGzjO3BlBkK1hxpblLVH5wdW0BcFKf/JwRtjsot2z8omaSdxbzzk1iEjsE0AM9rrRZNRIrVyo7dGO6E+oh8axLlJ5H5VaJKx7ePRGFbW6vUeFfHQIWPTI9Tm7HHfuhqY7E6C7JFqUzM6iZXIoncNxX7+bIVdJnTT48x3OQU1krIDW3UeixVhyISzYz6cadY5Xph6TseRNTRsTElzzBn9Vlly0TAERsdgnMYyLROjyFbg5R4ZlsGaMT4yNi2Zlq1GwjZB3jq0PsaJfA3t0jL0W0Y9xf1V41lpWckXMLaZiwxuKYPqc6LlHdkeRF+Qxswx5ASDqBVrsL+2A/N6SiCbYymV2BywJiMZj3GRRMTnL+lVyHCll3R7Szv0vqXMtQ74T+HijljIScLaEpkKCB3rqMBIi0jPs5JeOKTZMZEi5VVnouzy0k3jXjWSMlY6UcVGDxlKMVDqx91SILWSi3D2KdgYy3kP8E9X/AE1SnRXBNdNRMlefT6g7aY6giK+cPLGNg0bY68rcnpsNh9PqIBve/EcPQ3WIq2dR9
3xpSgk5SAZ9R6MLAOZFUkpLSUDXp6/KPpGUkmTdswlnKnwbl5ITMdGwcXJi7LKsqzUmT5tWYmkXuF9wjBvb76b7dHheazJ9RElUJOCxViuMlUJC0Gtz6PKyjLBY4qMWUe12r1xZ6lOyT6XPEBKN2CkTDOlZd02TBdTMt7Upx2knrkdCv1UKjDKn1A7XBYH6SCOOrWn5Oi/DtRiu+GleRthDL8rXdVjZlcfWrSIxVlGGGCOnH//Z"
|
||||
}
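Throughout this template, each downstream agent receives its predecessors' output via placeholders such as `{Agent:ClearRabbitsScream@content}` embedded in its user prompt. A rough sketch of how such references could be resolved at run time is shown below; the `outputs` store and the regex are illustrative assumptions, not RAGFlow's actual resolver.

```python
import re

# Hypothetical outputs collected from upstream components during one run.
outputs = {
    "Agent:ClearRabbitsScream@content": "## Writing Type\nHow-to ...",
    "Agent:BetterSitesSend@content": "## Blog Title (suggested)\n...",
}

# Matches references of the form {Component:Name@field}.
PLACEHOLDER = re.compile(r"\{([A-Za-z]+:[A-Za-z]+@[A-Za-z_]+)\}")

def resolve(prompt: str) -> str:
    """Substitute placeholders, leaving unknown references untouched."""
    return PLACEHOLDER.sub(lambda m: outputs.get(m.group(1), m.group(0)), prompt)

print(resolve("The parse and keyword agent output is "
              "{Agent:ClearRabbitsScream@content}"))
```

System variables such as `{sys.query}` follow the same substitution idea but are seeded from the `globals` section of the DSL rather than from a component's outputs.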
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
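The SQL Assistant template that follows wires three Retrieval components in parallel (schema, question-to-SQL samples, and table descriptions) with the same defaults used in the templates above: `keywords_similarity_weight` 0.7, `similarity_threshold` 0.2, `top_k` 1024, `top_n` 8. One plausible reading of those knobs — a sketch of hybrid ranking under assumed semantics, not RAGFlow's actual scoring code — is a weighted blend of keyword and embedding similarity, followed by a threshold cut and truncation:

```python
def blended_score(keyword_sim: float, vector_sim: float, weight: float = 0.7) -> float:
    # keywords_similarity_weight trades keyword overlap against embedding similarity.
    return weight * keyword_sim + (1.0 - weight) * vector_sim

def rank_chunks(candidates, threshold: float = 0.2, top_n: int = 8):
    """candidates: iterable of (chunk_id, keyword_sim, vector_sim) tuples."""
    scored = [(cid, blended_score(k, v)) for cid, k, v in candidates]
    # similarity_threshold drops weak matches; top_n caps what reaches the LLM.
    kept = [item for item in scored if item[1] >= threshold]
    kept.sort(key=lambda item: item[1], reverse=True)
    return kept[:top_n]

print(rank_chunks([("c1", 0.9, 0.4), ("c2", 0.1, 0.15), ("c3", 0.5, 0.8)]))
# -> [('c1', 0.75), ('c3', 0.59)] (up to float rounding); c2 falls below the threshold.
```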
716 agent/templates/sql_assistant.json Normal file
@@ -0,0 +1,716 @@
{
|
||||
"id": 17,
|
||||
"title": {
|
||||
"en": "SQL Assistant",
|
||||
"zh": "SQL助理"},
|
||||
"description": {
|
||||
"en": "SQL Assistant is an AI-powered tool that lets business users turn plain-English questions into fully formed SQL queries. Simply type your question (e.g., “Show me last quarter’s top 10 products by revenue”) and SQL Assistant generates the exact SQL, runs it against your database, and returns the results in seconds. ",
|
||||
"zh": "用户能够将简单文本问题转化为完整的SQL查询并输出结果。只需输入您的问题(例如,“展示上个季度前十名按收入排序的产品”),SQL助理就会生成精确的SQL语句,对其运行您的数据库,并几秒钟内返回结果。"},
|
||||
"canvas_type": "Marketing",
|
||||
"dsl": {
|
||||
"components": {
|
||||
"Agent:WickedGoatsDivide": {
|
||||
"downstream": [
|
||||
"ExeSQL:TiredShirtsPull"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Agent",
|
||||
"params": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": "",
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.7,
|
||||
"llm_id": "qwen-max@Tongyi-Qianwen",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 5,
|
||||
"max_tokens": 256,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.4,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "User's query: {sys.query}\n\nSchema: {Retrieval:HappyTiesFilm@formalized_content}\n\nSamples about question to SQL: {Retrieval:SmartNewsHammer@formalized_content}\n\nDescription about meanings of tables and files: {Retrieval:SweetDancersAppear@formalized_content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "### ROLE\nYou are a Text-to-SQL assistant. \nGiven a relational database schema and a natural-language request, you must produce a **single, syntactically-correct MySQL query** that answers the request. \nReturn **nothing except the SQL statement itself**\u2014no code fences, no commentary, no explanations, no comments, no trailing semicolon if not required.\n\n\n### EXAMPLES \n-- Example 1 \nUser: List every product name and its unit price. \nSQL:\nSELECT name, unit_price FROM Products;\n\n-- Example 2 \nUser: Show the names and emails of customers who placed orders in January 2025. \nSQL:\nSELECT DISTINCT c.name, c.email\nFROM Customers c\nJOIN Orders o ON o.customer_id = c.id\nWHERE o.order_date BETWEEN '2025-01-01' AND '2025-01-31';\n\n-- Example 3 \nUser: How many orders have a status of \"Completed\" for each month in 2024? \nSQL:\nSELECT DATE_FORMAT(order_date, '%Y-%m') AS month,\n COUNT(*) AS completed_orders\nFROM Orders\nWHERE status = 'Completed'\n AND YEAR(order_date) = 2024\nGROUP BY month\nORDER BY month;\n\n-- Example 4 \nUser: Which products generated at least \\$10 000 in total revenue? \nSQL:\nSELECT p.id, p.name, SUM(oi.quantity * oi.unit_price) AS revenue\nFROM Products p\nJOIN OrderItems oi ON oi.product_id = p.id\nGROUP BY p.id, p.name\nHAVING revenue >= 10000\nORDER BY revenue DESC;\n\n\n### OUTPUT GUIDELINES\n1. Think through the schema and the request. \n2. Write **only** the final MySQL query. \n3. Do **not** wrap the query in back-ticks or markdown fences. \n4. Do **not** add explanations, comments, or additional text\u2014just the SQL.",
|
||||
"temperature": 0.1,
|
||||
"temperatureEnabled": false,
|
||||
"tools": [],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.3,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"Retrieval:HappyTiesFilm",
|
||||
"Retrieval:SmartNewsHammer",
|
||||
"Retrieval:SweetDancersAppear"
|
||||
]
|
||||
},
|
||||
"ExeSQL:TiredShirtsPull": {
|
||||
"downstream": [
|
||||
"Message:ShaggyMasksAttend"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "ExeSQL",
|
||||
"params": {
|
||||
"database": "",
|
||||
"db_type": "mysql",
|
||||
"host": "",
|
||||
"max_records": 1024,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"password": "20010812Yy!",
|
||||
"port": 3306,
|
||||
"sql": "{Agent:WickedGoatsDivide@content}",
|
||||
"username": "13637682833@163.com"
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"Agent:WickedGoatsDivide"
|
||||
]
|
||||
},
|
||||
"Message:ShaggyMasksAttend": {
|
||||
"downstream": [],
|
||||
"obj": {
|
||||
"component_name": "Message",
|
||||
"params": {
|
||||
"content": [
|
||||
"{ExeSQL:TiredShirtsPull@formalized_content}"
|
||||
]
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"ExeSQL:TiredShirtsPull"
|
||||
]
|
||||
},
|
||||
"Retrieval:HappyTiesFilm": {
|
||||
"downstream": [
|
||||
"Agent:WickedGoatsDivide"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Retrieval",
|
||||
"params": {
|
||||
"cross_languages": [],
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"query": "{sys.query}",
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"begin"
|
||||
]
|
||||
},
|
||||
"Retrieval:SmartNewsHammer": {
|
||||
"downstream": [
|
||||
"Agent:WickedGoatsDivide"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Retrieval",
|
||||
"params": {
|
||||
"cross_languages": [],
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"query": "{sys.query}",
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"begin"
|
||||
]
|
||||
},
|
||||
"Retrieval:SweetDancersAppear": {
|
||||
"downstream": [
|
||||
"Agent:WickedGoatsDivide"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Retrieval",
|
||||
"params": {
|
||||
"cross_languages": [],
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"query": "{sys.query}",
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
}
|
||||
},
|
||||
"upstream": [
|
||||
"begin"
|
||||
]
|
||||
},
|
||||
"begin": {
|
||||
"downstream": [
|
||||
"Retrieval:HappyTiesFilm",
|
||||
"Retrieval:SmartNewsHammer",
|
||||
"Retrieval:SweetDancersAppear"
|
||||
],
|
||||
"obj": {
|
||||
"component_name": "Begin",
|
||||
"params": {
|
||||
"enablePrologue": true,
|
||||
"inputs": {},
|
||||
"mode": "conversational",
|
||||
"prologue": "Hi! I'm your SQL assistant. What can I do for you?"
|
||||
}
|
||||
},
|
||||
"upstream": []
|
||||
}
|
||||
},
|
||||
"globals": {
|
||||
"sys.conversation_turns": 0,
|
||||
"sys.files": [],
|
||||
"sys.query": "",
|
||||
"sys.user_id": ""
|
||||
},
|
||||
"graph": {
|
||||
"edges": [
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__beginstart-Retrieval:HappyTiesFilmend",
|
||||
"source": "begin",
|
||||
"sourceHandle": "start",
|
||||
"target": "Retrieval:HappyTiesFilm",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"id": "xy-edge__beginstart-Retrieval:SmartNewsHammerend",
|
||||
"source": "begin",
|
||||
"sourceHandle": "start",
|
||||
"target": "Retrieval:SmartNewsHammer",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__beginstart-Retrieval:SweetDancersAppearend",
|
||||
"source": "begin",
|
||||
"sourceHandle": "start",
|
||||
"target": "Retrieval:SweetDancersAppear",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Retrieval:HappyTiesFilmstart-Agent:WickedGoatsDivideend",
|
||||
"source": "Retrieval:HappyTiesFilm",
|
||||
"sourceHandle": "start",
|
||||
"target": "Agent:WickedGoatsDivide",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Retrieval:SmartNewsHammerstart-Agent:WickedGoatsDivideend",
|
||||
"markerEnd": "logo",
|
||||
"source": "Retrieval:SmartNewsHammer",
|
||||
"sourceHandle": "start",
|
||||
"style": {
|
||||
"stroke": "rgba(91, 93, 106, 1)",
|
||||
"strokeWidth": 1
|
||||
},
|
||||
"target": "Agent:WickedGoatsDivide",
|
||||
"targetHandle": "end",
|
||||
"type": "buttonEdge",
|
||||
"zIndex": 1001
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Retrieval:SweetDancersAppearstart-Agent:WickedGoatsDivideend",
|
||||
"markerEnd": "logo",
|
||||
"source": "Retrieval:SweetDancersAppear",
|
||||
"sourceHandle": "start",
|
||||
"style": {
|
||||
"stroke": "rgba(91, 93, 106, 1)",
|
||||
"strokeWidth": 1
|
||||
},
|
||||
"target": "Agent:WickedGoatsDivide",
|
||||
"targetHandle": "end",
|
||||
"type": "buttonEdge",
|
||||
"zIndex": 1001
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__Agent:WickedGoatsDividestart-ExeSQL:TiredShirtsPullend",
|
||||
"source": "Agent:WickedGoatsDivide",
|
||||
"sourceHandle": "start",
|
||||
"target": "ExeSQL:TiredShirtsPull",
|
||||
"targetHandle": "end"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"isHovered": false
|
||||
},
|
||||
"id": "xy-edge__ExeSQL:TiredShirtsPullstart-Message:ShaggyMasksAttendend",
|
||||
"source": "ExeSQL:TiredShirtsPull",
|
||||
"sourceHandle": "start",
|
||||
"target": "Message:ShaggyMasksAttend",
|
||||
"targetHandle": "end"
|
||||
}
|
||||
],
|
||||
"nodes": [
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"enablePrologue": true,
|
||||
"inputs": {},
|
||||
"mode": "conversational",
|
||||
"prologue": "Hi! I'm your SQL assistant. What can I do for you?"
|
||||
},
|
||||
"label": "Begin",
|
||||
"name": "begin"
|
||||
},
|
||||
"id": "begin",
|
||||
"measured": {
|
||||
"height": 48,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 50,
|
||||
"y": 200
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "left",
|
||||
"targetPosition": "right",
|
||||
"type": "beginNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"cross_languages": [],
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"query": "{sys.query}",
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
},
|
||||
"label": "Retrieval",
|
||||
"name": "Schema"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Retrieval:HappyTiesFilm",
|
||||
"measured": {
|
||||
"height": 96,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 414,
|
||||
"y": 20.5
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "retrievalNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"cross_languages": [],
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"query": "{sys.query}",
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
},
|
||||
"label": "Retrieval",
|
||||
"name": "Question to SQL"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Retrieval:SmartNewsHammer",
|
||||
"measured": {
|
||||
"height": 96,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 406.5,
|
||||
"y": 175.5
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "retrievalNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"cross_languages": [],
|
||||
"empty_response": "",
|
||||
"kb_ids": [],
|
||||
"keywords_similarity_weight": 0.7,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"query": "{sys.query}",
|
||||
"rerank_id": "",
|
||||
"similarity_threshold": 0.2,
|
||||
"top_k": 1024,
|
||||
"top_n": 8,
|
||||
"use_kg": false
|
||||
},
|
||||
"label": "Retrieval",
|
||||
"name": "Database Description"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Retrieval:SweetDancersAppear",
|
||||
"measured": {
|
||||
"height": 96,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 403.5,
|
||||
"y": 328
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "retrievalNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"delay_after_error": 1,
|
||||
"description": "",
|
||||
"exception_default_value": "",
|
||||
"exception_goto": [],
|
||||
"exception_method": "",
|
||||
"frequencyPenaltyEnabled": false,
|
||||
"frequency_penalty": 0.7,
|
||||
"llm_id": "qwen-max@Tongyi-Qianwen",
|
||||
"maxTokensEnabled": false,
|
||||
"max_retries": 3,
|
||||
"max_rounds": 5,
|
||||
"max_tokens": 256,
|
||||
"mcp": [],
|
||||
"message_history_window_size": 12,
|
||||
"outputs": {
|
||||
"content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
}
|
||||
},
|
||||
"presencePenaltyEnabled": false,
|
||||
"presence_penalty": 0.4,
|
||||
"prompts": [
|
||||
{
|
||||
"content": "User's query: {sys.query}\n\nSchema: {Retrieval:HappyTiesFilm@formalized_content}\n\nSamples about question to SQL: {Retrieval:SmartNewsHammer@formalized_content}\n\nDescription about meanings of tables and files: {Retrieval:SweetDancersAppear@formalized_content}",
|
||||
"role": "user"
|
||||
}
|
||||
],
|
||||
"sys_prompt": "### ROLE\nYou are a Text-to-SQL assistant. \nGiven a relational database schema and a natural-language request, you must produce a **single, syntactically-correct MySQL query** that answers the request. \nReturn **nothing except the SQL statement itself**\u2014no code fences, no commentary, no explanations, no comments, no trailing semicolon if not required.\n\n\n### EXAMPLES \n-- Example 1 \nUser: List every product name and its unit price. \nSQL:\nSELECT name, unit_price FROM Products;\n\n-- Example 2 \nUser: Show the names and emails of customers who placed orders in January 2025. \nSQL:\nSELECT DISTINCT c.name, c.email\nFROM Customers c\nJOIN Orders o ON o.customer_id = c.id\nWHERE o.order_date BETWEEN '2025-01-01' AND '2025-01-31';\n\n-- Example 3 \nUser: How many orders have a status of \"Completed\" for each month in 2024? \nSQL:\nSELECT DATE_FORMAT(order_date, '%Y-%m') AS month,\n COUNT(*) AS completed_orders\nFROM Orders\nWHERE status = 'Completed'\n AND YEAR(order_date) = 2024\nGROUP BY month\nORDER BY month;\n\n-- Example 4 \nUser: Which products generated at least \\$10 000 in total revenue? \nSQL:\nSELECT p.id, p.name, SUM(oi.quantity * oi.unit_price) AS revenue\nFROM Products p\nJOIN OrderItems oi ON oi.product_id = p.id\nGROUP BY p.id, p.name\nHAVING revenue >= 10000\nORDER BY revenue DESC;\n\n\n### OUTPUT GUIDELINES\n1. Think through the schema and the request. \n2. Write **only** the final MySQL query. \n3. Do **not** wrap the query in back-ticks or markdown fences. \n4. Do **not** add explanations, comments, or additional text\u2014just the SQL.",
|
||||
"temperature": 0.1,
|
||||
"temperatureEnabled": false,
|
||||
"tools": [],
|
||||
"topPEnabled": false,
|
||||
"top_p": 0.3,
|
||||
"user_prompt": "",
|
||||
"visual_files_var": ""
|
||||
},
|
||||
"label": "Agent",
|
||||
"name": "SQL Generator "
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Agent:WickedGoatsDivide",
|
||||
"measured": {
|
||||
"height": 84,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 981,
|
||||
"y": 174
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "agentNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"database": "",
|
||||
"db_type": "mysql",
|
||||
"host": "",
|
||||
"max_records": 1024,
|
||||
"outputs": {
|
||||
"formalized_content": {
|
||||
"type": "string",
|
||||
"value": ""
|
||||
},
|
||||
"json": {
|
||||
"type": "Array<Object>",
|
||||
"value": []
|
||||
}
|
||||
},
|
||||
"password": "20010812Yy!",
|
||||
"port": 3306,
|
||||
"sql": "{Agent:WickedGoatsDivide@content}",
|
||||
"username": "13637682833@163.com"
|
||||
},
|
||||
"label": "ExeSQL",
|
||||
"name": "ExeSQL"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "ExeSQL:TiredShirtsPull",
|
||||
"measured": {
|
||||
"height": 56,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 1211.5,
|
||||
"y": 212.5
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "ragNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"content": [
|
||||
"{ExeSQL:TiredShirtsPull@formalized_content}"
|
||||
]
|
||||
},
|
||||
"label": "Message",
|
||||
"name": "Message"
|
||||
},
|
||||
"dragging": false,
|
||||
"id": "Message:ShaggyMasksAttend",
|
||||
"measured": {
|
||||
"height": 56,
|
||||
"width": 200
|
||||
},
|
||||
"position": {
|
||||
"x": 1447.3125,
|
||||
"y": 181.5
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "messageNode"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "Searches for relevant database creation statements.\n\nIt should label with a knowledgebase to which the schema is dumped in. You could use \" General \" as parsing method, \" 2 \" as chunk size and \" ; \" as delimiter."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Note Schema"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 188,
|
||||
"id": "Note:ThickClubsFloat",
|
||||
"measured": {
|
||||
"height": 188,
|
||||
"width": 392
|
||||
},
|
||||
"position": {
|
||||
"x": 689,
|
||||
"y": -180.31251144409183
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 392
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "Searches for samples about question to SQL. \n\nYou could use \" Q&A \" as parsing method.\n\nPlease check this dataset:\nhttps://huggingface.co/datasets/InfiniFlow/text2sql"
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Note: Question to SQL"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 154,
|
||||
"id": "Note:ElevenLionsJoke",
|
||||
"measured": {
|
||||
"height": 154,
|
||||
"width": 345
|
||||
},
|
||||
"position": {
|
||||
"x": 693.5,
|
||||
"y": 138
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 345
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "Searches for description about meanings of tables and fields.\n\nYou could use \" General \" as parsing method, \" 2 \" as chunk size and \" ### \" as delimiter."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Note: Database Description"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 158,
|
||||
"id": "Note:ManyRosesTrade",
|
||||
"measured": {
|
||||
"height": 158,
|
||||
"width": 408
|
||||
},
|
||||
"position": {
|
||||
"x": 691.5,
|
||||
"y": 435.69736389555317
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 408
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "The Agent learns which tables may be available based on the responses from three knowledge bases and converts the user's input into SQL statements."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Note: SQL Generator"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"height": 132,
|
||||
"id": "Note:RudeHousesInvite",
|
||||
"measured": {
|
||||
"height": 132,
|
||||
"width": 383
|
||||
},
|
||||
"position": {
|
||||
"x": 1106.9254833678003,
|
||||
"y": 290.5891036507015
|
||||
},
|
||||
"resizing": false,
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode",
|
||||
"width": 383
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"form": {
|
||||
"text": "Connect to your database to execute SQL statements."
|
||||
},
|
||||
"label": "Note",
|
||||
"name": "Note: SQL Executor"
|
||||
},
|
||||
"dragHandle": ".note-drag-handle",
|
||||
"dragging": false,
|
||||
"id": "Note:HungryBatsLay",
|
||||
"measured": {
|
||||
"height": 136,
|
||||
"width": 255
|
||||
},
|
||||
"position": {
|
||||
"x": 1185,
|
||||
"y": -30
|
||||
},
|
||||
"selected": false,
|
||||
"sourcePosition": "right",
|
||||
"targetPosition": "left",
|
||||
"type": "noteNode"
|
||||
}
|
||||
]
|
||||
},
|
||||
"history": [],
|
||||
"messages": [],
|
||||
"path": [],
|
||||
"retrieval": []
|
||||
},
|
||||
"avatar": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAcFBQYFBAcGBQYIBwcIChELCgkJChUPEAwRGBUaGRgVGBcbHichGx0lHRcYIi4iJSgpKywrGiAvMy8qMicqKyr/2wBDAQcICAoJChQLCxQqHBgcKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKir/wAARCAAwADADAREAAhEBAxEB/8QAGgAAAwEBAQEAAAAAAAAAAAAABQYHBAMAAf/EADIQAAEDAwMCBAMHBQAAAAAAAAECAwQFESEABjESEyJBUYEUYXEHFSNSkaGxMjNictH/xAAZAQADAQEBAAAAAAAAAAAAAAACAwQBAAX/xAAlEQACAgICAgEEAwAAAAAAAAABAgARAyESMQRBEyIycYFCkbH/2gAMAwEAAhEDEQA/AKHt2DGpNHXDLrZdWtSrIub39tZ5GbGwPA+pmDFkX7x7idvra85xqQaFNkxUTVIVJQzf8QpBFjbgEenNs681MnA9WJ6fEOKJoxVpSpFLTCo6KEZlTlLcQBIJS20hAv1D1ve+qPk52b0IsYuIGtyt7ZkVVNP+H3A5GdlN2u7GQUBSfmkk8cXH10tmLD6Yl0CG5qmTXBMZiQEMuvupUoKdc6UeEi4FsqOeBxrsKnv1AY+hJ2l5yfu6qQ6/UZtPDRHZ+Eldpsqz1hSrXJGLXwRxqxUQizFs7galPYUFDKT+h15oMuImspQpFiL+2i1A3A1bgxmixUgwlT8ZfgJ/y8P8HXdRuPZoxaqtfkQKbKqF03jtEoDeFKV1lNgfK4H764XfccVUgipvdiwKpFaXMLklFg4juuqV0m3Izg/MaEZCDYMScYqiJOd6xmqfUVfBJcWwtHV1Elfi87k51ViyhrsxL4ivQj1KrFZjTGjTJ8aShdyph5SUqFhwPzX9jpC0dXUqZK3ViHNq7oNaVJjz2Vw5LCrdKknpULZyfMf801MfI1e5NmpAGHUL12EZNFWWlhXSUuWHKgk3xomwEDuDhzLysySU9EndEVyIz3GmxJR+KpBIdCLlRHn/AFEjjIF9AMJlZ8gLZ/qUiJSg1Tu0HO4plFj4FC1h9NYfHIU7kwzgnqCJlKLiCO2s6hKytWiPJoFdfnLW7HS0or6bqXbjg2AI99XjAa3NPlL6jFTduOR5sd1+oyfjQMONqI7QOMA4V7/pqjHjC9SLNn56I1HiqrqTUKM0hbq2lpst5CQSST54xjSPJbICOHUhawISiRQ02T2Uq6AAkqFj/GquJQks1iEr/INLU82bploKSFXusG9xfjHofXQuQUNRoQqQT0ZwVEST5687iZWGgpDsebNbaTDfKVL/ALnbQU/UkKNhjXpFt0BJBVXe/wAGGG6YMlvvNkjlBGmKeJimHIVc0TY89akCKspT28C5BKgDyR7fvrCFI+q/1DQsvVfudYcVyKw49KU6tZyQbmwHFhrOKr9s0uz0CAIpbr3RKo1Rbh02C4HJISp2ZIz0pJ8IQk5Nr/QXznSX6NSnGAwHI/gD/TM+3vtAj1arJpcpgtPdPSH0kFt5wDxAWOOLgamIAFwijCfD927N2tGXuNxlK2W0occUhJWpR+QzzrPjc+pvyqT3Ftf2zbObf7YYecb6CrrDAGfy20wYMkA5Vjbtev7b3nEcXRela27d1ogoWi/rnQsjrqZzHdwzKoKUsqWz3mOnJUlZJt8uokD621w+RdzgynUkUpoUafPZXMnSHlrKluyX1Eug8XF7GwxbgWxrubMO5WmNRsCKtLfcY3rAU0nIltkBP+w0X8Jjdz//2Q=="
|
||||
}
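For orientation, the template above wires a strictly linear pipeline: begin fans out to three Retrieval components (schema DDL, question-to-SQL samples, and table/field descriptions); their formalized_content outputs are interpolated into the Agent's user prompt; the Agent's content output becomes the ExeSQL component's sql parameter; and Message echoes the formalized query result. Below is a minimal sketch of that flow, assuming Python's built-in sqlite3 in place of the template's MySQL connection and a canned llm_complete stub in place of the qwen-max call; every helper name here is illustrative, not RAGFlow's runtime API.

```python
import re
import sqlite3

def tokens(text: str) -> set:
    """Lowercase word tokens, used as a crude relevance signal."""
    return set(re.findall(r"[a-z0-9_]+", text.lower()))

def retrieve(kb: dict, query: str, top_n: int = 8) -> str:
    """Stand-in for a Retrieval component: return up to top_n chunks sharing
    at least one token with the query (the real component performs hybrid
    keyword/vector search with similarity_threshold=0.2)."""
    hits = [c for c in kb["chunks"] if tokens(c) & tokens(query)]
    return "\n".join(hits[:top_n])

def llm_complete(sys_prompt: str, user_prompt: str) -> str:
    """Stand-in for the Agent component (qwen-max in the template);
    a real implementation would call the configured LLM here."""
    return "SELECT name, unit_price FROM Products"  # canned demo answer

def run_pipeline(query: str, schema_kb: dict, samples_kb: dict,
                 desc_kb: dict, conn: sqlite3.Connection) -> str:
    # Three retrievals fan in to one user prompt, mirroring the template's
    # {Retrieval:...@formalized_content} placeholders.
    user_prompt = (
        f"User's query: {query}\n\n"
        f"Schema: {retrieve(schema_kb, query)}\n\n"
        f"Samples about question to SQL: {retrieve(samples_kb, query)}\n\n"
        f"Description about meanings of tables and fields: {retrieve(desc_kb, query)}"
    )
    sql = llm_complete("You are a Text-to-SQL assistant ...", user_prompt)
    rows = conn.execute(sql).fetchmany(1024)  # ExeSQL caps at max_records=1024
    return "\n".join(str(r) for r in rows)    # Message echoes the formalized result

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE Products (name TEXT, unit_price REAL)")
conn.execute("INSERT INTO Products VALUES ('Widget', 9.99)")
kb = {"chunks": ["CREATE TABLE Products (name TEXT, unit_price REAL);"]}
print(run_pipeline("List every product name and its unit price.", kb, kb, kb, conn))
```

The fetchmany(1024) mirrors the max_records cap in the ExeSQL params. Because whatever the model returns is executed verbatim, the system prompt's insistence on a bare SQL statement (no fences, no commentary) is load-bearing, and a real deployment should supply read-only database credentials of its own rather than the placeholder values shipped in this template.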
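All cross-component wiring in the DSL rides on one placeholder convention: {sys.query} reads a global, while {Component:Name@output} reads a named output of an upstream component (for example, {Agent:WickedGoatsDivide@content} supplies the ExeSQL sql parameter above). A plausible resolver, sketched as a simple regex substitution over stored outputs; this is an assumption for illustration, not RAGFlow's actual resolver.

```python
import re

# Outputs recorded by earlier components; keys follow the DSL's naming.
OUTPUTS = {
    "sys.query": "Show me last quarter's top 10 products by revenue",
    "Agent:WickedGoatsDivide@content": "SELECT 1;",
}

def resolve(template: str, outputs: dict) -> str:
    """Replace {sys.*} and {Component:Name@output} placeholders with
    previously computed values; unknown keys are left untouched."""
    def sub(m: re.Match) -> str:
        return str(outputs.get(m.group(1), m.group(0)))
    return re.sub(r"\{([A-Za-z_][\w.]*(?::\w+@\w+)?)\}", sub, template)

print(resolve("{Agent:WickedGoatsDivide@content}", OUTPUTS))  # SELECT 1;
print(resolve("User's query: {sys.query}", OUTPUTS))
```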
1172
agent/templates/stock_research_report.json
Normal file
File diff suppressed because one or more lines are too long
335
agent/templates/technical_docs_qa.json
Normal file
File diff suppressed because one or more lines are too long
369
agent/templates/title_chunker.json
Normal file
File diff suppressed because one or more lines are too long
689
agent/templates/trip_planner.json
Normal file
File diff suppressed because one or more lines are too long
875
agent/templates/web_search_assistant.json
Normal file
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff.