mirror of https://github.com/infiniflow/ragflow.git
Feat: Support tool calling in Generate component (#7572)
### What problem does this PR solve?

Hello, our use case requires the LLM agent to invoke some tools, so I made a simple implementation here. This PR does two things:

1. A simple plugin mechanism based on `pluginlib`. It lives in the `plugin` directory and, for now, only loads plugins from `plugin/embedded_plugins`; in the future it can load plugins from external locations with little code change. Plugins are divided into types, and the only type supported in this PR is `llm_tools`: such plugins must implement the `LLMToolPlugin` class from `plugin/llm_tool_plugin.py`. More plugin types can be added in the future. A sample plugin, `bad_calculator.py`, is placed in `plugin/embedded_plugins/llm_tools`; it accepts two numbers `a` and `b` and returns the wrong result `a + b + 100` (a sketch of such a plugin follows below).
2. A tool selector in the `Generate` component, used to select one or more tools for the LLM. With the `bad_calculator` tool enabled, the `qwen-max` model calls the tool and returns its deliberately wrong result.

### Type of change

- [ ] Bug Fix (non-breaking change which fixes an issue)
- [x] New Feature (non-breaking change which adds functionality)
- [ ] Documentation Update
- [ ] Refactoring
- [ ] Performance Improvement
- [ ] Other (please describe):

Co-authored-by: Yingfeng <yingfeng.zhang@gmail.com>
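As a concrete illustration, here is a minimal sketch of what `bad_calculator.py` could look like. The exact `LLMToolPlugin` contract is defined in `plugin/llm_tool_plugin.py` and is not reproduced in this diff, so the base-class usage, the `get_metadata()` return shape, and the `_version_` attribute below are assumptions inferred from how the `Generate` component consumes plugins (`t.get_metadata()` on the class, `tool().invoke(**arguments)` on an instance):

```python
# Hypothetical sketch of plugin/embedded_plugins/llm_tools/bad_calculator.py.
# The real LLMToolPlugin contract lives in plugin/llm_tool_plugin.py; the
# metadata shape here is an assumption, not the repository's actual API.
from plugin.llm_tool_plugin import LLMToolPlugin


class BadCalculatorPlugin(LLMToolPlugin):
    _version_ = "1.0.0"  # pluginlib-style version attribute (assumed)

    @classmethod
    def get_metadata(cls) -> dict:
        # Consumed by llm_tool_metadata_to_openai_tool() to build the
        # tool definition handed to the chat model.
        return {
            "name": "bad_calculator",
            "description": "Adds two numbers, deliberately getting the result wrong.",
            "parameters": {
                "a": {"type": "number", "description": "The first number"},
                "b": {"type": "number", "description": "The second number"},
            },
        }

    def invoke(self, a: int, b: int) -> str:
        # Intentionally wrong, so it is obvious when the LLM used the tool.
        return str(a + b + 100)
```

The deliberately wrong answer makes it easy to verify that the model's reply came from the tool rather than from its own arithmetic.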
@@ -16,15 +16,29 @@
import json
import re
from functools import partial
from typing import Any
import pandas as pd
from api.db import LLMType
from api.db.services.conversation_service import structure_answer
from api.db.services.llm_service import LLMBundle
from api import settings
from agent.component.base import ComponentBase, ComponentParamBase
from plugin import GlobalPluginManager
from plugin.llm_tool_plugin import llm_tool_metadata_to_openai_tool
from rag.llm.chat_model import ToolCallSession
from rag.prompts import message_fit_in


class LLMToolPluginCallSession(ToolCallSession):
    def tool_call(self, name: str, arguments: dict[str, Any]) -> str:
        tool = GlobalPluginManager.get_llm_tool_by_name(name)

        if tool is None:
            raise ValueError(f"LLM tool {name} does not exist")

        return tool().invoke(**arguments)


class GenerateParam(ComponentParamBase):
    """
    Define the Generate component parameters.
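For orientation, the session above can also be exercised directly. This usage sketch is hypothetical, but the arithmetic follows the sample plugin's `a + b + 100` rule:

```python
# Hypothetical direct use of the call session, bypassing the chat model.
session = LLMToolPluginCallSession()
result = session.tool_call("bad_calculator", {"a": 1, "b": 2})
print(result)  # the sample plugin returns a + b + 100, i.e. "103"
```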
@@ -41,6 +55,7 @@ class GenerateParam(ComponentParamBase):
        self.frequency_penalty = 0
        self.cite = True
        self.parameters = []
        self.llm_enabled_tools = []

    def check(self):
        self.check_decimal_float(self.temperature, "[Generate] Temperature")
@@ -133,6 +148,15 @@ class Generate(ComponentBase):

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

        if len(self._param.llm_enabled_tools) > 0:
            tools = GlobalPluginManager.get_llm_tools_by_names(self._param.llm_enabled_tools)

            chat_mdl.bind_tools(
                LLMToolPluginCallSession(),
                [llm_tool_metadata_to_openai_tool(t.get_metadata()) for t in tools]
            )

        prompt = self._param.prompt

        retrieval_res = []
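Finally, a note on the tool definitions passed to `bind_tools`: assuming `llm_tool_metadata_to_openai_tool` targets the standard OpenAI function-calling schema, the sample plugin's metadata would translate to something like the following (the exact field mapping is an assumption, not taken from this diff):

```python
# Assumed OpenAI-style tool definition for the sample plugin; the exact
# output of llm_tool_metadata_to_openai_tool() may differ.
bad_calculator_tool = {
    "type": "function",
    "function": {
        "name": "bad_calculator",
        "description": "Adds two numbers, deliberately getting the result wrong.",
        "parameters": {
            "type": "object",
            "properties": {
                "a": {"type": "number", "description": "The first number"},
                "b": {"type": "number", "description": "The second number"},
            },
            "required": ["a", "b"],
        },
    },
}
```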