Compare commits

...

9 Commits

Author SHA1 Message Date
2a88ce6be1 Fix: terminate onnx inference session manually (#10076)
### What problem does this PR solve?

Terminate the ONNX inference session and release memory manually.

Issue #5050 
Issue #9992 
Issue #8805

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-09-12 17:18:26 +08:00
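In outline, the commit gives each wrapper that owns an onnxruntime `InferenceSession` an explicit `close()` that drops the session reference and forces a garbage-collection pass, plus a `__del__` safety net. A minimal sketch of the pattern, with illustrative class and path names rather than the exact deepdoc code:

```python
import gc
import logging

import onnxruntime as ort


class TextRecognizer:
    """Illustrative wrapper; the real deepdoc classes carry more state."""

    def __init__(self, model_path: str):
        # The session owns native buffers that Python's GC cannot see,
        # so the last reference should be dropped deliberately.
        self.predictor = ort.InferenceSession(model_path)

    def close(self):
        logging.info("Close TextRecognizer.")
        if hasattr(self, "predictor"):
            del self.predictor  # release the session reference
            gc.collect()        # reclaim native memory now rather than later

    def __del__(self):
        self.close()  # safety net if close() was never called explicitly
```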
664b781d62 Feat: Translate the fields of the embedded dialog box on the agent page #3221 (#10072)
### What problem does this PR solve?

Feat: Translate the fields of the embedded dialog box on the agent page
#3221
### Type of change


- [x] New Feature (non-breaking change which adds functionality)
2025-09-12 16:01:12 +08:00
65571e5254 Feat: dataflow supports text (#10058)
### What problem does this PR solve?

Dataflow supports text.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
2025-09-11 19:03:51 +08:00
aa30f20730 Feat: Agent component support inserting variables(#10048) (#10055)
### What problem does this PR solve?

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
2025-09-11 19:03:19 +08:00
b9b278d441 Docs: How to connect to an MCP server as a client (#10043)
### What problem does this PR solve?

#9769 

### Type of change


- [x] Documentation Update
2025-09-11 19:02:50 +08:00
e1d86cfee3 Feat: add TokenPony model provider (#9932)
### What problem does this PR solve?

Add TokenPony as an LLM provider

Co-authored-by: huangzl <huangzl@shinemo.com>
2025-09-11 17:25:31 +08:00
8ebd07337f The chat dialog box cannot be fully displayed on a small screen #10034 (#10049)
### What problem does this PR solve?

The chat dialog box cannot be fully displayed on a small screen #10034

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-09-11 13:32:23 +08:00
dd584d57b0 Fix: Hide dataflow related functions #9869 (#10045)
### What problem does this PR solve?

Fix: Hide dataflow related functions #9869

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-09-11 12:02:26 +08:00
3d39b96c6f Fix: token num exceed (#10046)
### What problem does this PR solve?

Fix text input exceeding the token limit when using SiliconFlow's embedding
models BAAI/bge-large-zh-v1.5 and BAAI/bge-large-en-v1.5; truncate before
input.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-09-11 12:02:12 +08:00
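The idea in miniature: both BAAI models cap input at 512 tokens, so each batch entry is cut to a conservative budget before the request is sent. A minimal sketch, assuming a `tiktoken`-based count as a stand-in for RAGFlow's own `truncate` helper, which may measure tokens differently:

```python
import tiktoken  # assumed stand-in tokenizer, not RAGFlow's own counter

enc = tiktoken.get_encoding("cl100k_base")


def truncate(text: str, max_tokens: int) -> str:
    """Cut text to at most max_tokens tokens; shorter text passes through."""
    tokens = enc.encode(text)
    return text if len(tokens) <= max_tokens else enc.decode(tokens[:max_tokens])


# 340 leaves headroom under the 512-token hard limit, in case the client-side
# count disagrees slightly with the server's tokenizer.
texts_batch = ["some very long document ...", ""]
texts_batch = [" " if not t.strip() else truncate(t, 340) for t in texts_batch]
```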
22 changed files with 401 additions and 59 deletions

View File

@@ -219,6 +219,70 @@
      }
    ]
  },
  {
    "name": "TokenPony",
    "logo": "",
    "tags": "LLM",
    "status": "1",
    "llm": [
      {
        "llm_name": "qwen3-8b",
        "tags": "LLM,CHAT,131k",
        "max_tokens": 131000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "deepseek-v3-0324",
        "tags": "LLM,CHAT,128k",
        "max_tokens": 128000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "qwen3-32b",
        "tags": "LLM,CHAT,131k",
        "max_tokens": 131000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "kimi-k2-instruct",
        "tags": "LLM,CHAT,128K",
        "max_tokens": 128000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "deepseek-r1-0528",
        "tags": "LLM,CHAT,164k",
        "max_tokens": 164000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "qwen3-coder-480b",
        "tags": "LLM,CHAT,1024k",
        "max_tokens": 1024000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "glm-4.5",
        "tags": "LLM,CHAT,131K",
        "max_tokens": 131000,
        "model_type": "chat",
        "is_tools": true
      },
      {
        "llm_name": "deepseek-v3.1",
        "tags": "LLM,CHAT,128k",
        "max_tokens": 128000,
        "model_type": "chat",
        "is_tools": true
      }
    ]
  },
  {
    "name": "Tongyi-Qianwen",
    "logo": "",

View File

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import logging
import copy
import time

@@ -348,6 +348,12 @@ class TextRecognizer:
        return img

    def close(self):
        # close session and release manually
        logging.info('Close TextRecognizer.')
        del self.predictor
        gc.collect()

    def __call__(self, img_list):
        img_num = len(img_list)
        # Calculate the aspect ratio of all text bars

@@ -395,6 +401,9 @@ class TextRecognizer:
        return rec_res, time.time() - st

    def __del__(self):
        self.close()


class TextDetector:
    def __init__(self, model_dir, device_id: int | None = None):

@@ -479,6 +488,11 @@ class TextDetector:
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def close(self):
        logging.info("Close TextDetector.")
        del self.predictor
        gc.collect()

    def __call__(self, img):
        ori_im = img.copy()
        data = {'image': img}

@@ -508,6 +522,9 @@ class TextDetector:
        return dt_boxes, time.time() - st

    def __del__(self):
        self.close()


class OCR:
    def __init__(self, model_dir=None):

View File

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import logging
import os
import math

@@ -406,6 +406,11 @@ class Recognizer:
            "score": float(scores[i])
        } for i in indices]

    def close(self):
        logging.info("Close recognizer.")
        del self.ort_sess
        gc.collect()

    def __call__(self, image_list, thr=0.7, batch_size=16):
        res = []
        images = []

@@ -430,5 +435,7 @@ class Recognizer:
        return res

    def __del__(self):
        self.close()

View File

@@ -26,6 +26,84 @@ An **Agent** component is essential when you need the LLM to assist with summari
2. If your Agent involves dataset retrieval, ensure you [have properly configured your target knowledge base(s)](../../dataset/configure_knowledge_base.md).
## Quickstart
### 1. Click on an **Agent** component to show its configuration panel
The corresponding configuration panel appears to the right of the canvas. Use this panel to define and fine-tune the **Agent** component's behavior.
### 2. Select your model
Click **Model**, and select a chat model from the dropdown menu.
:::tip NOTE
If no model appears, check whether you have added a chat model on the **Model providers** page.
:::
### 3. Update system prompt (Optional)
The system prompt typically defines your model's role. You can either keep the system prompt as is or customize it to override the default.
### 4. Update user prompt
The user prompt typically defines your model's task. You will find the `sys.query` variable auto-populated. Type `/` or click **(x)** to view or add variables.
In this quickstart, we assume your **Agent** component is used standalone (without tools or sub-Agents beneath); in that case, you may also need to specify retrieved chunks using the `formalized_content` variable:
![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/standalone_user_prompt_variable.jpg)
### 5. Skip Tools and Agent
The **+ Add tools** and **+ Add agent** sections are used *only* when you need to configure your **Agent** component as a planner (with tools or sub-Agents beneath). In this quickstart, we assume your **Agent** component is used standalone (without tools or sub-Agents beneath).
### 6. Choose the next component
When necessary, click the **+** button on the **Agent** component to choose the next component in the workflow from the dropdown list.
## Connect to an MCP server as a client
:::danger IMPORTANT
In this section, we assume your **Agent** will be configured as a planner, with a Tavily tool beneath it.
:::
### 1. Navigate to the MCP configuration page
![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/mcp_page.jpg)
### 2. Configure your Tavily MCP server
Update your MCP server's name, URL (including the API key), server type, and other necessary settings. When configured correctly, the available tools will be displayed.
![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/edit_mcp_server.jpg)
### 3. Navigate to your Agent's editing page
### 4. Connect to your MCP server
1. Click **+ Add tools**:
![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/add_tools.jpg)
2. Click **MCP** to show the available MCP servers.
3. Select your MCP server:
*The target MCP server appears below your Agent component, and your Agent will autonomously decide when to invoke the available tools it offers.*
![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/choose_tavily_mcp_server.jpg)
### 5. Update system prompt to specify trigger conditions (Optional)
To ensure reliable tool calls, you may specify within the system prompt which tasks should trigger each tool call.
### 6. View the available tools of your MCP server
On the canvas, click the newly-populated Tavily server to view and select its available tools:
![](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/tavily_mcp_server.jpg)
## Configurations

### Model

@@ -69,7 +147,7 @@ An **Agent** component relies on keys (variables) to specify its data inputs. It
#### Advanced usage

-From v0.20.5 onwards, four framework-level prompt blocks are available in the **System prompt** field. Type `/` or click **(x)** to view them; they appear under the **Framework** entry in the dropdown menu.
+From v0.20.5 onwards, four framework-level prompt blocks are available in the **System prompt** field, enabling you to customize and *override* prompts at the framework level. Type `/` or click **(x)** to view them; they appear under the **Framework** entry in the dropdown menu.

- `task_analysis` prompt block
  - This block is responsible for analyzing tasks — either a user task or a task assigned by the lead Agent when the **Agent** component is acting as a Sub-Agent.

@@ -100,6 +178,12 @@ From v0.20.5 onwards, four framework-level prompt blocks are available in the **
- `citation_guidelines` prompt block
  - Reference design: [citation_prompt.md](https://github.com/infiniflow/ragflow/blob/main/rag/prompts/citation_prompt.md)
*The screenshots below show the framework prompt blocks available to an **Agent** component, both as a standalone and as a planner (with a Tavily tool below):*
![standalone](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/standalone_agent_framework_block.jpg)
![planner](https://raw.githubusercontent.com/infiniflow/ragflow-docs/main/images/planner_agent_framework_blocks.jpg)
### User prompt

The user-defined prompt. Defaults to `sys.query`, the user query. As a general rule, when using the **Agent** component as a standalone module (not as a planner), you usually need to specify the corresponding **Retrieval** component's output variable (`formalized_content`) here as part of the input to the LLM.

@@ -129,7 +213,7 @@ Defines the maximum number of attempts the agent will make to retry a failed tas
The waiting period in seconds that the agent observes before retrying a failed task, helping to prevent immediate repeated attempts and allowing system conditions to improve. Defaults to 1 second.

-### Max rounds
+### Max reflection rounds

Defines the maximum number of reflection rounds of the selected chat model. Defaults to 1 round.

View File

@@ -977,7 +977,7 @@ The languages that should be translated into, in order to achieve keywords retri
##### metadata_condition: `dict`

-filter condition for meta_fields
+filter condition for `meta_fields`.
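For orientation, the `metadata_condition` argument is a plain dict describing filters over `meta_fields`. A hypothetical sketch only — the key names and operator spellings below are assumptions, not taken from this diff; the authoritative schema lives in the HTTP API reference:

```python
# Hypothetical shape: every key name and operator spelling here is an
# assumption for illustration; consult the HTTP API reference for the
# real schema of metadata_condition.
metadata_condition = {
    "logical_operator": "and",  # assumed: how multiple conditions combine
    "conditions": [
        {
            "name": "author",            # a field declared in meta_fields
            "comparison_operator": "=",  # assumed operator spelling
            "value": "Alice",
        },
    ],
}
```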
#### Returns

View File

@@ -28,11 +28,11 @@ Released on September 10, 2025.
### Improvements

-- Agent Performance Optimized: Improved planning and reflection speed for simple tasks; optimized concurrent tool calls for parallelizable scenarios, significantly reducing overall response time.
-- Agent Prompt Framework exposed: Developers can now customize and override framework-level prompts in the system prompt section, enhancing flexibility and control.
-- Execute SQL Component Enhanced: Replaced the original variable reference component with a text input field, allowing free-form SQL writing with variable support.
-- Chat: Re-enabled Reasoning and Cross-language search.
-- Retrieval API Enhanced: Added metadata filtering support to the [Retrieve chunks](https://ragflow.io/docs/dev/http_api_reference#retrieve-chunks) method.
+- Agent:
+  - Agent Performance Optimized: Improves planning and reflection speed for simple tasks; optimizes concurrent tool calls for parallelizable scenarios, significantly reducing overall response time.
+  - Four framework-level prompt blocks are available in the **System prompt** section, enabling customization and overriding of prompts at the framework level, thereby enhancing flexibility and control. See [here](./guides/agent/agent_component_reference/agent.mdx#advanced-usage).
+- **Execute SQL** component enhanced: Replaces the original variable reference component with a text input field, allowing users to write free-form SQL queries and reference variables.
+- Chat: Re-enables **Reasoning** and **Cross-language search**.

### Added models

@@ -45,7 +45,21 @@ Released on September 10, 2025.
- Dataset: Deleted files remained searchable.
- Chat: Unable to chat with an Ollama model.
-- Agent: Resolved issues including cite toggle failure, task mode requiring dialogue triggers, repeated answers in multi-turn dialogues, and duplicate summarization of parallel execution results.
+- Agent:
+  - A **Cite** toggle failure.
+  - An Agent in task mode still required a dialogue to trigger.
+  - Repeated answers in multi-turn dialogues.
+  - Duplicate summarization of parallel execution results.
+
+### API changes
+
+#### HTTP APIs
+
+- Adds a body parameter `"metadata_condition"` to the [Retrieve chunks](./references/http_api_reference.md#retrieve-chunks) method, enabling metadata-based chunk filtering during retrieval. [#9877](https://github.com/infiniflow/ragflow/pull/9877)
+
+#### Python APIs
+
+- Adds a parameter `metadata_condition` to the [Retrieve chunks](./references/python_api_reference.md#retrieve-chunks) method, enabling metadata-based chunk filtering during retrieval. [#9877](https://github.com/infiniflow/ragflow/pull/9877)

## v0.20.4

View File

@@ -45,7 +45,10 @@ class ParserParam(ProcessParamBase):
            "ppt": [],
            "image": [],
            "email": [],
-           "text": [],
+           "text": [
+               "text",
+               "json"
+           ],
            "audio": [],
            "video": [],
        }

@@ -84,7 +87,12 @@ class ParserParam(ProcessParamBase):
                "parse_method": "ocr",
            },
            "email": {},
-           "text": {},
+           "text": {
+               "suffix": [
+                   "txt"
+               ],
+               "output_format": "json",
+           },
            "audio": {},
            "video": {},
        }

@@ -119,6 +127,11 @@ class ParserParam(ProcessParamBase):
        image_parse_method = image_config.get("parse_method", "")
        self.check_valid_value(image_parse_method.lower(), "Parse method abnormal.", ["ocr"])

        text_config = self.setups.get("text", "")
        if text_config:
            text_output_format = text_config.get("output_format", "")
            self.check_valid_value(text_output_format, "Text output format abnormal.", self.allowed_output_format["text"])

    def get_input_form(self) -> dict[str, dict]:
        return {}

@@ -208,15 +221,13 @@ class Parser(ProcessBase):
        from rag.app.naive import Markdown as naive_markdown_parser
        from rag.nlp import concat_img

-       self.callback(random.randint(1, 5) / 100.0, "Start to work on a Word Processor Document")
+       self.callback(random.randint(1, 5) / 100.0, "Start to work on a markdown.")

        blob = from_upstream.blob
        name = from_upstream.name
        conf = self._param.setups["markdown"]
        self.set_output("output_format", conf["output_format"])

-       print("markdown {conf=}", flush=True)

        markdown_parser = naive_markdown_parser()
        sections, tables = markdown_parser(name, blob, separate_tables=False)

@@ -240,13 +251,33 @@ class Parser(ProcessBase):
        self.set_output("json", json_results)

    def _text(self, from_upstream: ParserFromUpstream):
        from deepdoc.parser.utils import get_text

        self.callback(random.randint(1, 5) / 100.0, "Start to work on a text.")

        blob = from_upstream.blob
        name = from_upstream.name
        conf = self._param.setups["text"]
        self.set_output("output_format", conf["output_format"])

        # parse binary to text
        text_content = get_text(name, binary=blob)
        if conf.get("output_format") == "json":
            result = [{"text": text_content}]
            self.set_output("json", result)
        else:
            result = text_content
            self.set_output("text", result)

    async def _invoke(self, **kwargs):
        function_map = {
            "pdf": self._pdf,
            "markdown": self._markdown,
            "spreadsheet": self._spreadsheet,
-           "word": self._word
+           "word": self._word,
+           "text": self._text,
        }
        try:
            from_upstream = ParserFromUpstream.model_validate(kwargs)
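Concretely, the `_text` step above emits one of two shapes depending on the configured `output_format` — a small illustration with a made-up payload:

```python
# The two output shapes produced by _text above (payload is illustrative).
text_content = "Hello, RAGFlow."

json_output = [{"text": text_content}]  # output_format == "json": set_output("json", ...)
text_output = text_content              # any other format: set_output("text", ...)
```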

View File

@@ -44,7 +44,10 @@
      "markdown"
    ],
    "output_format": "json"
-  }
+  },
+  "text": {
+    "suffix": ["txt"],
+    "output_format": "json"
+  }
  }
}

View File

@@ -1356,6 +1356,14 @@ class Ai302Chat(Base):
        super().__init__(key, model_name, base_url, **kwargs)


class TokenPonyChat(Base):
    _FACTORY_NAME = "TokenPony"

    def __init__(self, key, model_name, base_url="https://ragflow.vip-api.tokenpony.cn/v1", **kwargs):
        if not base_url:
            base_url = "https://ragflow.vip-api.tokenpony.cn/v1"
        super().__init__(key, model_name, base_url, **kwargs)


class MeituanChat(Base):
    _FACTORY_NAME = "Meituan"
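Since `TokenPonyChat` subclasses the same OpenAI-compatible `Base` as its neighbors, constructing it should mirror the other providers — a hypothetical instantiation (the key is a placeholder; `qwen3-8b` comes from the TokenPony model list above):

```python
# Hypothetical usage sketch; argument names follow the constructor above.
chat = TokenPonyChat(key="sk-...", model_name="qwen3-8b")  # default base_url applies
```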

View File

@@ -751,6 +751,10 @@ class SILICONFLOWEmbed(Base):
        token_count = 0
        for i in range(0, len(texts), batch_size):
            texts_batch = texts[i : i + batch_size]
            if self.model_name in ["BAAI/bge-large-zh-v1.5", "BAAI/bge-large-en-v1.5"]:
                # limit 512, 340 is almost safe
                texts_batch = [" " if not text.strip() else truncate(text, 340) for text in texts_batch]
            else:
                texts_batch = [" " if not text.strip() else text for text in texts_batch]
            payload = {

File diff suppressed because one or more lines are too long

(New image added: 16 KiB.)

View File

@@ -139,7 +139,7 @@ function EmbedDialog({
        </form>
      </Form>
      <div>
-       <span>Embed code</span>
+       <span>{t('embedCode', { keyPrefix: 'search' })}</span>
        <HightLightMarkdown>{text}</HightLightMarkdown>
      </div>
      <div className=" font-medium mt-4 mb-1">

View File

@@ -54,6 +54,7 @@ export enum LLMFactory {
  DeepInfra = 'DeepInfra',
  Grok = 'Grok',
  XAI = 'xAI',
  TokenPony = 'TokenPony',
  Meituan = 'Meituan',
}

@@ -114,5 +115,6 @@ export const IconMap = {
  [LLMFactory.DeepInfra]: 'deepinfra',
  [LLMFactory.Grok]: 'grok',
  [LLMFactory.XAI]: 'xai',
  [LLMFactory.TokenPony]: 'token-pony',
  [LLMFactory.Meituan]: 'longcat',
};

View File

@@ -632,6 +632,8 @@ General实体和关系提取提示来自 GitHub - microsoft/graphrag基于
    },
    cancel: '取消',
    chatSetting: '聊天设置',
    avatarHidden: '隐藏头像',
    locale: '地区',
  },
  setting: {
    profile: '概要',

View File

@@ -62,7 +62,7 @@ function AgentChatBox() {
  return (
    <>
-     <section className="flex flex-1 flex-col px-5 h-[90vh]">
+     <section className="flex flex-1 flex-col px-5 min-h-0 pb-4">
        <div className="flex-1 overflow-auto" ref={messageContainerRef}>
          <div>
            {/* <Spin spinning={sendLoading}> */}

View File

@@ -9,7 +9,7 @@ export function ChatSheet({ hideModal }: IModalProps<any>) {
  return (
    <Sheet open modal={false} onOpenChange={hideModal}>
      <SheetContent
-       className={cn('top-20 p-0')}
+       className={cn('top-20 bottom-0 p-0 flex flex-col h-auto')}
        onInteractOutside={(e) => e.preventDefault()}
      >
        <SheetTitle className="hidden"></SheetTitle>

View File

@@ -145,7 +145,7 @@ function AgentForm({ node }: INextOperatorForm) {
          <PromptEditor
            {...field}
            placeholder={t('flow.messagePlaceholder')}
-           showToolbar={false}
+           showToolbar={true}
            extraOptions={extraOptions}
          ></PromptEditor>
        </FormControl>

@@ -166,7 +166,7 @@ function AgentForm({ node }: INextOperatorForm) {
        <section>
          <PromptEditor
            {...field}
-           showToolbar={false}
+           showToolbar={true}
          ></PromptEditor>
        </section>
      </FormControl>

View File

@@ -9,13 +9,7 @@ import { cn, formatBytes } from '@/lib/utils';
import { Routes } from '@/routes';
import { formatPureDate } from '@/utils/date';
import { isEmpty } from 'lodash';
-import {
-  Banknote,
-  Database,
-  DatabaseZap,
-  FileSearch2,
-  GitGraph,
-} from 'lucide-react';
+import { Banknote, Database, FileSearch2, GitGraph } from 'lucide-react';
import { useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { useHandleMenuClick } from './hooks';

@@ -34,11 +28,11 @@ export function SideBar({ refreshCount }: PropType) {
  const items = useMemo(() => {
    const list = [
-     {
-       icon: DatabaseZap,
-       label: t(`knowledgeDetails.overview`),
-       key: Routes.DataSetOverview,
-     },
+     // {
+     //   icon: DatabaseZap,
+     //   label: t(`knowledgeDetails.overview`),
+     //   key: Routes.DataSetOverview,
+     // },
      {
        icon: Database,
        label: t(`knowledgeDetails.dataset`),

View File

@@ -17,16 +17,9 @@ import {
import { Input } from '@/components/ui/input';
import { IModalProps } from '@/interfaces/common';
import { zodResolver } from '@hookform/resolvers/zod';
-import { useForm, useWatch } from 'react-hook-form';
+import { useForm } from 'react-hook-form';
import { useTranslation } from 'react-i18next';
import { z } from 'zod';
-import {
-  DataExtractKnowledgeItem,
-  DataFlowItem,
-  EmbeddingModelItem,
-  ParseTypeItem,
-  TeamItem,
-} from '../dataset/dataset-setting/configuration/common-item';

const FormId = 'dataset-creating-form';

@@ -54,10 +47,6 @@ export function InputForm({ onOk }: IModalProps<any>) {
  function onSubmit(data: z.infer<typeof FormSchema>) {
    onOk?.(data.name);
  }
-  const parseType = useWatch({
-    control: form.control,
-    name: 'parseType',
-  });

  return (
    <Form {...form}>
      <form

@@ -84,15 +73,6 @@ export function InputForm({ onOk }: IModalProps<any>) {
          </FormItem>
        )}
      />
-     <EmbeddingModelItem line={2} />
-     <ParseTypeItem />
-     {parseType === 2 && (
-       <>
-         <DataFlowItem />
-         <DataExtractKnowledgeItem />
-         <TeamItem />
-       </>
-     )}
    </form>
  </Form>
);

View File

@@ -0,0 +1,123 @@
import { ButtonLoading } from '@/components/ui/button';
import {
  Dialog,
  DialogContent,
  DialogFooter,
  DialogHeader,
  DialogTitle,
} from '@/components/ui/dialog';
import {
  Form,
  FormControl,
  FormField,
  FormItem,
  FormLabel,
  FormMessage,
} from '@/components/ui/form';
import { Input } from '@/components/ui/input';
import { IModalProps } from '@/interfaces/common';
import { zodResolver } from '@hookform/resolvers/zod';
import { useForm, useWatch } from 'react-hook-form';
import { useTranslation } from 'react-i18next';
import { z } from 'zod';
import {
  DataExtractKnowledgeItem,
  DataFlowItem,
  EmbeddingModelItem,
  ParseTypeItem,
  TeamItem,
} from '../dataset/dataset-setting/configuration/common-item';

const FormId = 'dataset-creating-form';

export function InputForm({ onOk }: IModalProps<any>) {
  const { t } = useTranslation();

  const FormSchema = z.object({
    name: z
      .string()
      .min(1, {
        message: t('knowledgeList.namePlaceholder'),
      })
      .trim(),
    parseType: z.number().optional(),
  });

  const form = useForm<z.infer<typeof FormSchema>>({
    resolver: zodResolver(FormSchema),
    defaultValues: {
      name: '',
      parseType: 1,
    },
  });

  function onSubmit(data: z.infer<typeof FormSchema>) {
    onOk?.(data.name);
  }

  const parseType = useWatch({
    control: form.control,
    name: 'parseType',
  });

  return (
    <Form {...form}>
      <form
        onSubmit={form.handleSubmit(onSubmit)}
        className="space-y-6"
        id={FormId}
      >
        <FormField
          control={form.control}
          name="name"
          render={({ field }) => (
            <FormItem>
              <FormLabel>
                <span className="text-destructive mr-1"> *</span>
                {t('knowledgeList.name')}
              </FormLabel>
              <FormControl>
                <Input
                  placeholder={t('knowledgeList.namePlaceholder')}
                  {...field}
                />
              </FormControl>
              <FormMessage />
            </FormItem>
          )}
        />
        <EmbeddingModelItem line={2} />
        <ParseTypeItem />
        {parseType === 2 && (
          <>
            <DataFlowItem />
            <DataExtractKnowledgeItem />
            <TeamItem />
          </>
        )}
      </form>
    </Form>
  );
}

export function DatasetCreatingDialog({
  hideModal,
  onOk,
  loading,
}: IModalProps<any>) {
  const { t } = useTranslation();

  return (
    <Dialog open onOpenChange={hideModal}>
      <DialogContent className="sm:max-w-[425px]">
        <DialogHeader>
          <DialogTitle>{t('knowledgeList.createKnowledgeBase')}</DialogTitle>
        </DialogHeader>
        <InputForm onOk={onOk}></InputForm>
        <DialogFooter>
          <ButtonLoading type="submit" form={FormId} loading={loading}>
            {t('common.save')}
          </ButtonLoading>
        </DialogFooter>
      </DialogContent>
    </Dialog>
  );
}

View File

@@ -37,6 +37,7 @@ const llmFactoryToUrlMap = {
    'https://huggingface.co/docs/text-embeddings-inference/quick_tour',
  [LLMFactory.GPUStack]: 'https://docs.gpustack.ai/latest/quickstart',
  [LLMFactory.VLLM]: 'https://docs.vllm.ai/en/latest/',
  [LLMFactory.TokenPony]: 'https://docs.tokenpony.cn/#/',
};

type LlmFactory = keyof typeof llmFactoryToUrlMap;