Compare commits

...

11 Commits

Author SHA1 Message Date
38289084a8 Chore/upgrade dashscope to 1.25.11 (#13007)
## Description

Upgrade the dashscope package to support the text-embedding-v4 model.

## Changes

- Update the dashscope version from 1.20.11 to 1.25.11 in pyproject.toml

## Reason
The text-embedding-v4 model requires dashscope >= 1.25.0 to function
properly. This upgrade ensures compatibility with the latest embedding
models.
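As a rough sketch of what the upgrade enables (assuming dashscope's standard `TextEmbedding.call` API and a `DASHSCOPE_API_KEY` set in the environment; only the model name is tied to this PR):

```python
import dashscope  # requires dashscope >= 1.25.0 for text-embedding-v4

# Hypothetical usage sketch; assumes DASHSCOPE_API_KEY is configured.
resp = dashscope.TextEmbedding.call(
    model="text-embedding-v4",
    input="What is Retrieval-Augmented Generation?",
)
if resp.status_code == 200:
    vec = resp.output["embeddings"][0]["embedding"]
    print(len(vec))  # embedding dimension
```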

Co-authored-by: Clint-chan <Clint-chan@users.noreply.github.com>
2026-02-06 19:06:41 +08:00
279b01a028 Feat: MCP host mode supports STREAMABLE-HTTP endpoint (#13037)
### What problem does this PR solve?

MCP host mode supports STREAMABLE-HTTP endpoint

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
2026-02-06 16:22:43 +08:00
c130ac0f88 Fix: Lazy loading adds a loading state to the page (#13038)
### What problem does this PR solve?

Fix: Lazy loading adds a loading state to the page

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2026-02-06 16:20:52 +08:00
301ed76aa4 Fix: task cancel (#13034)
### What problem does this PR solve?

Fixes task cancellation (#11745).

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2026-02-06 14:48:24 +08:00
13a6545e48 fix(rdbms): use brackets around field names to preserve distinction after chunking (#13010)
Fix RDBMS field separation after chunking by wrapping field names in
brackets (【field】: value). This ensures fields remain distinguishable
even when TxtParser strips newline delimiters during chunk merging.
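A minimal illustration of the failure mode and the fix (hypothetical data, not the connector code itself):

```python
# Why 【field】 markers survive a merge that strips newline delimiters.
row = {"name": "Alice", "dept": "Sales"}

plain = "\n".join(f"{col}: {val}" for col, val in row.items())
bracketed = "\n".join(f"【{col}】: {val}" for col, val in row.items())

# A TxtParser-style merge that drops the newline delimiters:
print(plain.replace("\n", ""))      # name: Alicedept: Sales  -> field boundary lost
print(bracketed.replace("\n", ""))  # 【name】: Alice【dept】: Sales -> still separable
```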

Closes #13001

Co-authored-by: mkdev11 <YOUR_GITHUB_ID+MkDev11@users.noreply.github.com>
2026-02-06 14:44:58 +08:00
5333e764fc fix: optimize Excel row counting for files with abnormal max_row (#13018)
### What problem does this PR solve?

Some Excel files have abnormal `max_row` metadata (e.g.,
`max_row=1,048,534` with only 300 actual data rows). This causes:
- `row_number()` returns incorrect count, creating 350+ tasks instead of
1
- `list(ws.rows)` iterates through millions of empty rows, causing
system hang

This PR uses binary search to find the actual last row with data.
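The core idea, as a simplified sketch assuming data rows are contiguous from row 1 (the actual patch in the excel_parser.py hunk below also probes a 10-row window at each midpoint and rescans a 500-row tail to tolerate gaps):

```python
# Binary-search the last row that actually contains data: O(log n) probes
# instead of iterating ~1M mostly-empty rows.
def last_data_row(has_data, max_row):
    left, right, last = 1, max_row, 0
    while left <= right:
        mid = (left + right) // 2
        if has_data(mid):
            last = max(last, mid)
            left = mid + 1   # data at mid: answer is at or after mid
        else:
            right = mid - 1  # empty at mid: answer is before mid
    return last

# e.g. 300 real rows inside a sheet reporting max_row=1,048,534:
print(last_data_row(lambda r: r <= 300, 1_048_534))  # -> 300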

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
- [x] Performance Improvement

Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-06 14:43:52 +08:00
00c392e633 Fix: dataset page enter key to save (#13035)
### What problem does this PR solve?

Fix the dataset page so the Enter key saves the form.
Fix the warnings and optimize the code.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2026-02-06 14:42:16 +08:00
4b0d65f089 Fix: correct llm_id for graphrag (#13032)
### What problem does this PR solve?

Fix: correct llm_id for graphrag (#13030)

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2026-02-06 14:05:32 +08:00
6a17e8cc85 Update basics (#13033)
### What problem does this PR solve?

### Type of change

- [x] Documentation Update
2026-02-06 13:15:33 +08:00
a68c56def7 fix: ensure all metadata filters are processed in AND logic (#13019)
### What problem does this PR solve?

Bug: When a filter key doesn't exist in metas or has no matching values,
the filter was skipped entirely, causing AND logic to fail.

Example:
- Filter 1: meeting_series = '宏观早8点' (matches doc1, doc2, doc3)
- Filter 2: date = '2026-03-05' (no matches)
- Expected: [] (AND should return empty)
- Actual: [doc1, doc2, doc3] (Filter 2 was skipped)

Root cause:
Old logic iterated metas.items() first, then filters. If a filter's key
wasn't in metas, it was never processed.

Fix:
Iterate filters first, then look up in metas. If key not found, treat as
no match (empty result), which correctly applies AND logic.

Changes:
- Changed loop order from 'for k in metas: for f in filters' to 'for f
in filters: if f.key in metas'
- Explicitly handle missing keys as empty results
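
A condensed sketch of the corrected control flow (simplified to equality matching; the real `meta_filter` dispatches on `f["op"]` via `filter_out`, as shown in the hunk below):

```python
# Iterate the filters, not the metas, so a key missing from metas yields an
# empty match set and correctly zeroes out an AND intersection.
def meta_filter_and(metas: dict, filters: list[dict]) -> list:
    doc_ids = None
    for f in filters:
        ids = set(metas.get(f["key"], {}).get(f["value"], []))  # missing key -> empty
        doc_ids = ids if doc_ids is None else doc_ids & ids
        if not doc_ids:
            return []  # one empty filter result empties the AND
    return list(doc_ids or [])

metas = {"meeting_series": {"宏观早8点": ["doc1", "doc2", "doc3"]}}
filters = [{"key": "meeting_series", "value": "宏观早8点"},
           {"key": "date", "value": "2026-03-05"}]
print(meta_filter_and(metas, filters))  # -> [] (was [doc1, doc2, doc3] before the fix)
```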


### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)

Co-authored-by: Clint-chan <Clint-chan@users.noreply.github.com>
2026-02-06 12:57:27 +08:00
0586d5148d fixed vulnerabilities CVE-2025-53859 & CVE-2025-23419 (#13016)
### What problem does this PR solve?

Fixed vulnerabilities CVE-2025-53859 & CVE-2025-23419 by updating nginx
to 1.29.5-1~noble

### Type of change

- [X] Bug Fix (non-breaking change which fixes an issue)
<img width="709" height="54" alt="image"
src="https://github.com/user-attachments/assets/d8c3518f-bca4-4314-a85c-1aed1678f72e"
/>
2026-02-06 12:55:06 +08:00
19 changed files with 388 additions and 205 deletions

View File

@ -48,13 +48,22 @@ RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
apt install -y libatk-bridge2.0-0 && \
apt install -y libpython3-dev libgtk-4-1 libnss3 xdg-utils libgbm-dev && \
apt install -y libjemalloc-dev && \
apt install -y nginx unzip curl wget git vim less && \
apt install -y gnupg unzip curl wget git vim less && \
apt install -y ghostscript && \
apt install -y pandoc && \
apt install -y texlive && \
apt install -y fonts-freefont-ttf fonts-noto-cjk && \
apt install -y postgresql-client
ARG NGINX_VERSION=1.29.5-1~noble
RUN --mount=type=cache,id=ragflow_apt,target=/var/cache/apt,sharing=locked \
mkdir -p /etc/apt/keyrings && \
curl -fsSL https://nginx.org/keys/nginx_signing.key | gpg --dearmor -o /etc/apt/keyrings/nginx-archive-keyring.gpg && \
echo "deb [signed-by=/etc/apt/keyrings/nginx-archive-keyring.gpg] https://nginx.org/packages/mainline/ubuntu/ noble nginx" > /etc/apt/sources.list.d/nginx.list && \
apt update && \
apt install -y nginx=${NGINX_VERSION} && \
apt-mark hold nginx
# Install uv
RUN --mount=type=bind,from=infiniflow/ragflow_deps:latest,source=/,target=/deps \
if [ "$NEED_MIRROR" == "1" ]; then \

View File

@ -617,7 +617,9 @@ async def run():
return get_data_error_result(message="Document not found!")
if str(req["run"]) == TaskStatus.CANCEL.value:
if str(doc.run) == TaskStatus.RUNNING.value:
tasks = list(TaskService.query(doc_id=id))
has_unfinished_task = any((task.progress or 0) < 1 for task in tasks)
if str(doc.run) in [TaskStatus.RUNNING.value, TaskStatus.CANCEL.value] or has_unfinished_task:
cancel_all_task_of(id)
else:
return get_data_error_result(message="Cannot cancel a task that is not in RUNNING status")

View File

@ -204,7 +204,9 @@ class RDBMSConnector(LoadConnector, PollConnector):
value = row_dict[col]
if isinstance(value, (dict, list)):
value = json.dumps(value, ensure_ascii=False)
content_parts.append(f"{col}: {value}")
# Use brackets around field name to ensure it's distinguishable
# after chunking (TxtParser strips \n delimiters during merge)
content_parts.append(f"{col}】: {value}")
content = "\n".join(content_parts)

View File

@ -138,20 +138,24 @@ def meta_filter(metas: dict, filters: list[dict], logic: str = "and"):
ids.extend(docids)
return ids
for k, v2docs in metas.items():
for f in filters:
if k != f["key"]:
continue
for f in filters:
k = f["key"]
if k not in metas:
# Key not found in metas: treat as no match
ids = []
else:
v2docs = metas[k]
ids = filter_out(v2docs, f["op"], f["value"])
if not doc_ids:
doc_ids = set(ids)
if not doc_ids:
doc_ids = set(ids)
else:
if logic == "and":
doc_ids = doc_ids & set(ids)
if not doc_ids:
return []
else:
if logic == "and":
doc_ids = doc_ids & set(ids)
if not doc_ids:
return []
else:
doc_ids = doc_ids | set(ids)
doc_ids = doc_ids | set(ids)
return list(doc_ids)

View File

@ -156,6 +156,55 @@ class RAGFlowExcelParser:
continue
return raw_items
@staticmethod
def _get_actual_row_count(ws):
max_row = ws.max_row
if not max_row:
return 0
if max_row <= 10000:
return max_row
max_col = min(ws.max_column or 1, 50)
def row_has_data(row_idx):
for col_idx in range(1, max_col + 1):
cell = ws.cell(row=row_idx, column=col_idx)
if cell.value is not None and str(cell.value).strip():
return True
return False
if not any(row_has_data(i) for i in range(1, min(101, max_row + 1))):
return 0
left, right = 1, max_row
last_data_row = 1
while left <= right:
mid = (left + right) // 2
found = False
for r in range(mid, min(mid + 10, max_row + 1)):
if row_has_data(r):
found = True
last_data_row = max(last_data_row, r)
break
if found:
left = mid + 1
else:
right = mid - 1
for r in range(last_data_row, min(last_data_row + 500, max_row + 1)):
if row_has_data(r):
last_data_row = r
return last_data_row
@staticmethod
def _get_rows_limited(ws):
actual_rows = RAGFlowExcelParser._get_actual_row_count(ws)
if actual_rows == 0:
return []
return list(ws.iter_rows(min_row=1, max_row=actual_rows))
def html(self, fnm, chunk_rows=256):
from html import escape
@ -171,7 +220,7 @@ class RAGFlowExcelParser:
for sheetname in wb.sheetnames:
ws = wb[sheetname]
try:
rows = list(ws.rows)
rows = RAGFlowExcelParser._get_rows_limited(ws)
except Exception as e:
logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
continue
@ -223,7 +272,7 @@ class RAGFlowExcelParser:
for sheetname in wb.sheetnames:
ws = wb[sheetname]
try:
rows = list(ws.rows)
rows = RAGFlowExcelParser._get_rows_limited(ws)
except Exception as e:
logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
continue
@ -238,6 +287,8 @@ class RAGFlowExcelParser:
t = str(ti[i].value) if i < len(ti) else ""
t += ("" if t else "") + str(c.value)
fields.append(t)
if not fields:
continue
line = "; ".join(fields)
if sheetname.lower().find("sheet") < 0:
line += " ——" + sheetname
@ -249,14 +300,14 @@ class RAGFlowExcelParser:
if fnm.split(".")[-1].lower().find("xls") >= 0:
wb = RAGFlowExcelParser._load_excel_to_workbook(BytesIO(binary))
total = 0
for sheetname in wb.sheetnames:
try:
ws = wb[sheetname]
total += len(list(ws.rows))
except Exception as e:
logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
continue
try:
ws = wb[sheetname]
total += RAGFlowExcelParser._get_actual_row_count(ws)
except Exception as e:
logging.warning(f"Skip sheet '{sheetname}' due to rows access error: {e}")
continue
return total
if fnm.split(".")[-1].lower() in ["csv", "txt"]:

View File

@ -31,7 +31,7 @@ At its core, an Agent Context Engine is built on a triumvirate of next-generatio
2. The Memory Layer: An Agent's intelligence is defined by its ability to learn from interaction. The Memory Layer is a specialized retrieval system for dynamic, episodic data: conversation history, user preferences, and the agent's own internal state (e.g., "waiting for human input"). It manages the lifecycle of this data—storing raw dialogue, triggering summarization into semantic memory, and retrieving relevant past interactions to provide continuity and personalization. Technologically, it is a close sibling to RAG, but focused on a temporal stream of data.
3. The Tool Orchestrator: As MCP (Model Context Protocol) enables the connection of hundreds of internal services as tools, a new problem arises: tool selection. The Context Engine solves this with Tool Retrieval. Instead of dumping all tool descriptions into the prompt, it maintains an index of tools and—critically—an index of Playbooks or Guidelines (best practices on when and how to use tools). For a given task, it retrieves only the most relevant tools and instructions, transforming the LLM's job from "searching a haystack" to "following a recipe."
3. The Tool Orchestrator: As MCP (Model Context Protocol) enables the connection of hundreds of internal services as tools, a new problem arises: tool selection. The Context Engine solves this with Tool Retrieval. Instead of dumping all tool descriptions into the prompt, it maintains an index of tools and—critically—an index of Skills (best practices on when and how to use tools). For a given task, it retrieves only the most relevant tools and instructions, transforming the LLM's job from "searching a haystack" to "following a recipe."
## Why do we need a dedicated engine? The case for a unified substrate

View File

@ -3,7 +3,7 @@ sidebar_position: 1
slug: /what-is-rag
---
# What is Retreival-Augmented-Generation (RAG)?
# What is Retrieval-Augmented-Generation (RAG)?
Since large language models (LLMs) became the focus of technology, their ability to handle general knowledge has been astonishing. However, when questions shift to internal corporate documents, proprietary knowledge bases, or real-time data, the limitations of LLMs become glaringly apparent: they cannot access private information outside their training data. Retrieval-Augmented Generation (RAG) was born precisely to address this core need. Before an LLM generates an answer, it first retrieves the most relevant context from an external knowledge base and inputs it as "reference material" to the LLM, thereby guiding it to produce accurate answers. In short, RAG elevates LLMs from "relying on memory" to "having evidence to rely on," significantly improving their accuracy and trustworthiness in specialized fields and real-time information queries.

View File

@ -19,6 +19,10 @@ from mcp.client.streamable_http import streamablehttp_client
async def main():
try:
# To access the RAGFlow server in `host` mode, you need to attach an `api_key` to each request for identification.
# async with streamablehttp_client("http://localhost:9382/mcp/", headers={"api_key": "ragflow-fixS-TicrohljzFkeLLWIaVhW7XlXPXIUW5solFor6o"}) as (read_stream, write_stream, _):
# Or follow the requirements of OAuth 2.1 Section 5 with Authorization header
# async with streamablehttp_client("http://localhost:9382/mcp/", headers={"Authorization": "Bearer ragflow-fixS-TicrohljzFkeLLWIaVhW7XlXPXIUW5solFor6o"}) as (read_stream, write_stream, _):
async with streamablehttp_client("http://localhost:9382/mcp/") as (read_stream, write_stream, _):
async with ClientSession(read_stream, write_stream) as session:
await session.initialize()

View File

@ -22,18 +22,18 @@ from collections import OrderedDict
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from functools import wraps
from typing import Any
import click
import httpx
import mcp.types as types
from mcp.server.lowlevel import Server
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import JSONResponse, Response
from starlette.routing import Mount, Route
from strenum import StrEnum
import mcp.types as types
from mcp.server.lowlevel import Server
class LaunchMode(StrEnum):
SELF_HOST = "self-host"
@ -68,10 +68,6 @@ class RAGFlowConnector:
self.api_url = f"{self.base_url}/api/{self.version}"
self._async_client = None
def bind_api_key(self, api_key: str):
self.api_key = api_key
self.authorization_header = {"Authorization": f"Bearer {self.api_key}"}
async def _get_client(self):
if self._async_client is None:
self._async_client = httpx.AsyncClient(timeout=httpx.Timeout(60.0))
@ -82,16 +78,18 @@ class RAGFlowConnector:
await self._async_client.aclose()
self._async_client = None
async def _post(self, path, json=None, stream=False, files=None):
if not self.api_key:
async def _post(self, path, json=None, stream=False, files=None, api_key: str = ""):
if not api_key:
return None
client = await self._get_client()
res = await client.post(url=self.api_url + path, json=json, headers=self.authorization_header)
res = await client.post(url=self.api_url + path, json=json, headers={"Authorization": f"Bearer {api_key}"})
return res
async def _get(self, path, params=None):
async def _get(self, path, params=None, api_key: str = ""):
if not api_key:
return None
client = await self._get_client()
res = await client.get(url=self.api_url + path, params=params, headers=self.authorization_header)
res = await client.get(url=self.api_url + path, params=params, headers={"Authorization": f"Bearer {api_key}"})
return res
def _is_cache_valid(self, ts):
@ -129,8 +127,18 @@ class RAGFlowConnector:
self._document_metadata_cache[dataset_id] = (doc_id_meta_list, self._get_expiry_timestamp())
self._document_metadata_cache.move_to_end(dataset_id)
async def list_datasets(self, page: int = 1, page_size: int = 1000, orderby: str = "create_time", desc: bool = True, id: str | None = None, name: str | None = None):
res = await self._get("/datasets", {"page": page, "page_size": page_size, "orderby": orderby, "desc": desc, "id": id, "name": name})
async def list_datasets(
self,
*,
api_key: str,
page: int = 1,
page_size: int = 1000,
orderby: str = "create_time",
desc: bool = True,
id: str | None = None,
name: str | None = None,
):
res = await self._get("/datasets", {"page": page, "page_size": page_size, "orderby": orderby, "desc": desc, "id": id, "name": name}, api_key=api_key)
if not res or res.status_code != 200:
raise Exception([types.TextContent(type="text", text="Cannot process this operation.")])
@ -145,6 +153,8 @@ class RAGFlowConnector:
async def retrieval(
self,
*,
api_key: str,
dataset_ids,
document_ids=None,
question="",
@ -162,7 +172,7 @@ class RAGFlowConnector:
# If no dataset_ids provided or empty list, get all available dataset IDs
if not dataset_ids:
dataset_list_str = await self.list_datasets()
dataset_list_str = await self.list_datasets(api_key=api_key)
dataset_ids = []
# Parse the dataset list to extract IDs
@ -189,7 +199,7 @@ class RAGFlowConnector:
"document_ids": document_ids,
}
# Send a POST request to the backend service (using requests library as an example, actual implementation may vary)
res = await self._post("/retrieval", json=data_json)
res = await self._post("/retrieval", json=data_json, api_key=api_key)
if not res or res.status_code != 200:
raise Exception([types.TextContent(type="text", text="Cannot process this operation.")])
@ -199,7 +209,7 @@ class RAGFlowConnector:
chunks = []
# Cache document metadata and dataset information
document_cache, dataset_cache = await self._get_document_metadata_cache(dataset_ids, force_refresh=force_refresh)
document_cache, dataset_cache = await self._get_document_metadata_cache(dataset_ids, api_key=api_key, force_refresh=force_refresh)
# Process chunks with enhanced field mapping including per-chunk metadata
for chunk_data in data.get("chunks", []):
@ -228,7 +238,7 @@ class RAGFlowConnector:
raise Exception([types.TextContent(type="text", text=res.get("message"))])
async def _get_document_metadata_cache(self, dataset_ids, force_refresh=False):
async def _get_document_metadata_cache(self, dataset_ids, *, api_key: str, force_refresh=False):
"""Cache document metadata for all documents in the specified datasets"""
document_cache = {}
dataset_cache = {}
@ -238,7 +248,7 @@ class RAGFlowConnector:
dataset_meta = None if force_refresh else self._get_cached_dataset_metadata(dataset_id)
if not dataset_meta:
# First get dataset info for name
dataset_res = await self._get("/datasets", {"id": dataset_id, "page_size": 1})
dataset_res = await self._get("/datasets", {"id": dataset_id, "page_size": 1}, api_key=api_key)
if dataset_res and dataset_res.status_code == 200:
dataset_data = dataset_res.json()
if dataset_data.get("code") == 0 and dataset_data.get("data"):
@ -255,7 +265,9 @@ class RAGFlowConnector:
doc_id_meta_list = []
docs = {}
while page:
docs_res = await self._get(f"/datasets/{dataset_id}/documents?page={page}")
docs_res = await self._get(f"/datasets/{dataset_id}/documents?page={page}", api_key=api_key)
if not docs_res:
break
docs_data = docs_res.json()
if docs_data.get("code") == 0 and docs_data.get("data", {}).get("docs"):
for doc in docs_data["data"]["docs"]:
@ -335,9 +347,59 @@ async def sse_lifespan(server: Server) -> AsyncIterator[dict]:
app = Server("ragflow-mcp-server", lifespan=sse_lifespan)
AUTH_TOKEN_STATE_KEY = "ragflow_auth_token"
def with_api_key(required=True):
def _to_text(value: Any) -> str:
if isinstance(value, bytes):
return value.decode(errors="ignore")
return str(value)
def _extract_token_from_headers(headers: Any) -> str | None:
if not headers or not hasattr(headers, "get"):
return None
auth_keys = ("authorization", "Authorization", b"authorization", b"Authorization")
for key in auth_keys:
auth = headers.get(key)
if not auth:
continue
auth_text = _to_text(auth).strip()
if auth_text.lower().startswith("bearer "):
token = auth_text[7:].strip()
if token:
return token
api_key_keys = ("api_key", "x-api-key", "Api-Key", "X-API-Key", b"api_key", b"x-api-key", b"Api-Key", b"X-API-Key")
for key in api_key_keys:
token = headers.get(key)
if token:
token_text = _to_text(token).strip()
if token_text:
return token_text
return None
def _extract_token_from_request(request: Any) -> str | None:
if request is None:
return None
state = getattr(request, "state", None)
if state is not None:
token = getattr(state, AUTH_TOKEN_STATE_KEY, None)
if token:
return token
token = _extract_token_from_headers(getattr(request, "headers", None))
if token and state is not None:
setattr(state, AUTH_TOKEN_STATE_KEY, token)
return token
def with_api_key(required: bool = True):
def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
@ -347,26 +409,14 @@ def with_api_key(required=True):
raise ValueError("Get RAGFlow Context failed")
connector = ragflow_ctx.conn
api_key = HOST_API_KEY
if MODE == LaunchMode.HOST:
headers = ctx.session._init_options.capabilities.experimental.get("headers", {})
token = None
# lower case here, because of Starlette conversion
auth = headers.get("authorization", "")
if auth.startswith("Bearer "):
token = auth.removeprefix("Bearer ").strip()
elif "api_key" in headers:
token = headers["api_key"]
if required and not token:
api_key = _extract_token_from_request(getattr(ctx, "request", None)) or ""
if required and not api_key:
raise ValueError("RAGFlow API key or Bearer token is required.")
connector.bind_api_key(token)
else:
connector.bind_api_key(HOST_API_KEY)
return await func(*args, connector=connector, **kwargs)
return await func(*args, connector=connector, api_key=api_key, **kwargs)
return wrapper
@ -375,8 +425,8 @@ def with_api_key(required=True):
@app.list_tools()
@with_api_key(required=True)
async def list_tools(*, connector) -> list[types.Tool]:
dataset_description = await connector.list_datasets()
async def list_tools(*, connector: RAGFlowConnector, api_key: str) -> list[types.Tool]:
dataset_description = await connector.list_datasets(api_key=api_key)
return [
types.Tool(
@ -446,7 +496,13 @@ async def list_tools(*, connector) -> list[types.Tool]:
@app.call_tool()
@with_api_key(required=True)
async def call_tool(name: str, arguments: dict, *, connector) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
async def call_tool(
name: str,
arguments: dict,
*,
connector: RAGFlowConnector,
api_key: str,
) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
if name == "ragflow_retrieval":
document_ids = arguments.get("document_ids", [])
dataset_ids = arguments.get("dataset_ids", [])
@ -462,7 +518,7 @@ async def call_tool(name: str, arguments: dict, *, connector) -> list[types.Text
# If no dataset_ids provided or empty list, get all available dataset IDs
if not dataset_ids:
dataset_list_str = await connector.list_datasets()
dataset_list_str = await connector.list_datasets(api_key=api_key)
dataset_ids = []
# Parse the dataset list to extract IDs
@ -477,6 +533,7 @@ async def call_tool(name: str, arguments: dict, *, connector) -> list[types.Text
continue
return await connector.retrieval(
api_key=api_key,
dataset_ids=dataset_ids,
document_ids=document_ids,
question=question,
@ -510,17 +567,13 @@ def create_starlette_app():
path = scope["path"]
if path.startswith("/messages/") or path.startswith("/sse") or path.startswith("/mcp"):
headers = dict(scope["headers"])
token = None
auth_header = headers.get(b"authorization")
if auth_header and auth_header.startswith(b"Bearer "):
token = auth_header.removeprefix(b"Bearer ").strip()
elif b"api_key" in headers:
token = headers[b"api_key"]
token = _extract_token_from_headers(headers)
if not token:
response = JSONResponse({"error": "Missing or invalid authorization header"}, status_code=401)
await response(scope, receive, send)
return
scope.setdefault("state", {})[AUTH_TOKEN_STATE_KEY] = token
await self.app(scope, receive, send)
@ -547,9 +600,8 @@ def create_starlette_app():
# Add streamable HTTP route if enabled
streamablehttp_lifespan = None
if TRANSPORT_STREAMABLE_HTTP_ENABLED:
from starlette.types import Receive, Scope, Send
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.types import Receive, Scope, Send
session_manager = StreamableHTTPSessionManager(
app=app,
@ -558,8 +610,11 @@ def create_starlette_app():
stateless=True,
)
async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None:
await session_manager.handle_request(scope, receive, send)
class StreamableHTTPEntry:
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
await session_manager.handle_request(scope, receive, send)
streamable_http_entry = StreamableHTTPEntry()
@asynccontextmanager
async def streamablehttp_lifespan(app: Starlette) -> AsyncIterator[None]:
@ -570,7 +625,12 @@ def create_starlette_app():
finally:
logging.info("StreamableHTTP application shutting down...")
routes.append(Mount("/mcp", app=handle_streamable_http))
routes.extend(
[
Route("/mcp", endpoint=streamable_http_entry, methods=["GET", "POST", "DELETE"]),
Mount("/mcp", app=streamable_http_entry),
]
)
return Starlette(
debug=True,
@ -631,9 +691,6 @@ def main(base_url, host, port, mode, api_key, transport_sse_enabled, transport_s
if MODE == LaunchMode.SELF_HOST and not HOST_API_KEY:
raise click.UsageError("--api-key is required when --mode is 'self-host'")
if TRANSPORT_STREAMABLE_HTTP_ENABLED and MODE == LaunchMode.HOST:
raise click.UsageError("The --host mode is not supported with streamable-http transport yet.")
if not TRANSPORT_STREAMABLE_HTTP_ENABLED and JSON_RESPONSE:
JSON_RESPONSE = False
@ -690,7 +747,7 @@ if __name__ == "__main__":
--base-url=http://127.0.0.1:9380 \
--mode=self-host --api-key=ragflow-xxxxx
2. Host mode (multi-tenant, self-host only, clients must provide Authorization headers):
2. Host mode (multi-tenant, clients must provide Authorization headers):
uv run mcp/server/server.py --host=127.0.0.1 --port=9382 \
--base-url=http://127.0.0.1:9380 \
--mode=host

View File

@ -21,7 +21,7 @@ dependencies = [
"cn2an==0.5.22",
"cohere==5.6.2",
"Crawl4AI>=0.4.0,<1.0.0",
"dashscope==1.20.11",
"dashscope==1.25.11",
"deepl==1.18.0",
"demjson3==3.0.6",
"discord-py==2.3.2",

View File

@ -44,7 +44,7 @@ class Excel(ExcelParser):
wb = Excel._load_excel_to_workbook(BytesIO(binary))
total = 0
for sheet_name in wb.sheetnames:
total += len(list(wb[sheet_name].rows))
total += Excel._get_actual_row_count(wb[sheet_name])
res, fails, done = [], [], 0
rn = 0
flow_images = []
@ -66,7 +66,7 @@ class Excel(ExcelParser):
flow_images.append(img)
try:
rows = list(ws.rows)
rows = Excel._get_rows_limited(ws)
except Exception as e:
logging.warning(f"Skip sheet '{sheet_name}' due to rows access error: {e}")
continue

View File

@ -165,6 +165,8 @@ def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing...
if cancel:
raise TaskCanceledException(msg)
logging.info(f"set_progress({task_id}), progress: {prog}, progress_msg: {msg}")
except TaskCanceledException:
raise
except DoesNotExist:
logging.warning(f"set_progress({task_id}) got exception DoesNotExist")
except Exception as e:
@ -693,6 +695,8 @@ async def run_dataflow(task: dict):
for i, ck in enumerate(chunks):
v = vects[i].tolist()
ck["q_%d_vec" % len(v)] = v
except TaskCanceledException:
raise
except Exception as e:
set_progress(task_id, prog=-1, msg=f"[ERROR]: {e}")
PipelineOperationLogService.create(document_id=doc_id, pipeline_id=dataflow_id,
@ -960,8 +964,9 @@ async def do_handle_task(task):
task_tenant_id = task["tenant_id"]
task_embedding_id = task["embd_id"]
task_language = task["language"]
task_llm_id = task["parser_config"].get("llm_id") or task["llm_id"]
task["llm_id"] = task_llm_id
doc_task_llm_id = task["parser_config"].get("llm_id") or task["llm_id"]
kb_task_llm_id = task['kb_parser_config'].get("llm_id") or task["llm_id"]
task['llm_id'] = kb_task_llm_id
task_dataset_id = task["kb_id"]
task_doc_id = task["doc_id"]
task_document_name = task["name"]
@ -1032,7 +1037,7 @@ async def do_handle_task(task):
return
# bind LLM for raptor
chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=task_llm_id, lang=task_language)
chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=kb_task_llm_id, lang=task_language)
# run RAPTOR
async with kg_limiter:
chunks, token_count = await run_raptor_for_kb(
@ -1076,7 +1081,7 @@ async def do_handle_task(task):
graphrag_conf = kb_parser_config.get("graphrag", {})
start_ts = timer()
chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=task_llm_id, lang=task_language)
chat_model = LLMBundle(task_tenant_id, LLMType.CHAT, llm_name=kb_task_llm_id, lang=task_language)
with_resolution = graphrag_conf.get("resolution", False)
with_community = graphrag_conf.get("community", False)
async with kg_limiter:
@ -1101,6 +1106,7 @@ async def do_handle_task(task):
return
else:
# Standard chunking methods
task['llm_id'] = doc_task_llm_id
start_ts = timer()
chunks = await build_chunks(task, progress_callback)
logging.info("Build document {}: {:.2f}s".format(task_document_name, timer() - start_ts))
@ -1111,6 +1117,8 @@ async def do_handle_task(task):
start_ts = timer()
try:
token_count, vector_size = await embedding(chunks, embedding_model, task_parser_config, progress_callback)
except TaskCanceledException:
raise
except Exception as e:
error_message = "Generate embedding error:{}".format(str(e))
progress_callback(-1, error_message)
@ -1128,13 +1136,17 @@ async def do_handle_task(task):
async def _maybe_insert_chunks(_chunks):
if has_canceled(task_id):
return True
progress_callback(-1, msg="Task has been canceled.")
return False
insert_result = await insert_chunks(task_id, task_tenant_id, task_dataset_id, _chunks, progress_callback)
return bool(insert_result)
try:
if not await _maybe_insert_chunks(chunks):
return
if has_canceled(task_id):
progress_callback(-1, msg="Task has been canceled.")
return
logging.info(
"Indexing doc({}), page({}-{}), chunks({}), elapsed: {:.2f}".format(
@ -1203,6 +1215,12 @@ async def handle_task():
DONE_TASKS += 1
CURRENT_TASKS.pop(task_id, None)
logging.info(f"handle_task done for task {json.dumps(task)}")
except TaskCanceledException as e:
DONE_TASKS += 1
CURRENT_TASKS.pop(task_id, None)
logging.info(
f"handle_task canceled for task {task_id}: {getattr(e, 'msg', str(e))}"
)
except Exception as e:
FAILED_TASKS += 1
CURRENT_TASKS.pop(task_id, None)

uv.lock generated
View File

@ -1557,15 +1557,17 @@ wheels = [
[[package]]
name = "dashscope"
version = "1.20.11"
version = "1.25.11"
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
dependencies = [
{ name = "aiohttp" },
{ name = "certifi" },
{ name = "cryptography" },
{ name = "requests" },
{ name = "websocket-client" },
]
wheels = [
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/25/21/0ddfa1aae7f45b3039d10d61ede77dedfc70d24ff946e7d0ecb92e9a2c85/dashscope-1.20.11-py3-none-any.whl", hash = "sha256:7367802c5ae136c6c1f4f8a16f9aba628e97adefae8afdebce6bbf518d0065d1", size = 1264221, upload-time = "2024-10-14T05:30:25.083Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/30/15/35551e6c6d3ea19df754ed32aa5f281b2052ef9e1ff1538f2708f74f3312/dashscope-1.25.11-py3-none-any.whl", hash = "sha256:93e86437f5f30e759e98292f0490e44eff00c337968363f27d29dd42ec7cc07c", size = 1342054, upload-time = "2026-02-03T02:49:48.711Z" },
]
[[package]]
@ -6271,7 +6273,7 @@ requires-dist = [
{ name = "cn2an", specifier = "==0.5.22" },
{ name = "cohere", specifier = "==5.6.2" },
{ name = "crawl4ai", specifier = ">=0.4.0,<1.0.0" },
{ name = "dashscope", specifier = "==1.20.11" },
{ name = "dashscope", specifier = "==1.25.11" },
{ name = "deepl", specifier = "==1.18.0" },
{ name = "demjson3", specifier = "==3.0.6" },
{ name = "discord-py", specifier = "==2.3.2" },

View File

@ -73,6 +73,7 @@ if (process.env.NODE_ENV === 'development') {
trackAllPureComponents: true,
trackExtraHooks: [],
logOnDifferentValues: true,
exclude: [/^RouterProvider$/],
});
},
);
@ -150,6 +151,13 @@ const RootProvider = ({ children }: React.PropsWithChildren) => {
);
};
const RouterProviderWrapper: React.FC<{ router: typeof routers }> = ({
router,
}) => {
return <RouterProvider router={router}></RouterProvider>;
};
RouterProviderWrapper.whyDidYouRender = false;
export default function AppContainer() {
// const [router, setRouter] = useState<any>(null);
@ -163,8 +171,7 @@ export default function AppContainer() {
return (
<RootProvider>
<RouterProvider router={routers}></RouterProvider>
{/* <RouterProvider router={router}></RouterProvider> */}
<RouterProviderWrapper router={routers} />
</RootProvider>
);
}

View File

@ -101,7 +101,6 @@ export const RAGFlowAvatar = memo(
}}
className={cn(
'bg-gradient-to-b',
`from-[${from}] to-[${to}]`,
'flex items-center justify-center',
'text-white ',
{ 'rounded-md': !isPerson },

View File

@ -4,6 +4,7 @@ import * as DialogPrimitive from '@radix-ui/react-dialog';
import { Loader, X } from 'lucide-react';
import { FC, ReactNode, useCallback, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { DialogDescription } from '../dialog';
import { createPortalModal } from './modal-manage';
export interface ModalProps {
@ -184,6 +185,7 @@ const Modal: ModalType = ({
style={style}
onClick={(e) => e.stopPropagation()}
>
<DialogDescription></DialogDescription>
{/* title */}
{title && (
<div

View File

@ -3,6 +3,7 @@ import { ButtonLoading } from '@/components/ui/button';
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
@ -155,10 +156,20 @@ export function DatasetCreatingDialog({
return (
<Dialog open onOpenChange={hideModal}>
<DialogContent className="sm:max-w-[425px] focus-visible:!outline-none flex flex-col">
<DialogContent
className="sm:max-w-[425px] focus-visible:!outline-none flex flex-col"
onKeyDown={(e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
const form = document.getElementById(FormId) as HTMLFormElement;
form?.requestSubmit();
}
}}
>
<DialogHeader>
<DialogTitle>{t('knowledgeList.createKnowledgeBase')}</DialogTitle>
</DialogHeader>
<DialogDescription></DialogDescription>
<InputForm onOk={onOk}></InputForm>
<DialogFooter>
<ButtonLoading type="submit" form={FormId} loading={loading}>

View File

@ -1,4 +1,4 @@
import { lazy } from 'react';
import { lazy, Suspense } from 'react';
import { createBrowserRouter, Navigate, type RouteObject } from 'react-router';
import FallbackComponent from './components/fallback-component';
import { IS_ENTERPRISE } from './pages/admin/utils';
@ -66,252 +66,253 @@ export enum Routes {
AdminMonitoring = `${Admin}/monitoring`,
}
const routeConfig = [
const defaultRouteFallback = (
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/30 backdrop-blur-[1px]">
<div className="h-8 w-8 animate-spin rounded-full border-2 border-white/70 border-t-transparent" />
</div>
);
type LazyRouteConfig = Omit<RouteObject, 'Component' | 'children'> & {
Component?: () => Promise<{ default: React.ComponentType<any> }>;
children?: LazyRouteConfig[];
};
const withLazyRoute = (
importer: () => Promise<{ default: React.ComponentType<any> }>,
fallback: React.ReactNode = defaultRouteFallback,
) => {
const LazyComponent = lazy(importer);
const Wrapped: React.FC<any> = (props) => (
<Suspense fallback={fallback}>
<LazyComponent {...props} />
</Suspense>
);
Wrapped.displayName = `LazyRoute(${
(LazyComponent as unknown as React.ComponentType<any>).displayName ||
LazyComponent.name ||
'Component'
})`;
return Wrapped;
};
const routeConfigOptions = [
{
path: '/login',
Component: lazy(() => import('@/pages/login-next')),
Component: () => import('@/pages/login-next'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: '/login-next',
Component: lazy(() => import('@/pages/login-next')),
Component: () => import('@/pages/login-next'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: Routes.ChatShare,
Component: lazy(() => import('@/pages/next-chats/share')),
Component: () => import('@/pages/next-chats/share'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: Routes.AgentShare,
Component: lazy(() => import('@/pages/agent/share')),
Component: () => import('@/pages/agent/share'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: Routes.ChatWidget,
Component: lazy(() => import('@/pages/next-chats/widget')),
Component: () => import('@/pages/next-chats/widget'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: Routes.AgentList,
Component: lazy(() => import('@/pages/agents')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/agents'),
},
{
path: '/document/:id',
Component: lazy(() => import('@/pages/document-viewer')),
Component: () => import('@/pages/document-viewer'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: '/*',
Component: lazy(() => import('@/pages/404')),
Component: () => import('@/pages/404'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: Routes.Root,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
wrappers: ['@/wrappers/auth'],
children: [
{
path: Routes.Root,
Component: lazy(() => import('@/pages/home')),
Component: () => import('@/pages/home'),
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.Datasets,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.Datasets,
Component: lazy(() => import('@/pages/datasets')),
Component: () => import('@/pages/datasets'),
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.Chats,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.Chats,
Component: lazy(() => import('@/pages/next-chats')),
Component: () => import('@/pages/next-chats'),
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.Chat + '/:id',
layout: false,
Component: lazy(() => import('@/pages/next-chats/chat')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/next-chats/chat'),
},
{
path: Routes.Searches,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.Searches,
Component: lazy(() => import('@/pages/next-searches')),
Component: () => import('@/pages/next-searches'),
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.Memories,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.Memories,
Component: lazy(() => import('@/pages/memories')),
Component: () => import('@/pages/memories'),
},
],
errorElement: <FallbackComponent />,
},
{
path: `${Routes.Memory}`,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: `${Routes.Memory}`,
layout: false,
Component: lazy(() => import('@/pages/memory')),
Component: () => import('@/pages/memory'),
children: [
{
path: `${Routes.Memory}/${Routes.MemoryMessage}/:id`,
Component: lazy(() => import('@/pages/memory/memory-message')),
Component: () => import('@/pages/memory/memory-message'),
},
{
path: `${Routes.Memory}/${Routes.MemorySetting}/:id`,
Component: lazy(() => import('@/pages/memory/memory-setting')),
Component: () => import('@/pages/memory/memory-setting'),
},
],
},
],
errorElement: <FallbackComponent />,
},
{
path: `${Routes.Search}/:id`,
layout: false,
Component: lazy(() => import('@/pages/next-search')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/next-search'),
},
{
path: `${Routes.SearchShare}`,
layout: false,
Component: lazy(() => import('@/pages/next-search/share')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/next-search/share'),
},
{
path: Routes.Agents,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.Agents,
Component: lazy(() => import('@/pages/agents')),
Component: () => import('@/pages/agents'),
},
],
errorElement: <FallbackComponent />,
},
{
path: `${Routes.AgentLogPage}/:id`,
layout: false,
Component: lazy(() => import('@/pages/agents/agent-log-page')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/agents/agent-log-page'),
},
{
path: `${Routes.Agent}/:id`,
layout: false,
Component: lazy(() => import('@/pages/agent')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/agent'),
},
{
path: Routes.AgentTemplates,
layout: false,
Component: lazy(() => import('@/pages/agents/agent-templates')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/agents/agent-templates'),
},
{
path: Routes.Files,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.Files,
Component: lazy(() => import('@/pages/files')),
Component: () => import('@/pages/files'),
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.DatasetBase,
layout: false,
Component: lazy(() => import('@/layouts/next')),
Component: () => import('@/layouts/next'),
children: [
{
path: Routes.DatasetBase,
element: <Navigate to={Routes.Dataset} replace />,
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.DatasetBase,
layout: false,
Component: lazy(() => import('@/pages/dataset')),
Component: () => import('@/pages/dataset'),
children: [
{
path: `${Routes.Dataset}/:id`,
Component: lazy(() => import('@/pages/dataset/dataset')),
Component: () => import('@/pages/dataset/dataset'),
},
{
path: `${Routes.DatasetBase}${Routes.DatasetTesting}/:id`,
Component: lazy(() => import('@/pages/dataset/testing')),
Component: () => import('@/pages/dataset/testing'),
},
{
path: `${Routes.DatasetBase}${Routes.KnowledgeGraph}/:id`,
Component: lazy(() => import('@/pages/dataset/knowledge-graph')),
Component: () => import('@/pages/dataset/knowledge-graph'),
},
{
path: `${Routes.DatasetBase}${Routes.DataSetOverview}/:id`,
Component: lazy(() => import('@/pages/dataset/dataset-overview')),
Component: () => import('@/pages/dataset/dataset-overview'),
},
{
path: `${Routes.DatasetBase}${Routes.DataSetSetting}/:id`,
Component: lazy(() => import('@/pages/dataset/dataset-setting')),
Component: () => import('@/pages/dataset/dataset-setting'),
},
],
errorElement: <FallbackComponent />,
},
{
path: `${Routes.DataflowResult}`,
layout: false,
Component: lazy(() => import('@/pages/dataflow-result')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/dataflow-result'),
},
{
path: `${Routes.ParsedResult}/chunks`,
layout: false,
Component: lazy(
() =>
import('@/pages/chunk/parsed-result/add-knowledge/components/knowledge-chunk'),
),
errorElement: <FallbackComponent />,
Component: () =>
import('@/pages/chunk/parsed-result/add-knowledge/components/knowledge-chunk'),
},
{
path: Routes.Chunk,
@ -319,30 +320,28 @@ const routeConfig = [
children: [
{
path: Routes.Chunk,
Component: lazy(() => import('@/pages/chunk')),
Component: () => import('@/pages/chunk'),
children: [
{
path: `${Routes.ChunkResult}/:id`,
Component: lazy(() => import('@/pages/chunk/chunk-result')),
Component: () => import('@/pages/chunk/chunk-result'),
},
{
path: `${Routes.ResultView}/:id`,
Component: lazy(() => import('@/pages/chunk/result-view')),
Component: () => import('@/pages/chunk/result-view'),
},
],
},
],
errorElement: <FallbackComponent />,
},
{
path: Routes.Chunk,
layout: false,
Component: lazy(() => import('@/pages/chunk')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/chunk'),
},
{
path: '/user-setting',
Component: lazy(() => import('@/pages/user-setting')),
Component: () => import('@/pages/user-setting'),
layout: false,
children: [
{
@ -351,92 +350,87 @@ const routeConfig = [
},
{
path: '/user-setting/profile',
Component: lazy(() => import('@/pages/user-setting/profile')),
Component: () => import('@/pages/user-setting/profile'),
},
{
path: '/user-setting/locale',
Component: lazy(() => import('@/pages/user-setting/setting-locale')),
Component: () => import('@/pages/user-setting/setting-locale'),
},
{
path: '/user-setting/model',
Component: lazy(() => import('@/pages/user-setting/setting-model')),
Component: () => import('@/pages/user-setting/setting-model'),
},
{
path: '/user-setting/team',
Component: lazy(() => import('@/pages/user-setting/setting-team')),
Component: () => import('@/pages/user-setting/setting-team'),
},
{
path: `/user-setting${Routes.Api}`,
Component: lazy(() => import('@/pages/user-setting/setting-api')),
Component: () => import('@/pages/user-setting/setting-api'),
},
{
path: `/user-setting${Routes.Mcp}`,
Component: lazy(() => import('@/pages/user-setting/mcp')),
Component: () => import('@/pages/user-setting/mcp'),
},
{
path: `/user-setting${Routes.DataSource}`,
Component: lazy(() => import('@/pages/user-setting/data-source')),
Component: () => import('@/pages/user-setting/data-source'),
},
],
errorElement: <FallbackComponent />,
},
{
path: `/user-setting${Routes.DataSource}${Routes.DataSourceDetailPage}`,
Component: lazy(
() => import('@/pages/user-setting/data-source/data-source-detail-page'),
),
Component: () =>
import('@/pages/user-setting/data-source/data-source-detail-page'),
layout: false,
errorElement: <FallbackComponent />,
},
{
path: Routes.Admin,
Component: lazy(() => import('@/pages/admin/layouts/root-layout')),
errorElement: <FallbackComponent />,
Component: () => import('@/pages/admin/layouts/root-layout'),
children: [
{
path: Routes.Admin,
Component: lazy(() => import('@/pages/admin/login')),
Component: () => import('@/pages/admin/login'),
},
{
path: Routes.Admin,
Component: lazy(
() => import('@/pages/admin/layouts/authorized-layout'),
),
Component: () => import('@/pages/admin/layouts/authorized-layout'),
children: [
{
path: `${Routes.AdminUserManagement}/:id`,
Component: lazy(() => import('@/pages/admin/user-detail')),
Component: () => import('@/pages/admin/user-detail'),
},
{
Component: lazy(
() => import('@/pages/admin/layouts/navigation-layout'),
),
Component: () => import('@/pages/admin/layouts/navigation-layout'),
children: [
{
path: Routes.AdminServices,
Component: lazy(() => import('@/pages/admin/service-status')),
Component: () => import('@/pages/admin/service-status'),
},
{
path: Routes.AdminUserManagement,
Component: lazy(() => import('@/pages/admin/users')),
Component: () => import('@/pages/admin/users'),
},
{
path: Routes.AdminSandboxSettings,
Component: lazy(() => import('@/pages/admin/sandbox-settings')),
Component: () => import('@/pages/admin/sandbox-settings'),
},
...(IS_ENTERPRISE
? [
{
path: Routes.AdminWhitelist,
Component: lazy(() => import('@/pages/admin/whitelist')),
Component: () => import('@/pages/admin/whitelist'),
},
{
path: Routes.AdminRoles,
Component: lazy(() => import('@/pages/admin/roles')),
Component: () => import('@/pages/admin/roles'),
},
{
path: Routes.AdminMonitoring,
Component: lazy(() => import('@/pages/admin/monitoring')),
Component: () => import('@/pages/admin/monitoring'),
},
]
: []),
@ -445,9 +439,24 @@ const routeConfig = [
],
},
],
} satisfies RouteObject,
} satisfies LazyRouteConfig,
];
const wrapRoutes = (routes: LazyRouteConfig[]): RouteObject[] =>
routes.map((item) => {
const { Component, children, ...rest } = item;
const next: RouteObject = { ...rest, errorElement: <FallbackComponent /> };
if (Component) {
next.Component = withLazyRoute(Component);
}
if (children) {
next.children = wrapRoutes(children);
}
return next;
});
const routeConfig = wrapRoutes(routeConfigOptions);
const routers = createBrowserRouter(routeConfig, {
basename: import.meta.env.VITE_BASE_URL || '/',
});

View File

@ -101,6 +101,12 @@ export default defineConfig(({ mode, command }) => {
experimentalMinChunkSize: 30 * 1024,
chunkSizeWarningLimit: 1000,
rollupOptions: {
onwarn(warning, warn) {
if (warning.code === 'EMPTY_BUNDLE') {
return;
}
warn(warning);
},
output: {
manualChunks(id) {
// if (id.includes('src/components')) {