### What problem does this PR solve?

As title

### Type of change

- [x] Refactoring

Signed-off-by: Jin Hai <haijin.chn@gmail.com>

Author: Jin Hai
Date: 2025-08-27 18:56:40 +08:00 (committed by GitHub)
Commit: 5abd0bbac1 (parent: 2d89863fdd)

9 changed files with 25 additions and 24 deletions


@@ -93,6 +93,7 @@ def list_chunk():
 def get():
     chunk_id = request.args["chunk_id"]
     try:
+        chunk = None
         tenants = UserTenantService.query(user_id=current_user.id)
         if not tenants:
             return get_data_error_result(message="Tenant not found!")
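For context on the added `chunk = None`: pre-initializing the variable guarantees it exists even when the lookup inside the `try` block raises before assigning it, so later references hit a clean "not found" path instead of an `UnboundLocalError`. A minimal, self-contained sketch of the pattern (the helper names here are hypothetical, not RAGFlow's):

```python
# Minimal sketch (hypothetical helpers) of why pre-initializing `chunk = None`
# matters: if the lookup raises before `chunk` is bound, any later reference
# would otherwise fail with UnboundLocalError instead of a clean error path.
def get_chunk(chunk_id, lookup):
    chunk = None  # ensure the name exists even if the try block fails early
    try:
        chunk = lookup(chunk_id)
    except KeyError:
        pass  # fall through; `chunk` is still None
    if chunk is None:
        return {"error": f"chunk {chunk_id} not found"}
    return {"data": chunk}


def failing_lookup(chunk_id):
    raise KeyError(chunk_id)


print(get_chunk("c1", lambda cid: {"id": cid}))  # {'data': {'id': 'c1'}}
print(get_chunk("c2", failing_lookup))           # {'error': 'chunk c2 not found'}
```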


@@ -243,7 +243,7 @@ def add_llm():
                 model_name=mdl_nm,
                 base_url=llm["api_base"]
             )
-            arr, tc = mdl.similarity("Hello~ Ragflower!", ["Hi, there!", "Ohh, my friend!"])
+            arr, tc = mdl.similarity("Hello~ RAGFlower!", ["Hi, there!", "Ohh, my friend!"])
             if len(arr) == 0:
                 raise Exception("Not known.")
         except KeyError:
@@ -271,7 +271,7 @@ def add_llm():
             key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
         )
         try:
-            for resp in mdl.tts("Hello~ Ragflower!"):
+            for resp in mdl.tts("Hello~ RAGFlower!"):
                 pass
         except RuntimeError as e:
             msg += f"\nFail to access model({factory}/{mdl_nm})." + str(e)
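Both string fixes above land in add_llm's connectivity probes: the endpoint sends a tiny request (a `similarity` call for rerank-style models, a short `tts` call for speech models) and treats any exception as "model unreachable". A hedged sketch of that probe pattern with a stand-in model object, not RAGFlow's real wrapper classes:

```python
# Hedged sketch of the "probe with a tiny request" pattern these hunks touch.
# `mdl` is a hypothetical stand-in; RAGFlow's real model wrappers differ.
def probe_rerank_model(mdl, factory: str, model_name: str) -> str:
    msg = ""
    try:
        scores, _token_count = mdl.similarity("Hello~ RAGFlower!", ["Hi, there!", "Ohh, my friend!"])
        if len(scores) == 0:
            raise Exception("Not known.")
    except Exception as e:
        msg += f"\nFail to access model({factory}/{model_name})." + str(e)
    return msg


class FakeRerank:
    def similarity(self, query, texts):
        return [0.9, 0.1], 12  # (scores per candidate, token count)


print(repr(probe_rerank_model(FakeRerank(), "demo", "fake-rerank")))  # '' -> reachable
```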


@@ -82,7 +82,7 @@ def create() -> Response:
     server_name = req.get("name", "")
     if not server_name or len(server_name.encode("utf-8")) > 255:
-        return get_data_error_result(message=f"Invaild MCP name or length is {len(server_name)} which is large than 255.")
+        return get_data_error_result(message=f"Invalid MCP name or length is {len(server_name)} which is large than 255.")

     e, _ = MCPServerService.get_by_name_and_tenant(name=server_name, tenant_id=current_user.id)
     if e:
@@ -90,7 +90,7 @@ def create() -> Response:
     url = req.get("url", "")
     if not url:
-        return get_data_error_result(message="Invaild url.")
+        return get_data_error_result(message="Invalid url.")

     headers = safe_json_parse(req.get("headers", {}))
     req["headers"] = headers
@@ -141,10 +141,10 @@ def update() -> Response:
         return get_data_error_result(message="Unsupported MCP server type.")

     server_name = req.get("name", mcp_server.name)
     if server_name and len(server_name.encode("utf-8")) > 255:
-        return get_data_error_result(message=f"Invaild MCP name or length is {len(server_name)} which is large than 255.")
+        return get_data_error_result(message=f"Invalid MCP name or length is {len(server_name)} which is large than 255.")

     url = req.get("url", mcp_server.url)
     if not url:
-        return get_data_error_result(message="Invaild url.")
+        return get_data_error_result(message="Invalid url.")

     headers = safe_json_parse(req.get("headers", mcp_server.headers))
     req["headers"] = headers
@@ -218,7 +218,7 @@ def import_multiple() -> Response:
             continue

         if not server_name or len(server_name.encode("utf-8")) > 255:
-            results.append({"server": server_name, "success": False, "message": f"Invaild MCP name or length is {len(server_name)} which is large than 255."})
+            results.append({"server": server_name, "success": False, "message": f"Invalid MCP name or length is {len(server_name)} which is large than 255."})
             continue

         base_name = server_name
@@ -409,7 +409,7 @@ def test_mcp() -> Response:
     url = req.get("url", "")
     if not url:
-        return get_data_error_result(message="Invaild MCP url.")
+        return get_data_error_result(message="Invalid MCP url.")

     server_type = req.get("server_type", "")
     if server_type not in VALID_MCP_SERVER_TYPES:
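The messages corrected above all come from the same validation rule: the MCP server name is limited by UTF-8 byte length rather than character count, so multi-byte names can be rejected even when `len(name)` looks small. A standalone sketch of that check (helper name and wording are illustrative, not RAGFlow's actual function):

```python
# Standalone sketch of the validation these hunks touch: the 255 limit is on
# UTF-8 bytes, not characters, so multi-byte names can exceed it even when
# len(name) looks small. Helper name and message are illustrative only.
from typing import Optional


def validate_mcp_name(server_name: str, max_bytes: int = 255) -> Optional[str]:
    """Return an error message, or None if the name fits the byte limit."""
    if not server_name or len(server_name.encode("utf-8")) > max_bytes:
        return f"Invalid MCP name or length is {len(server_name)} which is larger than {max_bytes}."
    return None


print(validate_mcp_name("my-mcp-server"))  # None -> accepted
print(validate_mcp_name("搜" * 100))        # 100 chars but 300 UTF-8 bytes -> rejected
```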


@@ -43,7 +43,7 @@ def create():
         return get_data_error_result(message=f"Search name length is {len(search_name)} which is large than 255.")
     e, _ = TenantService.get_by_id(current_user.id)
     if not e:
-        return get_data_error_result(message="Authorizationd identity.")
+        return get_data_error_result(message="Authorized identity.")
     search_name = search_name.strip()
     search_name = duplicate_name(SearchService.query, name=search_name, tenant_id=current_user.id, status=StatusEnum.VALID.value)
@@ -78,7 +78,7 @@ def update():
     tenant_id = req["tenant_id"]
     e, _ = TenantService.get_by_id(tenant_id)
     if not e:
-        return get_data_error_result(message="Authorizationd identity.")
+        return get_data_error_result(message="Authorized identity.")
     search_id = req["search_id"]
     if not SearchService.accessible4deletion(search_id, current_user.id):


@@ -31,11 +31,11 @@ def save_results(image_list, results, labels, output_dir='output/', threshold=0.
         logging.debug("save result to: " + out_path)


-def draw_box(im, result, lables, threshold=0.5):
+def draw_box(im, result, labels, threshold=0.5):
     draw_thickness = min(im.size) // 320
     draw = ImageDraw.Draw(im)
-    color_list = get_color_map_list(len(lables))
-    clsid2color = {n.lower():color_list[i] for i,n in enumerate(lables)}
+    color_list = get_color_map_list(len(labels))
+    clsid2color = {n.lower():color_list[i] for i,n in enumerate(labels)}
     result = [r for r in result if r["score"] >= threshold]

     for dt in result:
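The renamed `labels` argument feeds a label-to-color mapping used when drawing detection boxes above a score threshold. A rough, self-contained PIL sketch of the idea; `get_color_map_list` is RAGFlow's own helper and the box-dict fields are assumed, so stand-ins are used here:

```python
# Sketch of the label->color mapping idea in draw_box(); get_color_map_list()
# is RAGFlow's own helper, so a fixed palette stands in, and the box dicts
# use an assumed structure ({"type", "score", "bbox"}).
from PIL import Image, ImageDraw


def draw_boxes(im, result, labels, threshold=0.5):
    draw = ImageDraw.Draw(im)
    palette = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # stand-in palette
    clsid2color = {name.lower(): palette[i % len(palette)] for i, name in enumerate(labels)}
    for dt in (r for r in result if r["score"] >= threshold):
        x0, y0, x1, y1 = dt["bbox"]
        draw.rectangle([x0, y0, x1, y1], outline=clsid2color[dt["type"].lower()], width=2)
    return im


im = Image.new("RGB", (320, 240), "white")
boxes = [{"type": "Title", "score": 0.9, "bbox": (10, 10, 150, 60)},
         {"type": "Text", "score": 0.3, "bbox": (10, 80, 300, 200)}]  # below threshold, skipped
draw_boxes(im, boxes, ["title", "text", "figure"]).save("boxes.png")
```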


@@ -554,8 +554,8 @@ def naive_merge(sections, chunk_token_num=128, delimiter="\n。", overl
         if num_tokens_from_string(sec) < chunk_token_num:
             add_chunk(sec, pos)
             continue
-        splited_sec = re.split(r"(%s)" % dels, sec, flags=re.DOTALL)
-        for sub_sec in splited_sec:
+        split_sec = re.split(r"(%s)" % dels, sec, flags=re.DOTALL)
+        for sub_sec in split_sec:
             if re.match(f"^{dels}$", sub_sec):
                 continue
             add_chunk(sub_sec, pos)
@@ -600,14 +600,14 @@ def naive_merge_with_images(texts, images, chunk_token_num=128, delimiter="\n。
         if isinstance(text, tuple):
             text_str = text[0]
             text_pos = text[1] if len(text) > 1 else ""
-            splited_sec = re.split(r"(%s)" % dels, text_str)
-            for sub_sec in splited_sec:
+            split_sec = re.split(r"(%s)" % dels, text_str)
+            for sub_sec in split_sec:
                 if re.match(f"^{dels}$", sub_sec):
                     continue
                 add_chunk(sub_sec, image, text_pos)
         else:
-            splited_sec = re.split(r"(%s)" % dels, text)
-            for sub_sec in splited_sec:
+            split_sec = re.split(r"(%s)" % dels, text)
+            for sub_sec in split_sec:
                 if re.match(f"^{dels}$", sub_sec):
                     continue
                 add_chunk(sub_sec, image)
@@ -684,8 +684,8 @@ def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。"):
     dels = get_delimiters(delimiter)
     for sec, image in sections:
-        splited_sec = re.split(r"(%s)" % dels, sec)
-        for sub_sec in splited_sec:
+        split_sec = re.split(r"(%s)" % dels, sec)
+        for sub_sec in split_sec:
             if re.match(f"^{dels}$", sub_sec):
                 continue
             add_chunk(sub_sec, image,"")
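All three renames above touch the same splitting pattern: `re.split` with a capturing group keeps the delimiters in the returned list, and the `re.match(f"^{dels}$", ...)` check then skips tokens that are pure delimiters. A small standalone illustration (the `dels` pattern here is an assumed example; RAGFlow builds it via `get_delimiters`):

```python
import re

# Standalone sketch of the splitting pattern in the renamed-variable hunks.
# `dels` is an assumed example pattern; RAGFlow builds it via get_delimiters().
dels = "\n|。|！"  # alternation of delimiter characters
text = "第一句。第二句！third sentence\nfourth sentence"

# Splitting on a capturing group keeps the delimiter tokens in the output list.
split_sec = re.split(r"(%s)" % dels, text)
print(split_sec)
# ['第一句', '。', '第二句', '！', 'third sentence', '\n', 'fourth sentence']

# Pure-delimiter tokens are then skipped, mirroring the re.match(f"^{dels}$", ...) check.
chunks = [s for s in split_sec if s and not re.match(f"^{dels}$", s)]
print(chunks)  # ['第一句', '第二句', 'third sentence', 'fourth sentence']
```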


@@ -148,7 +148,7 @@ class MCPToolCallSession(ToolCallSession):
         if result.isError:
             return f"MCP server error: {result.content}"

-        # For now we only support text content
+        # For now, we only support text content
         if isinstance(result.content[0], TextContent):
             return result.content[0].text
         else:


@@ -336,7 +336,7 @@ class RedisDB:
     def delete_if_equal(self, key: str, expected_value: str) -> bool:
         """
-        Do follwing atomically:
+        Do following atomically:
         Delete a key if its value is equals to the given one, do nothing otherwise.
         """
         return bool(self.lua_delete_if_equal(keys=[key], args=[expected_value], client=self.REDIS))
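The docstring being fixed belongs to `delete_if_equal`, which runs a Lua script so the value comparison and the deletion happen atomically on the Redis server; a plain GET followed by DEL would race with other clients. A hedged redis-py sketch of that compare-and-delete pattern (connection details and key names are assumptions, not RAGFlow's configuration):

```python
# Hedged sketch of the compare-and-delete pattern behind delete_if_equal():
# a Lua script runs atomically on the server, so no other client can change
# the key between the GET and the DEL. Connection details are assumptions.
import redis

LUA_DELETE_IF_EQUAL = """
if redis.call('GET', KEYS[1]) == ARGV[1] then
    return redis.call('DEL', KEYS[1])
end
return 0
"""

r = redis.Redis(host="localhost", port=6379)  # assumed local instance
delete_if_equal = r.register_script(LUA_DELETE_IF_EQUAL)

r.set("lock:task-42", "owner-a")
print(bool(delete_if_equal(keys=["lock:task-42"], args=["owner-b"])))  # False: value differs
print(bool(delete_if_equal(keys=["lock:task-42"], args=["owner-a"])))  # True: key deleted
```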