Compare commits

...

5 Commits

Author SHA1 Message Date
ba71160b14 Refa: rm useless code. (#11238)
### Type of change

- [x] Refactoring
2025-11-13 09:59:55 +08:00
bd5dda6b10 Feature/doc upload api add parent path 20251112 (#11231)
### What problem does this PR solve?

Adds support for specifying a parent_path in the document upload API (#11230)

### Type of change

- [x] New Feature (non-breaking change which adds functionality)

Co-authored-by: virgilwong <hyhvirgil@gmail.com>
2025-11-13 09:59:39 +08:00
774563970b Fix: update readme (#11212)
### What problem does this PR solve?

Continues the README update from #11167

### Type of change

- [x] Documentation Update
2025-11-13 09:50:47 +08:00
83d84e90ed Fix: Profile picture cropping supported #10703 (#11221)
### What problem does this PR solve?

Adds support for profile picture cropping.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-11-13 09:50:10 +08:00
8ef2f79d0a Fix: reset the agent component’s output (#11222)
### What problem does this PR solve?

Fixes the issue where, after each dialogue turn, the agent component’s output was not reset.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
2025-11-13 09:49:12 +08:00
21 changed files with 349 additions and 804 deletions

View File

@ -192,9 +192,10 @@ releases! 🌟
```bash
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the **entrypoint.sh** file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -192,6 +192,7 @@ Coba demo kami di [https://demo.ragflow.io](https://demo.ragflow.io).
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the **entrypoint.sh** file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -172,6 +172,7 @@
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the entrypoint.sh file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -174,6 +174,7 @@
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the entrypoint.sh file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -192,6 +192,7 @@ Experimente nossa demo em [https://demo.ragflow.io](https://demo.ragflow.io).
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the entrypoint.sh file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -191,6 +191,7 @@
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the entrypoint.sh file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -192,6 +192,7 @@
$ cd ragflow/docker
# Optional: use a stable tag (see releases: https://github.com/infiniflow/ragflow/releases), e.g.: git checkout v0.22.0
# This step ensures the entrypoint.sh file in the code matches the Docker image version.
# Use CPU for DeepDoc tasks:
$ docker compose -f docker-compose.yml up -d

View File

@ -368,11 +368,19 @@ Respond immediately with your final comprehensive answer.
return "Error occurred."
def reset(self, temp=False):
def reset(self, only_output=False):
"""
Reset all tools if they have a reset method. This avoids errors for tools like MCPToolCallSession.
"""
for k in self._param.outputs.keys():
self._param.outputs[k]["value"] = None
for k, cpn in self.tools.items():
if hasattr(cpn, "reset") and callable(cpn.reset):
cpn.reset()
if only_output:
return
for k in self._param.inputs.keys():
self._param.inputs[k]["value"] = None
self._param.debug_inputs = {}
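The new `only_output` flag makes it possible to clear a component's outputs between dialogue turns without wiping its inputs or debug state. A minimal sketch of the intended call pattern (the driver loop and `invoke` method are hypothetical illustrations, not part of this diff):

```python
# Hypothetical driver loop (agent_component stands in for an agent
# component instance; invoke() is an assumed entry point, not this API).
for question in ["What is RAGFlow?", "How do I deploy it?"]:
    answer = agent_component.invoke(question)
    print(answer)
    # Clear only the outputs so the previous turn's answer cannot leak
    # into the next turn; inputs and debug_inputs are preserved.
    agent_component.reset(only_output=True)
```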

View File

@ -222,7 +222,7 @@ class LLM(ComponentBase):
output_structure = self._param.outputs['structured']
except Exception:
pass
if output_structure:
if output_structure and isinstance(output_structure, dict) and output_structure.get("properties"):
schema=json.dumps(output_structure, ensure_ascii=False, indent=2)
prompt += structured_output_prompt(schema)
for _ in range(self._param.max_retries+1):
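The tightened condition only injects the structured-output prompt when the schema is a real JSON-Schema-style dict that declares at least one property, so empty or malformed values no longer trigger it. A self-contained sketch of the guard's behavior (the helper name is illustrative):

```python
def needs_structured_prompt(output_structure) -> bool:
    # Mirrors the new guard: the schema must be a non-empty dict with
    # a "properties" key before structured output is requested.
    return bool(output_structure
                and isinstance(output_structure, dict)
                and output_structure.get("properties"))

assert not needs_structured_prompt(None)   # unset
assert not needs_structured_prompt({})     # empty schema
assert not needs_structured_prompt("{}")   # wrong type (string)
assert needs_structured_prompt(
    {"type": "object", "properties": {"answer": {"type": "string"}}})
```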

View File

@ -13,41 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import re
from datetime import datetime, timedelta
from flask import request, Response
from api.db.services.llm_service import LLMBundle
from flask import request
from flask_login import login_required, current_user
from api.db import VALID_FILE_TYPES, FileType
from api.db.db_models import APIToken, Task, File
from api.db.services import duplicate_name
from api.db.db_models import APIToken
from api.db.services.api_service import APITokenService, API4ConversationService
from api.db.services.dialog_service import DialogService, chat
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.task_service import queue_tasks, TaskService
from api.db.services.user_service import UserTenantService
from common.misc_utils import get_uuid
from common.constants import RetCode, VALID_TASK_STATUS, LLMType, ParserType, FileSource
from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request, \
generate_confirmation_token
from api.utils.file_utils import filename_type, thumbnail
from rag.app.tag import label_question
from rag.prompts.generator import keyword_extraction
from common.time_utils import current_timestamp, datetime_format
from api.db.services.canvas_service import UserCanvasService
from agent.canvas import Canvas
from functools import partial
from pathlib import Path
from common import settings
@manager.route('/new_token', methods=['POST']) # noqa: F821
@login_required
@ -138,758 +113,3 @@ def stats():
except Exception as e:
return server_error_response(e)
@manager.route('/new_conversation', methods=['GET']) # noqa: F821
def set_conversation():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
try:
if objs[0].source == "agent":
e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
if not e:
return server_error_response("canvas not found.")
if not isinstance(cvs.dsl, str):
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
canvas = Canvas(cvs.dsl, objs[0].tenant_id)
conv = {
"id": get_uuid(),
"dialog_id": cvs.id,
"user_id": request.args.get("user_id", ""),
"message": [{"role": "assistant", "content": canvas.get_prologue()}],
"source": "agent"
}
API4ConversationService.save(**conv)
return get_json_result(data=conv)
else:
e, dia = DialogService.get_by_id(objs[0].dialog_id)
if not e:
return get_data_error_result(message="Dialog not found")
conv = {
"id": get_uuid(),
"dialog_id": dia.id,
"user_id": request.args.get("user_id", ""),
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
}
API4ConversationService.save(**conv)
return get_json_result(data=conv)
except Exception as e:
return server_error_response(e)
@manager.route('/completion', methods=['POST']) # noqa: F821
@validate_request("conversation_id", "messages")
def completion():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
req = request.json
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
if not e:
return get_data_error_result(message="Conversation not found!")
if "quote" not in req:
req["quote"] = False
msg = []
for m in req["messages"]:
if m["role"] == "system":
continue
if m["role"] == "assistant" and not msg:
continue
msg.append(m)
if not msg[-1].get("id"):
msg[-1]["id"] = get_uuid()
message_id = msg[-1]["id"]
def fillin_conv(ans):
nonlocal conv, message_id
if not conv.reference:
conv.reference.append(ans["reference"])
else:
conv.reference[-1] = ans["reference"]
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
ans["id"] = message_id
def rename_field(ans):
reference = ans['reference']
if not isinstance(reference, dict):
return
for chunk_i in reference.get('chunks', []):
if 'docnm_kwd' in chunk_i:
chunk_i['doc_name'] = chunk_i['docnm_kwd']
chunk_i.pop('docnm_kwd')
try:
if conv.source == "agent":
stream = req.get("stream", True)
conv.message.append(msg[-1])
e, cvs = UserCanvasService.get_by_id(conv.dialog_id)
if not e:
return server_error_response("canvas not found.")
del req["conversation_id"]
del req["messages"]
if not isinstance(cvs.dsl, str):
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": "", "id": message_id})
conv.reference.append({"chunks": [], "doc_aggs": []})
final_ans = {"reference": [], "content": ""}
canvas = Canvas(cvs.dsl, objs[0].tenant_id)
canvas.messages.append(msg[-1])
canvas.add_user_input(msg[-1]["content"])
answer = canvas.run(stream=stream)
assert answer is not None, "Nothing. Is it over?"
if stream:
assert isinstance(answer, partial), "Nothing. Is it over?"
def sse():
nonlocal answer, cvs, conv
try:
for ans in answer():
for k in ans.keys():
final_ans[k] = ans[k]
ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
fillin_conv(ans)
rename_field(ans)
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
ensure_ascii=False) + "\n\n"
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
canvas.history.append(("assistant", final_ans["content"]))
if final_ans.get("reference"):
canvas.reference.append(final_ans["reference"])
cvs.dsl = json.loads(str(canvas))
API4ConversationService.append_message(conv.id, conv.to_dict())
except Exception as e:
yield "data:" + json.dumps({"code": 500, "message": str(e),
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
resp = Response(sse(), mimetype="text/event-stream")
resp.headers.add_header("Cache-control", "no-cache")
resp.headers.add_header("Connection", "keep-alive")
resp.headers.add_header("X-Accel-Buffering", "no")
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
return resp
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
if final_ans.get("reference"):
canvas.reference.append(final_ans["reference"])
cvs.dsl = json.loads(str(canvas))
result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
fillin_conv(result)
API4ConversationService.append_message(conv.id, conv.to_dict())
rename_field(result)
return get_json_result(data=result)
# ******************For dialog******************
conv.message.append(msg[-1])
e, dia = DialogService.get_by_id(conv.dialog_id)
if not e:
return get_data_error_result(message="Dialog not found!")
del req["conversation_id"]
del req["messages"]
if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": "", "id": message_id})
conv.reference.append({"chunks": [], "doc_aggs": []})
def stream():
nonlocal dia, msg, req, conv
try:
for ans in chat(dia, msg, True, **req):
fillin_conv(ans)
rename_field(ans)
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
ensure_ascii=False) + "\n\n"
API4ConversationService.append_message(conv.id, conv.to_dict())
except Exception as e:
yield "data:" + json.dumps({"code": 500, "message": str(e),
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
ensure_ascii=False) + "\n\n"
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
if req.get("stream", True):
resp = Response(stream(), mimetype="text/event-stream")
resp.headers.add_header("Cache-control", "no-cache")
resp.headers.add_header("Connection", "keep-alive")
resp.headers.add_header("X-Accel-Buffering", "no")
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
return resp
answer = None
for ans in chat(dia, msg, **req):
answer = ans
fillin_conv(ans)
API4ConversationService.append_message(conv.id, conv.to_dict())
break
rename_field(answer)
return get_json_result(data=answer)
except Exception as e:
return server_error_response(e)
@manager.route('/conversation/<conversation_id>', methods=['GET']) # noqa: F821
# @login_required
def get_conversation(conversation_id):
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
try:
e, conv = API4ConversationService.get_by_id(conversation_id)
if not e:
return get_data_error_result(message="Conversation not found!")
conv = conv.to_dict()
if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
return get_json_result(data=False, message='Authentication error: API key is invalid for this conversation_id!"',
code=RetCode.AUTHENTICATION_ERROR)
for referenct_i in conv['reference']:
if referenct_i is None or len(referenct_i) == 0:
continue
for chunk_i in referenct_i['chunks']:
if 'docnm_kwd' in chunk_i.keys():
chunk_i['doc_name'] = chunk_i['docnm_kwd']
chunk_i.pop('docnm_kwd')
return get_json_result(data=conv)
except Exception as e:
return server_error_response(e)
@manager.route('/document/upload', methods=['POST']) # noqa: F821
@validate_request("kb_name")
def upload():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
kb_name = request.form.get("kb_name").strip()
tenant_id = objs[0].tenant_id
try:
e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
if not e:
return get_data_error_result(
message="Can't find this knowledgebase!")
kb_id = kb.id
except Exception as e:
return server_error_response(e)
if 'file' not in request.files:
return get_json_result(
data=False, message='No file part!', code=RetCode.ARGUMENT_ERROR)
file = request.files['file']
if file.filename == '':
return get_json_result(
data=False, message='No file selected!', code=RetCode.ARGUMENT_ERROR)
root_folder = FileService.get_root_folder(tenant_id)
pf_id = root_folder["id"]
FileService.init_knowledgebase_docs(pf_id, tenant_id)
kb_root_folder = FileService.get_kb_folder(tenant_id)
kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
try:
if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
return get_data_error_result(
message="Exceed the maximum file number of a free user!")
filename = duplicate_name(
DocumentService.query,
name=file.filename,
kb_id=kb_id)
filetype = filename_type(filename)
if not filetype:
return get_data_error_result(
message="This type of file has not been supported yet!")
location = filename
while settings.STORAGE_IMPL.obj_exist(kb_id, location):
location += "_"
blob = request.files['file'].read()
settings.STORAGE_IMPL.put(kb_id, location, blob)
doc = {
"id": get_uuid(),
"kb_id": kb.id,
"parser_id": kb.parser_id,
"parser_config": kb.parser_config,
"created_by": kb.tenant_id,
"type": filetype,
"name": filename,
"location": location,
"size": len(blob),
"thumbnail": thumbnail(filename, blob),
"suffix": Path(filename).suffix.lstrip("."),
}
form_data = request.form
if "parser_id" in form_data.keys():
if request.form.get("parser_id").strip() in list(vars(ParserType).values())[1:-3]:
doc["parser_id"] = request.form.get("parser_id").strip()
if doc["type"] == FileType.VISUAL:
doc["parser_id"] = ParserType.PICTURE.value
if doc["type"] == FileType.AURAL:
doc["parser_id"] = ParserType.AUDIO.value
if re.search(r"\.(ppt|pptx|pages)$", filename):
doc["parser_id"] = ParserType.PRESENTATION.value
if re.search(r"\.(eml)$", filename):
doc["parser_id"] = ParserType.EMAIL.value
doc_result = DocumentService.insert(doc)
FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
except Exception as e:
return server_error_response(e)
if "run" in form_data.keys():
if request.form.get("run").strip() == "1":
try:
info = {"run": 1, "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
DocumentService.update_by_id(doc["id"], info)
# if str(req["run"]) == TaskStatus.CANCEL.value:
tenant_id = DocumentService.get_tenant_id(doc["id"])
if not tenant_id:
return get_data_error_result(message="Tenant not found!")
# e, doc = DocumentService.get_by_id(doc["id"])
TaskService.filter_delete([Task.doc_id == doc["id"]])
e, doc = DocumentService.get_by_id(doc["id"])
doc = doc.to_dict()
doc["tenant_id"] = tenant_id
bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
queue_tasks(doc, bucket, name, 0)
except Exception as e:
return server_error_response(e)
return get_json_result(data=doc_result.to_json())
@manager.route('/document/upload_and_parse', methods=['POST']) # noqa: F821
@validate_request("conversation_id")
def upload_parse():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
if 'file' not in request.files:
return get_json_result(
data=False, message='No file part!', code=RetCode.ARGUMENT_ERROR)
file_objs = request.files.getlist('file')
for file_obj in file_objs:
if file_obj.filename == '':
return get_json_result(
data=False, message='No file selected!', code=RetCode.ARGUMENT_ERROR)
doc_ids = doc_upload_and_parse(request.form.get("conversation_id"), file_objs, objs[0].tenant_id)
return get_json_result(data=doc_ids)
@manager.route('/list_chunks', methods=['POST']) # noqa: F821
# @login_required
def list_chunks():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
req = request.json
try:
if "doc_name" in req.keys():
tenant_id = DocumentService.get_tenant_id_by_name(req['doc_name'])
doc_id = DocumentService.get_doc_id_by_doc_name(req['doc_name'])
elif "doc_id" in req.keys():
tenant_id = DocumentService.get_tenant_id(req['doc_id'])
doc_id = req['doc_id']
else:
return get_json_result(
data=False, message="Can't find doc_name or doc_id"
)
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
res = settings.retriever.chunk_list(doc_id, tenant_id, kb_ids)
res = [
{
"content": res_item["content_with_weight"],
"doc_name": res_item["docnm_kwd"],
"image_id": res_item["img_id"]
} for res_item in res
]
except Exception as e:
return server_error_response(e)
return get_json_result(data=res)
@manager.route('/get_chunk/<chunk_id>', methods=['GET']) # noqa: F821
# @login_required
def get_chunk(chunk_id):
from rag.nlp import search
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
try:
tenant_id = objs[0].tenant_id
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
if chunk is None:
return server_error_response(Exception("Chunk not found"))
k = []
for n in chunk.keys():
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
k.append(n)
for n in k:
del chunk[n]
return get_json_result(data=chunk)
except Exception as e:
return server_error_response(e)
@manager.route('/list_kb_docs', methods=['POST']) # noqa: F821
# @login_required
def list_kb_docs():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
req = request.json
tenant_id = objs[0].tenant_id
kb_name = req.get("kb_name", "").strip()
try:
e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
if not e:
return get_data_error_result(
message="Can't find this knowledgebase!")
kb_id = kb.id
except Exception as e:
return server_error_response(e)
page_number = int(req.get("page", 1))
items_per_page = int(req.get("page_size", 15))
orderby = req.get("orderby", "create_time")
desc = req.get("desc", True)
keywords = req.get("keywords", "")
status = req.get("status", [])
if status:
invalid_status = {s for s in status if s not in VALID_TASK_STATUS}
if invalid_status:
return get_data_error_result(
message=f"Invalid filter status conditions: {', '.join(invalid_status)}"
)
types = req.get("types", [])
if types:
invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
if invalid_types:
return get_data_error_result(
message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}"
)
try:
docs, tol = DocumentService.get_by_kb_id(
kb_id, page_number, items_per_page, orderby, desc, keywords, status, types)
docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]
return get_json_result(data={"total": tol, "docs": docs})
except Exception as e:
return server_error_response(e)
@manager.route('/document/infos', methods=['POST']) # noqa: F821
@validate_request("doc_ids")
def docinfos():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
req = request.json
doc_ids = req["doc_ids"]
docs = DocumentService.get_by_ids(doc_ids)
return get_json_result(data=list(docs.dicts()))
@manager.route('/document', methods=['DELETE']) # noqa: F821
# @login_required
def document_rm():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
tenant_id = objs[0].tenant_id
req = request.json
try:
doc_ids = DocumentService.get_doc_ids_by_doc_names(req.get("doc_names", []))
for doc_id in req.get("doc_ids", []):
if doc_id not in doc_ids:
doc_ids.append(doc_id)
if not doc_ids:
return get_json_result(
data=False, message="Can't find doc_names or doc_ids"
)
except Exception as e:
return server_error_response(e)
root_folder = FileService.get_root_folder(tenant_id)
pf_id = root_folder["id"]
FileService.init_knowledgebase_docs(pf_id, tenant_id)
errors = ""
docs = DocumentService.get_by_ids(doc_ids)
doc_dic = {}
for doc in docs:
doc_dic[doc.id] = doc
for doc_id in doc_ids:
try:
if doc_id not in doc_dic:
return get_data_error_result(message="Document not found!")
doc = doc_dic[doc_id]
tenant_id = DocumentService.get_tenant_id(doc_id)
if not tenant_id:
return get_data_error_result(message="Tenant not found!")
b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
if not DocumentService.remove_document(doc, tenant_id):
return get_data_error_result(
message="Database error (Document removal)!")
f2d = File2DocumentService.get_by_document_id(doc_id)
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
File2DocumentService.delete_by_document_id(doc_id)
settings.STORAGE_IMPL.rm(b, n)
except Exception as e:
errors += str(e)
if errors:
return get_json_result(data=False, message=errors, code=RetCode.SERVER_ERROR)
return get_json_result(data=True)
@manager.route('/completion_aibotk', methods=['POST']) # noqa: F821
@validate_request("Authorization", "conversation_id", "word")
def completion_faq():
import base64
req = request.json
token = req["Authorization"]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
e, conv = API4ConversationService.get_by_id(req["conversation_id"])
if not e:
return get_data_error_result(message="Conversation not found!")
if "quote" not in req:
req["quote"] = True
msg = [{"role": "user", "content": req["word"]}]
if not msg[-1].get("id"):
msg[-1]["id"] = get_uuid()
message_id = msg[-1]["id"]
def fillin_conv(ans):
nonlocal conv, message_id
if not conv.reference:
conv.reference.append(ans["reference"])
else:
conv.reference[-1] = ans["reference"]
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
ans["id"] = message_id
try:
if conv.source == "agent":
conv.message.append(msg[-1])
e, cvs = UserCanvasService.get_by_id(conv.dialog_id)
if not e:
return server_error_response("canvas not found.")
if not isinstance(cvs.dsl, str):
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": "", "id": message_id})
conv.reference.append({"chunks": [], "doc_aggs": []})
final_ans = {"reference": [], "doc_aggs": []}
canvas = Canvas(cvs.dsl, objs[0].tenant_id)
canvas.messages.append(msg[-1])
canvas.add_user_input(msg[-1]["content"])
answer = canvas.run(stream=False)
assert answer is not None, "Nothing. Is it over?"
data_type_picture = {
"type": 3,
"url": "base64 content"
}
data = [
{
"type": 1,
"content": ""
}
]
final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
if final_ans.get("reference"):
canvas.reference.append(final_ans["reference"])
cvs.dsl = json.loads(str(canvas))
ans = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
fillin_conv(ans)
API4ConversationService.append_message(conv.id, conv.to_dict())
chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
for chunk_idx in chunk_idxs[:1]:
if ans["reference"]["chunks"][chunk_idx]["img_id"]:
try:
bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
response = settings.STORAGE_IMPL.get(bkt, nm)
data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
data.append(data_type_picture)
break
except Exception as e:
return server_error_response(e)
response = {"code": 200, "msg": "success", "data": data}
return response
# ******************For dialog******************
conv.message.append(msg[-1])
e, dia = DialogService.get_by_id(conv.dialog_id)
if not e:
return get_data_error_result(message="Dialog not found!")
del req["conversation_id"]
if not conv.reference:
conv.reference = []
conv.message.append({"role": "assistant", "content": "", "id": message_id})
conv.reference.append({"chunks": [], "doc_aggs": []})
data_type_picture = {
"type": 3,
"url": "base64 content"
}
data = [
{
"type": 1,
"content": ""
}
]
ans = ""
for a in chat(dia, msg, stream=False, **req):
ans = a
break
data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
fillin_conv(ans)
API4ConversationService.append_message(conv.id, conv.to_dict())
chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
for chunk_idx in chunk_idxs[:1]:
if ans["reference"]["chunks"][chunk_idx]["img_id"]:
try:
bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
response = settings.STORAGE_IMPL.get(bkt, nm)
data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
data.append(data_type_picture)
break
except Exception as e:
return server_error_response(e)
response = {"code": 200, "msg": "success", "data": data}
return response
except Exception as e:
return server_error_response(e)
@manager.route('/retrieval', methods=['POST']) # noqa: F821
@validate_request("kb_id", "question")
def retrieval():
token = request.headers.get('Authorization').split()[1]
objs = APIToken.query(token=token)
if not objs:
return get_json_result(
data=False, message='Authentication error: API key is invalid!"', code=RetCode.AUTHENTICATION_ERROR)
req = request.json
kb_ids = req.get("kb_id", [])
doc_ids = req.get("doc_ids", [])
question = req.get("question")
page = int(req.get("page", 1))
size = int(req.get("page_size", 30))
similarity_threshold = float(req.get("similarity_threshold", 0.2))
vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
top = int(req.get("top_k", 1024))
highlight = bool(req.get("highlight", False))
try:
kbs = KnowledgebaseService.get_by_ids(kb_ids)
embd_nms = list(set([kb.embd_id for kb in kbs]))
if len(embd_nms) != 1:
return get_json_result(
data=False, message='Knowledge bases use different embedding models or does not exist."',
code=RetCode.AUTHENTICATION_ERROR)
embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, llm_name=kbs[0].embd_id)
rerank_mdl = None
if req.get("rerank_id"):
rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])
if req.get("keyword", False):
chat_mdl = LLMBundle(kbs[0].tenant_id, LLMType.CHAT)
question += keyword_extraction(chat_mdl, question)
ranks = settings.retriever.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
similarity_threshold, vector_similarity_weight, top,
doc_ids, rerank_mdl=rerank_mdl, highlight= highlight,
rank_feature=label_question(question, kbs))
for c in ranks["chunks"]:
c.pop("vector", None)
return get_json_result(data=ranks)
except Exception as e:
if str(e).find("not_found") > 0:
return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
code=RetCode.DATA_ERROR)
return server_error_response(e)

View File

@ -93,6 +93,10 @@ def upload(dataset_id, tenant_id):
type: file
required: true
description: Document files to upload.
- in: formData
name: parent_path
type: string
description: Optional nested path under the parent folder. Uses '/' separators.
responses:
200:
description: Successfully uploaded documents.
@ -151,7 +155,7 @@ def upload(dataset_id, tenant_id):
e, kb = KnowledgebaseService.get_by_id(dataset_id)
if not e:
raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
err, files = FileService.upload_document(kb, file_objs, tenant_id)
err, files = FileService.upload_document(kb, file_objs, tenant_id, parent_path=request.form.get("parent_path"))
if err:
return get_result(message="\n".join(err), code=RetCode.SERVER_ERROR)
# rename key's name
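Together with the Swagger addition above, the endpoint now accepts an optional `parent_path` form field. A hedged usage sketch — host, dataset ID, and API key are placeholders, and the path assumes the existing `/api/v1/datasets/<dataset_id>/documents` upload route:

```python
import requests

# Upload a document into a nested folder of a dataset. parent_path is the
# new optional form field; it is sanitized server-side (see sanitize_path).
resp = requests.post(
    "http://localhost:9380/api/v1/datasets/<dataset_id>/documents",
    headers={"Authorization": "Bearer <API_KEY>"},
    files={"file": open("report.pdf", "rb")},
    data={"parent_path": "finance/2025/q4"},
)
print(resp.json())
```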

View File

@ -242,7 +242,7 @@ class Connector2KbService(CommonService):
"id": get_uuid(),
"connector_id": conn_id,
"kb_id": kb_id,
"auto_parse": conn.get("auto_parse", "1")
"auto_parse": conn.get("auto_parse", "1")
})
SyncLogsService.schedule(conn_id, kb_id, reindex=True)

View File

@ -31,7 +31,7 @@ from common.misc_utils import get_uuid
from common.constants import TaskStatus, FileSource, ParserType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.task_service import TaskService
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img, sanitize_path
from rag.llm.cv_model import GptV4
from common import settings
@ -329,7 +329,7 @@ class FileService(CommonService):
current_id = start_id
while current_id:
e, file = cls.get_by_id(current_id)
if file.parent_id != file.id and e:
if e and file.parent_id != file.id:
parent_folders.append(file)
current_id = file.parent_id
else:
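The reordered check relies on short-circuit evaluation: when the lookup fails, `e` is falsy and `file` may be `None`, so testing `e` first avoids dereferencing `file.parent_id` on a missing record. A minimal illustration:

```python
e, file = False, None              # simulated failed lookup
# Old order: file.parent_id != file.id and e  -> AttributeError on None
# New order: e and file.parent_id != file.id  -> short-circuits to False
print(e and file.parent_id != file.id)
```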
@ -423,13 +423,15 @@ class FileService(CommonService):
@classmethod
@DB.connection_context()
def upload_document(self, kb, file_objs, user_id, src="local"):
def upload_document(self, kb, file_objs, user_id, src="local", parent_path: str | None = None):
root_folder = self.get_root_folder(user_id)
pf_id = root_folder["id"]
self.init_knowledgebase_docs(pf_id, user_id)
kb_root_folder = self.get_kb_folder(user_id)
kb_folder = self.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
safe_parent_path = sanitize_path(parent_path)
err, files = [], []
for file in file_objs:
try:
@ -439,7 +441,7 @@ class FileService(CommonService):
if filetype == FileType.OTHER.value:
raise RuntimeError("This type of file has not been supported yet!")
location = filename
location = filename if not safe_parent_path else f"{safe_parent_path}/{filename}"
while settings.STORAGE_IMPL.obj_exist(kb.id, location):
location += "_"


View File

@ -164,3 +164,23 @@ def read_potential_broken_pdf(blob):
return repaired
return blob
def sanitize_path(raw_path: str | None) -> str:
"""Normalize and sanitize a user-provided path segment.
- Converts backslashes to forward slashes
- Strips leading/trailing slashes
- Removes '.' and '..' segments
- Restricts characters to A-Za-z0-9, underscore, dash, and '/'
"""
if not raw_path:
return ""
backslash_re = re.compile(r"[\\]+")
unsafe_re = re.compile(r"[^A-Za-z0-9_\-/]")
normalized = backslash_re.sub("/", raw_path)
normalized = normalized.strip("/")
parts = [seg for seg in normalized.split("/") if seg and seg not in (".", "..")]
sanitized = "/".join(parts)
sanitized = unsafe_re.sub("", sanitized)
return sanitized
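The resulting behavior, traced directly from the implementation above:

```python
# Expected outputs of sanitize_path for representative inputs.
assert sanitize_path(None) == ""
assert sanitize_path("..\\..\\etc/passwd") == "etc/passwd"        # traversal dropped
assert sanitize_path("/finance/2025/./q4/") == "finance/2025/q4"  # normalized
assert sanitize_path("my docs/v1.0") == "mydocs/v10"              # unsafe chars removed
```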

View File

@ -186,9 +186,6 @@ class DoclingParser(RAGFlowPdfParser):
yield (DoclingContentType.EQUATION.value, text, bbox)
def _transfer_to_sections(self, doc) -> list[tuple[str, str]]:
"""
Consistent with MinerUParser: returns [(section_text, line_tag), ...]
"""
sections: list[tuple[str, str]] = []
for typ, payload, bbox in self._iter_doc_items(doc):
if typ == DoclingContentType.TEXT.value:

View File

@ -34,6 +34,7 @@ def vision_figure_parser_figure_data_wrapper(figures_data_without_positions):
if isinstance(figure_data[1], Image.Image)
]
def vision_figure_parser_docx_wrapper(sections,tbls,callback=None,**kwargs):
try:
vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
@ -50,7 +51,8 @@ def vision_figure_parser_docx_wrapper(sections,tbls,callback=None,**kwargs):
callback(0.8, f"Visual model error: {e}. Skipping figure parsing enhancement.")
return tbls
def vision_figure_parser_pdf_wrapper(tbls,callback=None,**kwargs):
def vision_figure_parser_pdf_wrapper(tbls, callback=None, **kwargs):
try:
vision_model = LLMBundle(kwargs["tenant_id"], LLMType.IMAGE2TEXT)
callback(0.7, "Visual model detected. Attempting to enhance figure extraction...")
@ -72,6 +74,7 @@ def vision_figure_parser_pdf_wrapper(tbls,callback=None,**kwargs):
callback(0.8, f"Visual model error: {e}. Skipping figure parsing enhancement.")
return tbls
shared_executor = ThreadPoolExecutor(max_workers=10)

View File

@ -117,7 +117,6 @@ def load_model(model_dir, nm, device_id: int | None = None):
providers=['CUDAExecutionProvider'],
provider_options=[cuda_provider_options]
)
run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:" + str(provider_device_id))
logging.info(f"load_model {model_file_path} uses GPU (device {provider_device_id}, gpu_mem_limit={cuda_provider_options['gpu_mem_limit']}, arena_strategy={arena_strategy})")
else:
sess = ort.InferenceSession(
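The deleted line had requested GPU memory-arena shrinkage after every run. For context, this is how that option is normally applied in onnxruntime (standard API shown, not RAGFlow code; `sess` and `inputs` are assumed to exist):

```python
import onnxruntime as ort

# Arena shrinkage is a per-run option: it releases unused CUDA arena
# memory after each inference, trading allocation churn for footprint.
run_options = ort.RunOptions()
run_options.add_run_config_entry("memory.enable_memory_arena_shrinkage", "gpu:0")
# outputs = sess.run(None, inputs, run_options=run_options)
```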

View File

@ -5,12 +5,14 @@ import {
forwardRef,
useCallback,
useEffect,
useRef,
useState,
} from 'react';
import { useTranslation } from 'react-i18next';
import { Avatar, AvatarFallback, AvatarImage } from './ui/avatar';
import { Button } from './ui/button';
import { Input } from './ui/input';
import { Modal } from './ui/modal/modal';
type AvatarUploadProps = {
value?: string;
@ -22,14 +24,24 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
function AvatarUpload({ value, onChange, tips }, ref) {
const { t } = useTranslation();
const [avatarBase64Str, setAvatarBase64Str] = useState(''); // Avatar Image base64
const [isCropModalOpen, setIsCropModalOpen] = useState(false);
const [imageToCrop, setImageToCrop] = useState<string | null>(null);
const [cropArea, setCropArea] = useState({ x: 0, y: 0, size: 200 });
const imageRef = useRef<HTMLImageElement>(null);
const canvasRef = useRef<HTMLCanvasElement>(null);
const containerRef = useRef<HTMLDivElement>(null);
const isDraggingRef = useRef(false);
const dragStartRef = useRef({ x: 0, y: 0 });
const [imageScale, setImageScale] = useState(1);
const [imageOffset, setImageOffset] = useState({ x: 0, y: 0 });
const handleChange: ChangeEventHandler<HTMLInputElement> = useCallback(
async (ev) => {
const file = ev.target?.files?.[0];
if (/\.(jpg|jpeg|png|webp|bmp)$/i.test(file?.name ?? '')) {
const str = await transformFile2Base64(file!);
setAvatarBase64Str(str);
onChange?.(str);
const str = await transformFile2Base64(file!, 1000);
setImageToCrop(str);
setIsCropModalOpen(true);
}
ev.target.value = '';
},
@ -41,17 +53,209 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
onChange?.('');
}, [onChange]);
const handleCrop = useCallback(() => {
if (!imageRef.current || !canvasRef.current) return;
const canvas = canvasRef.current;
const ctx = canvas.getContext('2d');
const image = imageRef.current;
if (!ctx) return;
// Set canvas size to 64x64 (avatar size)
canvas.width = 64;
canvas.height = 64;
// Draw cropped image on canvas
ctx.drawImage(
image,
cropArea.x,
cropArea.y,
cropArea.size,
cropArea.size,
0,
0,
64,
64,
);
// Convert to base64
const croppedImageBase64 = canvas.toDataURL('image/png');
setAvatarBase64Str(croppedImageBase64);
onChange?.(croppedImageBase64);
setIsCropModalOpen(false);
}, [cropArea, onChange]);
const handleCancelCrop = useCallback(() => {
setIsCropModalOpen(false);
setImageToCrop(null);
}, []);
const initCropArea = useCallback(() => {
if (!imageRef.current || !containerRef.current) return;
const image = imageRef.current;
const container = containerRef.current;
// Calculate image scale to fit container
const scale = Math.min(
container.clientWidth / image.width,
container.clientHeight / image.height,
);
setImageScale(scale);
// Calculate image offset to center it
const scaledWidth = image.width * scale;
const scaledHeight = image.height * scale;
const offsetX = (container.clientWidth - scaledWidth) / 2;
const offsetY = (container.clientHeight - scaledHeight) / 2;
setImageOffset({ x: offsetX, y: offsetY });
// Initialize crop area to center of image
const size = Math.min(scaledWidth, scaledHeight) * 0.8; // 80% of the smaller dimension
const x = (image.width - size / scale) / 2;
const y = (image.height - size / scale) / 2;
setCropArea({ x, y, size: size / scale });
}, []);
const handleMouseMove = useCallback(
(e: MouseEvent) => {
if (
!isDraggingRef.current ||
!imageRef.current ||
!containerRef.current
)
return;
const image = imageRef.current;
const container = containerRef.current;
const containerRect = container.getBoundingClientRect();
// Calculate mouse position relative to container
const mouseX = e.clientX - containerRect.left;
const mouseY = e.clientY - containerRect.top;
// Calculate mouse position relative to image
const imageX = (mouseX - imageOffset.x) / imageScale;
const imageY = (mouseY - imageOffset.y) / imageScale;
// Calculate new crop area position based on mouse movement
let newX = imageX - dragStartRef.current.x;
let newY = imageY - dragStartRef.current.y;
// Boundary checks
newX = Math.max(0, Math.min(newX, image.width - cropArea.size));
newY = Math.max(0, Math.min(newY, image.height - cropArea.size));
setCropArea((prev) => ({
...prev,
x: newX,
y: newY,
}));
},
[cropArea.size, imageScale, imageOffset],
);
const handleMouseUp = useCallback(() => {
isDraggingRef.current = false;
document.removeEventListener('mousemove', handleMouseMove);
document.removeEventListener('mouseup', handleMouseUp);
}, [handleMouseMove]);
const handleMouseDown = useCallback(
(e: React.MouseEvent) => {
e.preventDefault();
e.stopPropagation();
isDraggingRef.current = true;
if (imageRef.current && containerRef.current) {
const container = containerRef.current;
const containerRect = container.getBoundingClientRect();
// Calculate mouse position relative to container
const mouseX = e.clientX - containerRect.left;
const mouseY = e.clientY - containerRect.top;
// Calculate mouse position relative to image
const imageX = (mouseX - imageOffset.x) / imageScale;
const imageY = (mouseY - imageOffset.y) / imageScale;
// Store the offset between mouse position and crop area position
dragStartRef.current = {
x: imageX - cropArea.x,
y: imageY - cropArea.y,
};
}
document.addEventListener('mousemove', handleMouseMove);
document.addEventListener('mouseup', handleMouseUp);
},
[cropArea, imageScale, imageOffset],
);
const handleWheel = useCallback((e: React.WheelEvent) => {
if (!imageRef.current) return;
e.preventDefault();
const image = imageRef.current;
const delta = e.deltaY > 0 ? 0.9 : 1.1; // Zoom factor
setCropArea((prev) => {
const newSize = Math.max(
20,
Math.min(prev.size * delta, Math.min(image.width, image.height)),
);
// Adjust position to keep crop area centered
const centerRatioX = (prev.x + prev.size / 2) / image.width;
const centerRatioY = (prev.y + prev.size / 2) / image.height;
const newX = centerRatioX * image.width - newSize / 2;
const newY = centerRatioY * image.height - newSize / 2;
// Boundary checks
const boundedX = Math.max(0, Math.min(newX, image.width - newSize));
const boundedY = Math.max(0, Math.min(newY, image.height - newSize));
return {
x: boundedX,
y: boundedY,
size: newSize,
};
});
}, []);
useEffect(() => {
if (value) {
setAvatarBase64Str(value);
}
}, [value]);
useEffect(() => {
const container = containerRef.current;
setTimeout(() => {
console.log('container', container);
// initCropArea();
if (imageToCrop && container && isCropModalOpen) {
container.addEventListener(
'wheel',
handleWheel as unknown as EventListener,
{ passive: false },
);
return () => {
container.removeEventListener(
'wheel',
handleWheel as unknown as EventListener,
);
};
}
}, 100);
}, [handleWheel, containerRef.current]);
return (
<div className="flex justify-start items-end space-x-2">
<div className="relative group">
{!avatarBase64Str ? (
<div className="w-[64px] h-[64px] grid place-content-center border border-dashed bg-bg-input rounded-md">
<div className="w-[64px] h-[64px] grid place-content-center border border-dashed bg-bg-input rounded-md">
<div className="flex flex-col items-center">
<Plus />
<p>{t('common.upload')}</p>
@ -60,7 +264,7 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
) : (
<div className="w-[64px] h-[64px] relative grid place-content-center">
<Avatar className="w-[64px] h-[64px] rounded-md">
<AvatarImage className=" block" src={avatarBase64Str} alt="" />
<AvatarImage className="block" src={avatarBase64Str} alt="" />
<AvatarFallback></AvatarFallback>
</Avatar>
<div className="absolute inset-0 bg-[#000]/20 group-hover:bg-[#000]/60">
@ -93,6 +297,79 @@ export const AvatarUpload = forwardRef<HTMLInputElement, AvatarUploadProps>(
<div className="margin-1 text-text-secondary">
{tips ?? t('knowledgeConfiguration.photoTip')}
</div>
{/* Crop Modal */}
<Modal
open={isCropModalOpen}
onOpenChange={(open) => {
setIsCropModalOpen(open);
if (!open) {
setImageToCrop(null);
}
}}
title={t('setting.cropImage')}
size="small"
onCancel={handleCancelCrop}
onOk={handleCrop}
// footer={
// <div className="flex justify-end space-x-2">
// <Button variant="secondary" onClick={handleCancelCrop}>
// {t('common.cancel')}
// </Button>
// <Button onClick={handleCrop}>{t('common.confirm')}</Button>
// </div>
// }
>
<div className="flex flex-col items-center p-4">
{imageToCrop && (
<div className="w-full">
<div
ref={containerRef}
className="relative overflow-hidden border border-border rounded-md mx-auto bg-bg-card"
style={{
width: '300px',
height: '300px',
touchAction: 'none',
}}
// onWheel={handleWheel}
>
<img
ref={imageRef}
src={imageToCrop}
alt="To crop"
className="absolute block"
style={{
transform: `scale(${imageScale})`,
transformOrigin: 'top left',
left: `${imageOffset.x}px`,
top: `${imageOffset.y}px`,
}}
onLoad={initCropArea}
/>
{imageRef.current && (
<div
className="absolute border-2 border-white border-dashed cursor-move"
style={{
left: `${imageOffset.x + cropArea.x * imageScale}px`,
top: `${imageOffset.y + cropArea.y * imageScale}px`,
width: `${cropArea.size * imageScale}px`,
height: `${cropArea.size * imageScale}px`,
boxShadow: '0 0 0 9999px rgba(0, 0, 0, 0.5)',
}}
onMouseDown={handleMouseDown}
/>
)}
</div>
<div className="flex justify-center mt-4">
<p className="text-sm text-text-secondary">
{t('setting.cropTip')}
</p>
</div>
<canvas ref={canvasRef} className="hidden" />
</div>
)}
</div>
</Modal>
</div>
);
},

View File

@ -694,6 +694,9 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
tocEnhanceTip: ` During the parsing of the document, table of contents information was generated (see the 'Enable Table of Contents Extraction' option in the General method). This allows the large model to return table of contents items relevant to the user's query, thereby using these items to retrieve related chunks and apply weighting to these chunks during the sorting process. This approach is derived from mimicking the behavioral logic of how humans search for knowledge in books.`,
},
setting: {
cropTip:
'Drag the selection area to choose the cropping position of the image, and scroll to zoom in/out',
cropImage: 'Crop image',
selectModelPlaceholder: 'Select model',
configureModelTitle: 'Configure model',
confluenceIsCloudTip:

View File

@ -684,6 +684,8 @@ General: entity and relationship extraction prompts come from GitHub - microsoft/graphrag, based on
tocEnhanceTip: `While parsing the document, table of contents information was generated (see 'Enable Table of Contents Extraction' in the General method). This lets the large model return TOC items relevant to the user's question, use those items to retrieve related chunks, and up-weight those chunks during ranking. The approach mimics how humans look up knowledge in a book.`,
},
setting: {
cropTip: 'Drag the selection area to choose the crop position of the image; scroll to zoom the selection in/out',
cropImage: 'Crop image',
selectModelPlaceholder: '请选择模型',
configureModelTitle: '配置模型',
confluenceIsCloudTip:

View File

@ -2,7 +2,10 @@ import { FileMimeType } from '@/constants/common';
import fileManagerService from '@/services/file-manager-service';
import { UploadFile } from 'antd';
export const transformFile2Base64 = (val: any): Promise<any> => {
export const transformFile2Base64 = (
val: any,
imgSize?: number,
): Promise<any> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.readAsDataURL(val);
@ -19,7 +22,7 @@ export const transformFile2Base64 = (val: any): Promise<any> => {
// Calculate compressed dimensions, set max width/height to 800px
let width = img.width;
let height = img.height;
const maxSize = 100;
const maxSize = imgSize ?? 100;
if (width > height && width > maxSize) {
height = (height * maxSize) / width;