Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-24 07:26:47 +08:00)
Fix: Tika server upgrades. (#12073)

### What problem does this PR solve?

#12037

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
@@ -76,6 +76,7 @@ async def list_chunk():
                 "image_id": sres.field[id].get("img_id", ""),
                 "available_int": int(sres.field[id].get("available_int", 1)),
                 "positions": sres.field[id].get("position_int", []),
+                "doc_type_kwd": sres.field[id].get("doc_type_kwd")
             }
             assert isinstance(d["positions"], list)
             assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
@@ -176,10 +177,9 @@ async def set():
         settings.docStoreConn.update({"id": req["chunk_id"]}, _d, search.index_name(tenant_id), doc.kb_id)
 
         # update image
-        image_id = req.get("img_id")
-        bkt, name = image_id.split("-")
         image_base64 = req.get("image_base64", None)
         if image_base64:
+            bkt, name = req.get("img_id", "-").split("-")
             image_binary = base64.b64decode(image_base64)
             settings.STORAGE_IMPL.put(bkt, name, image_binary)
         return get_json_result(data=True)
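The change above fixes the image-update path: previously `img_id` was read and split unconditionally, so a text-only edit that carried no image could raise on `None.split("-")`. A minimal sketch of the guarded behaviour, with a made-up request dict and storage object (not code from the PR):

```python
import base64


def update_chunk_image(req, storage):
    # Only touch img_id when an image payload is actually present; the "-" default
    # keeps split("-") from blowing up on a missing id.
    image_base64 = req.get("image_base64", None)
    if image_base64:
        bkt, name = req.get("img_id", "-").split("-")
        storage.put(bkt, name, base64.b64decode(image_base64))


# A request that only edits chunk text no longer needs img_id at all.
update_chunk_image({"chunk_id": "c1", "content_with_weight": "new text"}, storage=None)
```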
@@ -150,6 +150,21 @@ async def update():
         return server_error_response(e)
 
 
+@manager.route('/update_metadata_setting', methods=['post'])  # noqa: F821
+@login_required
+@validate_request("kb_id", "metadata")
+async def update_metadata_setting():
+    req = await get_request_json()
+    e, kb = KnowledgebaseService.get_by_id(req["kb_id"])
+    if not e:
+        return get_data_error_result(
+            message="Database error (Knowledgebase rename)!")
+    kb = kb.to_dict()
+    kb["parser_config"]["metadata"] = req["metadata"]
+    KnowledgebaseService.update_by_id(kb["id"], kb)
+    return get_json_result(data=kb)
+
+
 @manager.route('/detail', methods=['GET'])  # noqa: F821
 @login_required
 def detail():
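The new `update_metadata_setting` route stores the request's `metadata` under the knowledge base's `parser_config` and echoes the updated record back. A hypothetical client call is sketched below; the host, the `/v1/kb` URL prefix, the auth header, and the shape of `metadata` are assumptions, not taken from this diff:

```python
import requests

# Hypothetical request against the new route (base URL, prefix, auth and payload
# shape are all assumptions for illustration).
resp = requests.post(
    "http://localhost:9380/v1/kb/update_metadata_setting",
    headers={"Authorization": "Bearer <api-token>"},
    json={
        "kb_id": "your-kb-id",
        "metadata": {"department": "engineering"},
    },
)
print(resp.status_code, resp.json())
```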
@@ -411,6 +411,8 @@ class KnowledgebaseService(CommonService):
         ok, _t = TenantService.get_by_id(tenant_id)
         if not ok:
             return False, get_data_error_result(message="Tenant not found.")
+        if kwargs.get("parser_config") and isinstance(kwargs["parser_config"], dict) and not kwargs["parser_config"].get("llm_id"):
+            kwargs["parser_config"]["llm_id"] = _t.llm_id
 
         # Build payload
         kb_id = get_uuid()
@@ -31,6 +31,7 @@
   "entity_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "source_id": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "n_hop_with_weight": {"type": "varchar", "default": ""},
+  "mom_with_weight": {"type": "varchar", "default": ""},
   "removed_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "doc_type_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
   "toc_kwd": {"type": "varchar", "default": "", "analyzer": "whitespace-#"},
@@ -23,8 +23,8 @@ def get_urls(use_china_mirrors=False) -> list[Union[str, list[str]]]:
         return [
             "http://mirrors.tuna.tsinghua.edu.cn/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb",
             "http://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_arm64.deb",
-            "https://repo.huaweicloud.com/repository/maven/org/apache/tika/tika-server-standard/3.0.0/tika-server-standard-3.0.0.jar",
-            "https://repo.huaweicloud.com/repository/maven/org/apache/tika/tika-server-standard/3.0.0/tika-server-standard-3.0.0.jar.md5",
+            "https://repo.huaweicloud.com/repository/maven/org/apache/tika/tika-server-standard/3.2.3/tika-server-standard-3.2.3.jar",
+            "https://repo.huaweicloud.com/repository/maven/org/apache/tika/tika-server-standard/3.2.3/tika-server-standard-3.2.3.jar.md5",
             "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken",
             ["https://registry.npmmirror.com/-/binary/chrome-for-testing/121.0.6167.85/linux64/chrome-linux64.zip", "chrome-linux64-121-0-6167-85"],
             ["https://registry.npmmirror.com/-/binary/chrome-for-testing/121.0.6167.85/linux64/chromedriver-linux64.zip", "chromedriver-linux64-121-0-6167-85"],
@@ -34,8 +34,8 @@ def get_urls(use_china_mirrors=False) -> list[Union[str, list[str]]]:
         return [
             "http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb",
             "http://ports.ubuntu.com/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_arm64.deb",
-            "https://repo1.maven.org/maven2/org/apache/tika/tika-server-standard/3.0.0/tika-server-standard-3.0.0.jar",
-            "https://repo1.maven.org/maven2/org/apache/tika/tika-server-standard/3.0.0/tika-server-standard-3.0.0.jar.md5",
+            "https://repo1.maven.org/maven2/org/apache/tika/tika-server-standard/3.2.3/tika-server-standard-3.2.3.jar",
+            "https://repo1.maven.org/maven2/org/apache/tika/tika-server-standard/3.2.3/tika-server-standard-3.2.3.jar.md5",
             "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken",
             ["https://storage.googleapis.com/chrome-for-testing-public/121.0.6167.85/linux64/chrome-linux64.zip", "chrome-linux64-121-0-6167-85"],
             ["https://storage.googleapis.com/chrome-for-testing-public/121.0.6167.85/linux64/chromedriver-linux64.zip", "chromedriver-linux64-121-0-6167-85"],
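Both mirror lists bump tika-server-standard from 3.0.0 to 3.2.3 and keep downloading the matching `.jar.md5`. A standalone sanity check of the published digest might look like this (it simply re-downloads the jar rather than hooking into the deps script):

```python
import hashlib
import urllib.request

JAR = "https://repo1.maven.org/maven2/org/apache/tika/tika-server-standard/3.2.3/tika-server-standard-3.2.3.jar"

# Fetch the jar and its published MD5, then compare digests.
jar_bytes = urllib.request.urlopen(JAR).read()
published = urllib.request.urlopen(JAR + ".md5").read().decode().strip().split()[0]
computed = hashlib.md5(jar_bytes).hexdigest()
assert computed == published, f"tika jar checksum mismatch: {computed} != {published}"
```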
@@ -91,7 +91,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             filename, binary=binary, from_page=from_page, to_page=to_page)
         remove_contents_table(sections, eng=is_english(
             random_choices([t for t, _ in sections], k=200)))
-        tbls=vision_figure_parser_docx_wrapper(sections=sections,tbls=tbls,callback=callback,**kwargs)
+        tbls = vision_figure_parser_docx_wrapper(sections=sections,tbls=tbls,callback=callback,**kwargs)
         # tbls = [((None, lns), None) for lns in tbls]
         sections=[(item[0],item[1] if item[1] is not None else "") for item in sections if not isinstance(item[1], Image.Image)]
         callback(0.8, "Finish parsing.")
@@ -147,9 +147,16 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
 
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
-        with BytesIO(binary) as binary:
-            binary = BytesIO(binary)
-            doc_parsed = parser.from_buffer(binary)
+        try:
+            from tika import parser as tika_parser
+        except Exception as e:
+            callback(0.8, f"tika not available: {e}. Unsupported .doc parsing.")
+            logging.warning(f"tika not available: {e}. Unsupported .doc parsing for {filename}.")
+            return []
+
+        binary = BytesIO(binary)
+        doc_parsed = tika_parser.from_buffer(binary)
+        if doc_parsed.get('content', None) is not None:
             sections = doc_parsed['content'].split('\n')
             sections = [(line, "") for line in sections if line]
             remove_contents_table(sections, eng=is_english(
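Both `.doc` branches touched by this PR now follow the same guard: import `tika` lazily, report and bail out if it is missing, and only split `content` when Tika actually returned text. A shared helper capturing that pattern could look like the sketch below; it assumes the diff's `callback(progress, message)` convention and is not code from the PR:

```python
import logging
from io import BytesIO


def parse_doc_with_tika(filename, binary, callback):
    """Parse a legacy .doc via Tika, degrading gracefully when tika is unavailable.

    Returns the non-empty text lines of the document, or [] on failure.
    """
    try:
        from tika import parser as tika_parser
    except Exception as e:
        callback(0.8, f"tika not available: {e}. Unsupported .doc parsing.")
        logging.warning(f"tika not available: {e}. Unsupported .doc parsing for {filename}.")
        return []

    doc_parsed = tika_parser.from_buffer(BytesIO(binary))
    content = doc_parsed.get("content")
    if content is None:
        return []
    return [line for line in content.split("\n") if line]
```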
@@ -312,7 +312,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             tk_cnt = num_tokens_from_string(txt)
             if sec_id > -1:
                 last_sid = sec_id
-    tbls=vision_figure_parser_pdf_wrapper(tbls=tbls,callback=callback,**kwargs)
+    tbls = vision_figure_parser_pdf_wrapper(tbls=tbls,callback=callback,**kwargs)
     res = tokenize_table(tbls, doc, eng)
     res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
     table_ctx = max(0, int(parser_config.get("table_context_size", 0) or 0))
@@ -325,7 +325,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         docx_parser = Docx()
         ti_list, tbls = docx_parser(filename, binary,
                                     from_page=0, to_page=10000, callback=callback)
-        tbls=vision_figure_parser_docx_wrapper(sections=ti_list,tbls=tbls,callback=callback,**kwargs)
+        tbls = vision_figure_parser_docx_wrapper(sections=ti_list,tbls=tbls,callback=callback,**kwargs)
         res = tokenize_table(tbls, doc, eng)
         for text, image in ti_list:
             d = copy.deepcopy(doc)
@@ -76,7 +76,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
     if re.search(r"\.docx$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         sections, tbls = naive.Docx()(filename, binary)
-        tbls=vision_figure_parser_docx_wrapper(sections=sections,tbls=tbls,callback=callback,**kwargs)
+        tbls = vision_figure_parser_docx_wrapper(sections=sections, tbls=tbls, callback=callback, **kwargs)
         sections = [s for s, _ in sections if s]
         for (_, html), _ in tbls:
             sections.append(html)
@@ -142,10 +142,18 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
 
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
+        try:
+            from tika import parser as tika_parser
+        except Exception as e:
+            callback(0.8, f"tika not available: {e}. Unsupported .doc parsing.")
+            logging.warning(f"tika not available: {e}. Unsupported .doc parsing for {filename}.")
+            return []
+
         binary = BytesIO(binary)
-        doc_parsed = parser.from_buffer(binary)
-        sections = doc_parsed['content'].split('\n')
-        sections = [s for s in sections if s]
+        doc_parsed = tika_parser.from_buffer(binary)
+        if doc_parsed.get('content', None) is not None:
+            sections = doc_parsed['content'].split('\n')
+            sections = [s for s in sections if s]
         callback(0.8, "Finish parsing.")
 
     else:
@@ -650,7 +650,7 @@ class Parser(ProcessBase):
             tmpf.flush()
             tmp_path = os.path.abspath(tmpf.name)
 
-            seq2txt_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.SPEECH2TEXT)
+            seq2txt_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.SPEECH2TEXT, llm_name=conf["llm_id"])
             txt = seq2txt_mdl.transcription(tmp_path)
 
             self.set_output("text", txt)
@@ -374,7 +374,7 @@ async def build_chunks(task, progress_callback):
         chat_mdl = LLMBundle(task["tenant_id"], LLMType.CHAT, llm_name=task["llm_id"], lang=task["language"])
 
         async def gen_metadata_task(chat_mdl, d):
-            cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "metadata")
+            cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "metadata", {})
             if not cached:
                 async with chat_limiter:
                     cached = await gen_metadata(chat_mdl,
@@ -852,7 +852,7 @@ async def do_handle_task(task):
    task_tenant_id = task["tenant_id"]
    task_embedding_id = task["embd_id"]
    task_language = task["language"]
-    task_llm_id = task["llm_id"]
+    task_llm_id = task["parser_config"].get("llm_id") or task["llm_id"]
    task_dataset_id = task["kb_id"]
    task_doc_id = task["doc_id"]
    task_document_name = task["name"]
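Together with the `KnowledgebaseService` change above, the last hunk makes task handling prefer a per-knowledge-base `parser_config.llm_id` and fall back to the task-level `llm_id` only when none is set. A tiny illustration of that precedence with made-up task dicts:

```python
# The per-KB model wins when parser_config carries one...
task = {"llm_id": "tenant-default-chat", "parser_config": {"llm_id": "kb-specific-chat"}}
assert (task["parser_config"].get("llm_id") or task["llm_id"]) == "kb-specific-chat"

# ...otherwise the task-level llm_id is used, as before this PR.
task = {"llm_id": "tenant-default-chat", "parser_config": {}}
assert (task["parser_config"].get("llm_id") or task["llm_id"]) == "tenant-default-chat"
```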