File: parsing now supports all types of embedded documents, solves #10059 (#10635)

### What problem does this PR solve?

File: parsing now supports all types of embedded documents, solves #10059
Fix: incomplete words in chat #10530

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
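
With this change, the chunker extracts any files embedded inside the uploaded document and recursively chunks each of them, returning their chunks alongside the parent document's. A minimal usage sketch, assuming the file under change is `rag/app/naive.py`; the input file name and the print-based callback are illustrative, not part of the PR:

```python
from rag.app.naive import chunk  # assumed module path for the changed file

def progress(prog=None, msg=""):
    # Illustrative progress callback; the real pipeline wires its own.
    print(prog, msg)

# report.docx is a hypothetical input; chunks from documents embedded
# inside it come back in the same flat list as the parent's chunks.
with open("report.docx", "rb") as f:
    results = chunk("report.docx", binary=f.read(), lang="English", callback=progress)
```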
Author: Billy Bao
Date: 2025-10-17 18:46:47 +08:00
Committed by: GitHub
Parent: f50b2461cb
Commit: 8ee0b6ea54
6 changed files with 486 additions and 7 deletions

@@ -15,12 +15,11 @@
#
import logging
import os
import re
from functools import reduce
from io import BytesIO
from timeit import default_timer as timer
from docx import Document
from docx.image.exceptions import InvalidImageStreamError, UnexpectedEndOfFileError, UnrecognizedImageError
from docx.opc.pkgreader import _SerializedRelationships, _SerializedRelationship
@@ -31,10 +30,11 @@ from tika import parser
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.utils.file_utils import extract_embed_file
from deepdoc.parser import DocxParser, ExcelParser, HtmlParser, JsonParser, MarkdownElementExtractor, MarkdownParser, PdfParser, TxtParser
from deepdoc.parser.figure_parser import VisionFigureParser, vision_figure_parser_figure_data_wrapper
from deepdoc.parser.mineru_parser import MinerUParser
from deepdoc.parser.pdf_parser import PlainParser, VisionParser
from rag.nlp import concat_img, find_codec, naive_merge, naive_merge_with_images, naive_merge_docx, rag_tokenizer, tokenize_chunks, tokenize_chunks_with_images, tokenize_table
@@ -437,6 +437,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
    Successive text will be sliced into pieces using 'delimiter'.
    Next, these successive pieces are merged into chunks whose token number is no more than 'Max token number'.
    """
    is_english = lang.lower() == "english"  # is_english(cks)
    parser_config = kwargs.get(
@@ -450,6 +451,27 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
    res = []
    pdf_parser = None
    section_images = None
    is_root = kwargs.get("is_root", True)
    embed_res = []
    if is_root:
        # Only extract embedded files at the root call
        embeds = []
        if binary is not None:
            embeds = extract_embed_file(binary)
        else:
            raise Exception("Embedding extraction from file path is not supported.")
        # Recursively chunk each embedded file and collect results
        for embed_filename, embed_bytes in embeds:
            try:
                sub_res = chunk(embed_filename, binary=embed_bytes, lang=lang, callback=callback, is_root=False, **kwargs) or []
                embed_res.extend(sub_res)
            except Exception as e:
                if callback:
                    callback(0.05, f"Failed to chunk embed {embed_filename}: {e}")
                continue
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
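
The block above delegates to `extract_embed_file`, which this diff only imports from `api.utils.file_utils`. For orientation, here is a hypothetical sketch of such a helper for OOXML containers; docx/xlsx/pptx files are zip archives that keep embedded objects under `*/embeddings/` paths. The real implementation is not shown in this diff and likely covers more formats:

```python
import zipfile
from io import BytesIO

# Hypothetical sketch only: the real extract_embed_file lives in
# api/utils/file_utils.py and is not part of this diff.
def extract_embed_file_sketch(binary: bytes) -> list[tuple[str, bytes]]:
    embeds = []
    try:
        with zipfile.ZipFile(BytesIO(binary)) as zf:
            for name in zf.namelist():
                # OOXML stores embedded objects under word/embeddings/,
                # xl/embeddings/, ppt/embeddings/, etc.
                if "/embeddings/" in name and not name.endswith("/"):
                    embeds.append((name.rsplit("/", 1)[-1], zf.read(name)))
    except zipfile.BadZipFile:
        pass  # not a zip container; nothing embedded to extract
    return embeds
```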
@@ -483,10 +505,12 @@
"delimiter", "\n!?。;!?"))
if kwargs.get("section_only", False):
chunks.extend(embed_res)
return chunks
res.extend(tokenize_chunks_with_images(chunks, doc, is_english, images))
logging.info("naive_merge({}): {}".format(filename, timer() - st))
res.extend(embed_res)
return res
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
@@ -519,6 +543,7 @@
            res = tokenize_table(tables, doc, is_english)
            callback(0.8, "Finish parsing.")
        elif layout_recognizer == "MinerU":
            mineru_executable = os.environ.get("MINERU_EXECUTABLE", "mineru")
            pdf_parser = MinerUParser(mineru_path=mineru_executable)
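
The MinerU branch resolves the executable from the `MINERU_EXECUTABLE` environment variable and falls back to `mineru` on the PATH. For example (the path below is illustrative, not a project default):

```python
import os

# Point the MinerU branch at a specific binary before chunking;
# when unset, the code above falls back to "mineru" on the PATH.
os.environ["MINERU_EXECUTABLE"] = "/opt/mineru/bin/mineru"
```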
@@ -621,7 +646,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
callback(0.8, f"tika.parser got empty content from {filename}.")
logging.warning(f"tika.parser got empty content from {filename}.")
return []
else:
raise NotImplementedError(
"file type not supported yet(pdf, xlsx, doc, docx, txt supported)")
@@ -638,6 +662,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
"chunk_token_num", 128)), parser_config.get(
"delimiter", "\n!?。;!?"))
if kwargs.get("section_only", False):
chunks.extend(embed_res)
return chunks
res.extend(tokenize_chunks_with_images(chunks, doc, is_english, images))
@@ -647,11 +672,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
"chunk_token_num", 128)), parser_config.get(
"delimiter", "\n!?。;!?"))
if kwargs.get("section_only", False):
chunks.extend(embed_res)
return chunks
res.extend(tokenize_chunks(chunks, doc, is_english, pdf_parser))
logging.info("naive_merge({}): {}".format(filename, timer() - st))
if embed_res:
res.extend(embed_res)
return res
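
Note that every `section_only` early return now extends `chunks` with `embed_res` first, so callers that only want raw merged sections also receive those recovered from embedded documents. A quick sketch (hypothetical file name, no-op callback):

```python
# section_only=True skips tokenization and returns merged chunks,
# now including chunks from embedded documents.
with open("report.docx", "rb") as f:
    sections = chunk("report.docx", binary=f.read(), lang="English",
                     callback=lambda prog=None, msg="": None,
                     section_only=True)
```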