Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
# Add support for HTML file (#973)
### What problem does this PR solve?

Add support for HTML file.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
```diff
@@ -19,7 +19,7 @@ from rag.nlp import bullets_category, is_english, tokenize, remove_contents_tabl
     hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
     tokenize_chunks, find_codec
 from rag.nlp import rag_tokenizer
-from deepdoc.parser import PdfParser, DocxParser, PlainParser
+from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser


 class Pdf(PdfParser):
```
```diff
@@ -105,6 +105,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             random_choices([t for t, _ in sections], k=200)))
         callback(0.8, "Finish parsing.")

+    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
+        callback(0.1, "Start to parse.")
+        sections = HtmlParser()(filename, binary)
+        sections = [(l, "") for l in sections if l]
+        remove_contents_table(sections, eng=is_english(
+            random_choices([t for t, _ in sections], k=200)))
+        callback(0.8, "Finish parsing.")
+
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         binary = BytesIO(binary)
```
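The branch added in this hunk dispatches purely on the file extension and then wraps each non-empty section returned by the parser as a `(text, "")` tuple before table-of-contents removal. A minimal sketch of that dispatch and wrapping, with made-up sample data standing in for real parser output (`looks_like_html` is an illustrative helper, not a function from the codebase):

```python
# Sketch of the extension dispatch and section wrapping used in the hunk above.
# The regex and the (text, "") shape come from the diff; the sample data is invented.
import re


def looks_like_html(filename: str) -> bool:
    """True when the filename ends in .htm or .html (case-insensitive)."""
    return re.search(r"\.(htm|html)$", filename, re.IGNORECASE) is not None


assert looks_like_html("report.HTML")
assert looks_like_html("page.htm")
assert not looks_like_html("archive.tar.gz")

# HtmlParser()(filename, binary) is expected to return a list of text sections;
# the chunker keeps the non-empty ones and pairs each with an empty layout tag.
raw_sections = ["Chapter 1", "", "Some body text"]   # stand-in for parser output
sections = [(l, "") for l in raw_sections if l]
assert sections == [("Chapter 1", ""), ("Some body text", "")]
```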
```diff
@@ -20,7 +20,7 @@ from api.db import ParserType
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
     make_colon_as_title, add_positions, tokenize_chunks, find_codec
 from rag.nlp import rag_tokenizer
-from deepdoc.parser import PdfParser, DocxParser, PlainParser
+from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
 from rag.settings import cron_logger


```
```diff
@@ -125,6 +125,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         sections = [l for l in sections if l]
         callback(0.8, "Finish parsing.")

+    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
+        callback(0.1, "Start to parse.")
+        sections = HtmlParser()(filename, binary)
+        sections = [l for l in sections if l]
+        callback(0.8, "Finish parsing.")
+
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         binary = BytesIO(binary)
```
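All four call sites in this diff use the same `HtmlParser()(filename, binary)` convention, but the parser class itself is not visible in this view. Below is a hypothetical, standard-library-only sketch of a class with that calling convention, purely to illustrate the expected interface; it is not the implementation this PR adds to `deepdoc.parser`.

```python
# Hypothetical sketch only: the deepdoc HtmlParser added by this PR is not shown in
# this diff view. This illustrates one way a class with the same calling convention,
# HtmlParser()(filename, binary), could turn HTML into a list of text sections.
from html.parser import HTMLParser as StdHTMLParser


class _TextExtractor(StdHTMLParser):
    """Collects visible text fragments, skipping <script> and <style> content."""

    def __init__(self):
        super().__init__()
        self._skip_depth = 0      # >0 while inside script/style
        self.sections = []

    def handle_starttag(self, tag, attrs):
        if tag in ("script", "style"):
            self._skip_depth += 1

    def handle_endtag(self, tag):
        if tag in ("script", "style") and self._skip_depth > 0:
            self._skip_depth -= 1

    def handle_data(self, data):
        text = data.strip()
        if text and self._skip_depth == 0:
            self.sections.append(text)


class HtmlParser:
    """Callable parser mirroring the HtmlParser()(filename, binary) call sites above."""

    def __call__(self, fnm, binary=None):
        if binary is None:                      # no bytes passed: read the file from disk
            with open(fnm, "rb") as f:
                binary = f.read()
        txt = binary.decode("utf-8", errors="ignore")  # assumption: roughly UTF-8 input
        extractor = _TextExtractor()
        extractor.feed(txt)
        return extractor.sections               # list of non-empty text sections
```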
```diff
@@ -17,7 +17,7 @@ from timeit import default_timer as timer
 import re
 from deepdoc.parser.pdf_parser import PlainParser
 from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
-from deepdoc.parser import PdfParser, ExcelParser, DocxParser
+from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser
 from rag.settings import cron_logger
 from rag.utils import num_tokens_from_string

```
```diff
@@ -161,6 +161,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,

         callback(0.8, "Finish parsing.")

+    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
+        callback(0.1, "Start to parse.")
+        sections = HtmlParser()(filename, binary)
+        sections = [(l, "") for l in sections if l]
+        callback(0.8, "Finish parsing.")
+
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         binary = BytesIO(binary)
```
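Each new branch reports progress through the `callback` hook the chunkers already receive: 0.1 when parsing starts and 0.8 when it finishes. A stand-in callback for local experimentation might simply print both values (the real hook is supplied by the calling task and is not shown in this diff):

```python
# Stand-in progress hook matching the callback(progress, message) calls above;
# the real callback wired in by the calling task is not shown in this diff.
def progress_callback(progress: float, message: str = "") -> None:
    print(f"[{progress:.0%}] {message}")


progress_callback(0.1, "Start to parse.")    # -> [10%] Start to parse.
progress_callback(0.8, "Finish parsing.")    # -> [80%] Finish parsing.
```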
```diff
@@ -15,7 +15,7 @@ from io import BytesIO
 import re
 from rag.app import laws
 from rag.nlp import rag_tokenizer, tokenize, find_codec
-from deepdoc.parser import PdfParser, ExcelParser, PlainParser
+from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser


 class Pdf(PdfParser):
```
```diff
@@ -97,6 +97,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         sections = [s for s in sections if s]
         callback(0.8, "Finish parsing.")

+    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
+        callback(0.1, "Start to parse.")
+        sections = HtmlParser()(filename, binary)
+        sections = [s for s in sections if s]
+        callback(0.8, "Finish parsing.")
+
     elif re.search(r"\.doc$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         binary = BytesIO(binary)
```
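A tiny usage example for the hypothetical `HtmlParser` sketch shown earlier, wrapped the way this last chunker keeps its sections (plain strings rather than `(text, "")` tuples); the HTML snippet is invented:

```python
# Usage example for the hypothetical HtmlParser sketch above; the HTML is invented.
html_bytes = b"<html><body><h1>Title</h1><p>First paragraph.</p></body></html>"
sections = HtmlParser()("example.html", html_bytes)   # filename unused when bytes are passed
sections = [s for s in sections if s]                 # plain-string sections, as in this hunk
print(sections)   # expected: ['Title', 'First paragraph.']
```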