Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00
solve task execution issues (#90)
@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
-    hierarchical_merge, make_colon_as_title, naive_merge, random_choices
+    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser

@@ -90,25 +90,16 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         make_colon_as_title(sections)
         bull = bullets_category([t for t in random_choices([t for t,_ in sections], k=100)])
         if bull >= 0: cks = hierarchical_merge(bull, sections, 3)
         else: cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))
     else:
         sections = [s.split("@") for s in sections]
         sections = [(pr[0], "@"+pr[1]) for pr in sections if len(pr)==2]
         cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))

-    sections = [t for t, _ in sections]
     # is it English
-    eng = lang.lower() == "english"#is_english(random_choices(sections, k=218))
+    eng = lang.lower() == "english"#is_english(random_choices([t for t, _ in sections], k=218))

+    res = tokenize_table(tbls, doc, eng)

-    res = []
-    # add tables
-    for img, rows in tbls:
-        bs = 10
-        de = ";" if eng else ";"
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r, eng)
-            d["image"] = img
-            res.append(d)
-            print("TABLE", d["content_with_weight"])
     # wrap up to es documents
     for ck in cks:
         d = copy.deepcopy(doc)
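Every file in this commit deletes a copy of the same table-batching loop and replaces it with one call to tokenize_table, now exported from rag.nlp alongside tokenize and naive_merge. A minimal sketch of what that helper plausibly contains, reconstructed from the deleted loop above (the signature matches the call sites; the body, the hoisted constants, and the comments are assumptions, not the verbatim rag/nlp code):

    import copy
    import re

    from rag.nlp import tokenize  # the same tokenizer the deleted loops called

    def tokenize_table(tbls, doc, eng):
        # tbls: list of (image, rows) pairs from a parser; doc: base ES document.
        res = []
        bs = 10                    # batch size: table rows per chunk
        de = ";" if eng else ";"   # half-width delimiter for English, full-width otherwise
        for img, rows in tbls:
            for i in range(0, len(rows), bs):
                d = copy.deepcopy(doc)
                r = de.join(rows[i:i + bs])
                # drop the "——来自/in <source>" attribution tails the pdf parser appends
                r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
                tokenize(d, r, eng)
                d["image"] = img
                res.append(d)
        return res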
@@ -2,7 +2,7 @@ import copy
 import re

 from api.db import ParserType
-from rag.nlp import huqie, tokenize
+from rag.nlp import huqie, tokenize, tokenize_table
 from deepdoc.parser import PdfParser
 from rag.utils import num_tokens_from_string

@@ -81,18 +81,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # is it English
     eng = lang.lower() == "english"#pdf_parser.is_english

-    res = []
-    # add tables
-    for img, rows in tbls:
-        bs = 10
-        de = ";" if eng else ";"
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r, eng)
-            d["image"] = img
-            res.append(d)
+    res = tokenize_table(tbls, doc, eng)

     i = 0
     chunk = []

@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.app import laws
-from rag.nlp import huqie, is_english, tokenize, naive_merge
+from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
 from deepdoc.parser import PdfParser
 from rag.settings import cron_logger

@@ -72,17 +72,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         pdf_parser = Pdf()
         sections, tbls = pdf_parser(filename if not binary else binary,
                                     from_page=from_page, to_page=to_page, callback=callback)
-        # add tables
-        for img, rows in tbls:
-            bs = 10
-            de = ";" if eng else ";"
-            for i in range(0, len(rows), bs):
-                d = copy.deepcopy(doc)
-                r = de.join(rows[i:i + bs])
-                r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-                tokenize(d, r, eng)
-                d["image"] = img
-                res.append(d)
+        res = tokenize_table(tbls, doc, eng)
     elif re.search(r"\.txt$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         txt = ""
@@ -106,6 +96,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # wrap up to es documents
     for ck in cks:
         print("--", ck)
+        if not ck:continue
         d = copy.deepcopy(doc)
         if pdf_parser:
             d["image"] = pdf_parser.crop(ck)
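The added guard is the actual task-execution fix in this file: the merge step can presumably emit empty chunks (delimiter-only spans), and an empty ck would otherwise reach pdf_parser.crop(ck) and the tokenizer. A toy illustration of the guard's effect (the sample data is invented):

    cks = ["Chapter 1 ...", "", "Chapter 2 ..."]
    kept = []
    for ck in cks:
        if not ck: continue   # the line this hunk adds
        kept.append(ck)
    assert kept == ["Chapter 1 ...", "Chapter 2 ..."]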
@@ -15,7 +15,7 @@ import re
 from collections import Counter

 from api.db import ParserType
-from rag.nlp import huqie, tokenize
+from rag.nlp import huqie, tokenize, tokenize_table
 from deepdoc.parser import PdfParser
 import numpy as np
 from rag.utils import num_tokens_from_string
@@ -158,18 +158,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     eng = lang.lower() == "english"#pdf_parser.is_english
     print("It's English.....", eng)

-    res = []
-    # add tables
-    for img, rows in paper["tables"]:
-        bs = 10
-        de = ";" if eng else ";"
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r)
-            d["image"] = img
-            res.append(d)
+    res = tokenize_table(paper["tables"], doc, eng)

     if paper["abstract"]:
         d = copy.deepcopy(doc)
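The call shape is identical across all four chunkers; only where the tables live differs. Collected from the hunks above:

    res = tokenize_table(tbls, doc, eng)              # book-, manual-, naive-style chunkers
    res = tokenize_table(paper["tables"], doc, eng)   # paper keeps tables under a dict key

The consolidation also fixes an inconsistency for free: the deleted loop in this file called tokenize(d, r) without the eng flag, so English tables in papers were presumably tokenized with the non-English default.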
@@ -20,7 +20,7 @@ from deepdoc.parser import PdfParser, PptParser

 class Ppt(PptParser):
     def __call__(self, fnm, from_page, to_page, callback=None):
-        txts = super.__call__(fnm, from_page, to_page)
+        txts = super().__call__(fnm, from_page, to_page)

         callback(0.5, "Text extraction finished.")
         import aspose.slides as slides
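A one-token fix with a real crash behind it: super is a type, so super.__call__(fnm, from_page, to_page) is equivalent to calling super(fnm, from_page, to_page) itself and raises TypeError at runtime ("super() argument 1 must be a type"), whereas super().__call__(...) dispatches to PptParser. A generic demo with toy classes (not the ragflow ones):

    class Base:
        def __call__(self, x):
            return x * 2

    class Child(Base):
        def __call__(self, x):
            # super.__call__(x) here would raise TypeError at runtime
            return super().__call__(x) + 1

    print(Child()(3))  # prints 7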
@@ -79,7 +79,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
     resume = remote_call(filename, binary)
     if len(resume.keys()) < 7:
         callback(-1, "Resume is not successfully parsed.")
-        return []
+        raise Exception("Resume parser remote call fail!")
     callback(0.6, "Done parsing. Chunking...")
     print(json.dumps(resume, ensure_ascii=False, indent=2))
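Swapping return [] for a raise is the task-execution fix on the resume path: an empty return makes a failed remote parse indistinguishable from a resume that produced zero chunks, while raising lets whatever drives the chunkers mark the task as failed. A hedged sketch of the caller-side difference (run_task and its signature are assumptions, not ragflow's executor API):

    def run_task(chunk_fn, filename, binary, callback):
        try:
            return chunk_fn(filename, binary=binary, callback=callback)
        except Exception as e:
            callback(-1, str(e))   # failure surfaced and recorded, not silently empty
            raise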