fix table desc bugs, add positions to chunks (#91)

KevinHuSh
2024-03-04 14:42:26 +08:00
committed by GitHub
parent 8a726fb04b
commit 685b4d8a95
13 changed files with 117 additions and 46 deletions

View File

@@ -13,7 +13,7 @@
import copy
import re
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions
from rag.nlp import huqie
from deepdoc.parser import PdfParser, DocxParser
@@ -21,6 +21,7 @@ from deepdoc.parser import PdfParser, DocxParser
class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
@@ -40,11 +41,11 @@ class Pdf(PdfParser):
self._filter_forpages()
self._merge_with_same_bullet()
callback(0.75, "Text merging finished.")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)
callback(0.8, "Text extraction finished")
return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls
return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls, tbl_poss
def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
@@ -69,7 +70,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
callback(0.8, "Finish parsing.")
elif re.search(r"\.pdf$", filename, re.IGNORECASE):
pdf_parser = Pdf()
sections,tbls = pdf_parser(filename if not binary else binary,
sections, tbls = pdf_parser(filename if not binary else binary,
from_page=from_page, to_page=to_page, callback=callback)
elif re.search(r"\.txt$", filename, re.IGNORECASE):
callback(0.1, "Start to parse.")
@@ -105,7 +106,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
d = copy.deepcopy(doc)
ck = "\n".join(ck)
if pdf_parser:
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
ck = pdf_parser.remove_tag(ck)
tokenize(d, ck, eng)
res.append(d)
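
Note: the add_positions helper that every chunker now imports is not part of this diff. The following is a minimal sketch of what it presumably does, assuming poss is a list of (page_no, left, right, top, bottom) tuples and using the page_num_obj field naming visible in the presentation parser further down; both the tuple shape and the field names are assumptions, not the actual rag.nlp implementation.

def add_positions(d, poss):
    # Sketch only: record page numbers and bounding boxes on a chunk dict.
    # Assumes each entry of poss is (page_no, left, right, top, bottom) with
    # a zero-based page index, as produced by the PDF parser's crop().
    if not poss:
        return
    d["page_num_obj"] = []   # 1-based page numbers, mirrors the presentation parser
    d["position_obj"] = []   # hypothetical field holding the full boxes
    d["top_obj"] = []        # hypothetical field used to sort chunks within a page
    for pn, left, right, top, bottom in poss:
        d["page_num_obj"].append(int(pn) + 1)
        d["top_obj"].append(int(top))
        d["position_obj"].append(
            (int(pn) + 1, int(left), int(right), int(top), int(bottom)))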

View File

@@ -15,7 +15,7 @@ import re
from io import BytesIO
from docx import Document
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
make_colon_as_title
make_colon_as_title, add_positions
from rag.nlp import huqie
from deepdoc.parser import PdfParser, DocxParser
from rag.settings import cron_logger
@@ -49,6 +49,7 @@ class Docx(DocxParser):
class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
@@ -122,7 +123,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
ck = "\n".join(ck)
d = copy.deepcopy(doc)
if pdf_parser:
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
ck = pdf_parser.remove_tag(ck)
tokenize(d, ck, eng)
res.append(d)
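
Note: the same three-step pattern (crop with positions, record the positions, strip the tags) is repeated in every chunker this commit touches. A hedged sketch of that pattern as a helper; it assumes crop(..., need_position=True) returns an (image, positions) pair, which is not shown in this diff, and build_chunk itself is a hypothetical name.

import copy
from rag.nlp import add_positions, tokenize  # helpers used throughout this repo

def build_chunk(doc, ck, pdf_parser, eng):
    # Hypothetical helper mirroring the per-chunk block in the chunkers above.
    d = copy.deepcopy(doc)
    if pdf_parser:
        # Assumed contract: with need_position=True, crop() returns the cropped
        # image plus a list of (page_no, left, right, top, bottom) tuples.
        d["image"], poss = pdf_parser.crop(ck, need_position=True)
        add_positions(d, poss)
        ck = pdf_parser.remove_tag(ck)  # drop the position tags before tokenizing
    tokenize(d, ck, eng)
    return d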

View File

@@ -2,7 +2,7 @@ import copy
import re
from api.db import ParserType
from rag.nlp import huqie, tokenize, tokenize_table
from rag.nlp import huqie, tokenize, tokenize_table, add_positions
from deepdoc.parser import PdfParser
from rag.utils import num_tokens_from_string
@@ -14,6 +14,7 @@ class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
@@ -32,7 +33,7 @@ class Pdf(PdfParser):
self._concat_downward(concat_between_pages=False)
self._filter_forpages()
callback(0.77, "Text merging finished")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)
# clean mess
for b in self.boxes:
@@ -91,7 +92,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
d = copy.deepcopy(doc)
ck = "\n".join(chunk)
tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
res.append(d)
chunk = []
tk_cnt = 0
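
Note: the extra True now passed to _extract_table_figure presumably asks the parser to return each table together with its positions, so that the table chunks built from tbls can carry them as well. A sketch under that assumption; the real return shape and the real tokenize_table are not shown in this diff, and tokenize_tables below is a hypothetical stand-in.

import copy
from rag.nlp import add_positions, tokenize  # assumed repo helpers

def tokenize_tables(tbls, doc, eng):
    # Sketch only: assumes each entry of tbls is ((image, rows), positions),
    # where rows is the extracted table text and positions is a list of
    # (page_no, left, right, top, bottom) tuples.
    res = []
    for (img, rows), poss in tbls:
        if not rows:
            continue
        d = copy.deepcopy(doc)
        d["image"] = img
        add_positions(d, poss)  # this is what the added need_position flag enables
        tokenize(d, rows if isinstance(rows, str) else "\n".join(rows), eng)
        res.append(d)
    return res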

View File

@@ -13,7 +13,7 @@
import copy
import re
from rag.app import laws
from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table, add_positions
from deepdoc.parser import PdfParser
from rag.settings import cron_logger
@@ -21,6 +21,7 @@ from rag.settings import cron_logger
class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
@@ -39,7 +40,7 @@ class Pdf(PdfParser):
self._concat_downward(concat_between_pages=False)
self._filter_forpages()
callback(0.77, "Text merging finished")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)
cron_logger.info("paddle layouts:".format((timer() - start) / (self.total_page + 0.1)))
#self._naive_vertical_merge()
@@ -95,11 +96,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
# wrap up to es documents
for ck in cks:
if len(ck.strip()) == 0:continue
print("--", ck)
if not ck:continue
d = copy.deepcopy(doc)
if pdf_parser:
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
ck = pdf_parser.remove_tag(ck)
tokenize(d, ck, eng)
res.append(d)
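
Note: one context line in this file is worth flagging: cron_logger.info("paddle layouts:".format(...)) has no placeholder in the format string, so the computed per-page timing is silently dropped from the log message. A small runnable illustration of the pitfall and the presumably intended form:

elapsed_per_page = 1.234
# Without a placeholder, str.format ignores its arguments entirely.
print("paddle layouts:".format(elapsed_per_page))               # -> paddle layouts:
# The intended call presumably includes the value:
print("paddle layouts: {:.3f}s/page".format(elapsed_per_page))  # -> paddle layouts: 1.234s/page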

View File

@@ -15,7 +15,7 @@ import re
from collections import Counter
from api.db import ParserType
from rag.nlp import huqie, tokenize, tokenize_table
from rag.nlp import huqie, tokenize, tokenize_table, add_positions
from deepdoc.parser import PdfParser
import numpy as np
from rag.utils import num_tokens_from_string
@@ -28,6 +28,7 @@ class Pdf(PdfParser):
def __call__(self, filename, binary=None, from_page=0,
to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(
filename if not binary else binary,
zoomin,
@@ -47,7 +48,7 @@ class Pdf(PdfParser):
self._concat_downward(concat_between_pages=False)
self._filter_forpages()
callback(0.75, "Text merging finished.")
tbls = self._extract_table_figure(True, zoomin, False)
tbls = self._extract_table_figure(True, zoomin, False, True)
# clean mess
if column_width < self.page_images[0].size[0] / zoomin / 2:
@@ -165,7 +166,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
txt = pdf_parser.remove_tag(paper["abstract"])
d["important_kwd"] = ["abstract", "总结", "概括", "summary", "summarize"]
d["important_tks"] = " ".join(d["important_kwd"])
d["image"] = pdf_parser.crop(paper["abstract"])
d["image"], poss = pdf_parser.crop(paper["abstract"], need_position=True)
add_positions(d, poss)
tokenize(d, txt, eng)
res.append(d)
@@ -198,8 +200,9 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
for p in proj:
d = copy.deepcopy(doc)
txt += "\n" + pdf_parser.remove_tag(p)
d["image"] = pdf_parser.crop(p)
tokenize(d, txt)
d["image"], poss = pdf_parser.crop(p, need_position=True)
add_positions(d, poss)
tokenize(d, txt, eng)
res.append(d)
i = 0
@@ -210,7 +213,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
d = copy.deepcopy(doc)
ck = "\n".join(chunk)
tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
d["image"] = pdf_parser.crop(ck)
d["image"], poss = pdf_parser.crop(ck, need_position=True)
add_positions(d, poss)
res.append(d)
chunk = []
tk_cnt = 0
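
Note: the project-section loop previously called tokenize(d, txt) without the language flag; this commit passes eng like the other call sites. A hypothetical sketch (not the repo's actual tokenize) of why the flag matters:

from rag.nlp import huqie  # segmenter already imported by the chunkers in this diff

def tokenize_sketch(d, text, eng):
    # Hypothetical stand-in for rag.nlp.tokenize, only to show what the third
    # argument changes: English text can be split on whitespace, while Chinese
    # text has to go through the huqie segmenter (qie is the assumed API).
    d["content_with_weight"] = text
    d["content_ltks"] = " ".join(text.lower().split()) if eng else huqie.qie(text)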

View File

@@ -48,6 +48,7 @@ class Pdf(PdfParser):
return False
def __call__(self, filename, binary=None, from_page=0, to_page=100000, zoomin=3, callback=None):
callback(msg="OCR is running...")
self.__images__(filename if not binary else binary, zoomin, from_page, to_page)
callback(0.8, "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)))
assert len(self.boxes) == len(self.page_images), "{} vs. {}".format(len(self.boxes), len(self.page_images))
@@ -94,9 +95,10 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **k
return res
elif re.search(r"\.pdf$", filename, re.IGNORECASE):
pdf_parser = Pdf()
for txt,img in pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback):
for pn, (txt,img) in enumerate(pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback)):
d = copy.deepcopy(doc)
d["image"] = img
d["page_num_obj"] = [pn+1]
tokenize(d, txt, pdf_parser.is_english)
res.append(d)
return res
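
Note: here each per-page chunk only gets a 1-based page number; there is no crop-derived bounding box, so add_positions is not used. If the same position fields as the other chunkers were wanted, a whole-page box could presumably be recorded instead. A sketch, assuming the add_positions shape described earlier and the page_images attribute referenced above:

from rag.nlp import add_positions  # assumed repo helper

def add_whole_page_position(d, pdf_parser, pn):
    # Hypothetical alternative to setting page_num_obj directly: record a
    # whole-page bounding box so downstream code sees the same fields as the
    # crop-based chunkers. pn is the zero-based page index from enumerate().
    w, h = pdf_parser.page_images[pn].size
    add_positions(d, [(pn, 0, w, 0, h)])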