Add task module, and pipeline the task and every parser (#49)
@@ -67,4 +67,6 @@ def tokenize(d, t, eng):
         d["content_ltks"] = " ".join([stemmer.stem(w) for w in word_tokenize(t)])
     else:
         d["content_ltks"] = huqie.qie(t)
-        d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
+    d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
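For context, a runnable sketch of the tokenize() helper this hunk modifies. huqie.qie and huqie.qieqie are ragflow-internal Chinese segmenters (coarse- and fine-grained), so trivial stand-ins are used below; only the control flow and the dedent of content_sm_ltks are meant to mirror the diff.

# Hedged sketch of tokenize(); the huqie stand-ins are illustrative only.
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

stemmer = PorterStemmer()

def qie(t):       # stand-in for rag.nlp.huqie.qie (coarse segmentation)
    return " ".join(t.split())

def qieqie(tks):  # stand-in for rag.nlp.huqie.qieqie (fine-grained pass)
    return tks

def tokenize(d, t, eng):
    if eng:
        # English: stem each word so inflected forms match at query time.
        d["content_ltks"] = " ".join(stemmer.stem(w) for w in word_tokenize(t))
    else:
        d["content_ltks"] = qie(t)
    # After this commit the fine-grained tokens are produced for both
    # languages, not only for Chinese.
    d["content_sm_ltks"] = qieqie(d["content_ltks"])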
@@ -32,14 +32,12 @@ class Pdf(HuParser):
                           zoomin,
                           from_page,
                           to_page)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
-                   "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.1, "OCR finished", callback)

         from timeit import default_timer as timer
         start = timer()
         self._layouts_paddle(zoomin)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
-                   "Page {}~{}: Layout analysis finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.77, "Layout analysis finished", callback)
         print("paddle layouts:", timer()-start)
         bxs = self.sort_Y_firstly(self.boxes, np.median(self.mean_height) / 3)
         # is it English
@@ -77,8 +75,7 @@ class Pdf(HuParser):
                 b["x1"] = max(b["x1"], b_["x1"])
                 bxs.pop(i + 1)

-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
-                   "Page {}~{}: Text extraction finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.8, "Text extraction finished", callback)

         return [b["text"] + self._line_tag(b, zoomin) for b in bxs]
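Every hunk in this commit funnels progress through callback__. Its definition lives in rag.app and is not part of this diff, so the sketch below only infers the contract from the call sites: a progress fraction in [0, 1] (with -1 used elsewhere to signal an error), a message, and an optional callback.

# Inferred sketch of the callback__ contract; not the actual rag.app code.
def callback__(progress, msg, callback):
    if callback is None:
        return
    callback(progress, msg)

The commit's recurring edit is visible above: per-page fractions such as (min(to_page, self.total_page) - from_page) / self.total_page / 2 are replaced by fixed milestones (0.1 after OCR, 0.77 after layout analysis, 0.8 after text extraction), so reported progress stays monotonic even when only a slice of pages is parsed.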
@@ -92,14 +89,17 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
     pdf_parser = None
     sections = []
     if re.search(r"\.docx?$", filename, re.IGNORECASE):
+        callback__(0.1, "Start to parse.", callback)
         for txt in Docx()(filename, binary):
             sections.append(txt)
-    if re.search(r"\.pdf$", filename, re.IGNORECASE):
+        callback__(0.8, "Finish parsing.", callback)
+    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
         for txt in pdf_parser(filename if not binary else binary,
                               from_page=from_page, to_page=to_page, callback=callback):
             sections.append(txt)
-    if re.search(r"\.txt$", filename, re.IGNORECASE):
+    elif re.search(r"\.txt$", filename, re.IGNORECASE):
+        callback__(0.1, "Start to parse.", callback)
         txt = ""
         if binary:txt = binary.decode("utf-8")
         else:
@@ -110,6 +110,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
                 txt += l
         sections = txt.split("\n")
         sections = [l for l in sections if l]
+        callback__(0.8, "Finish parsing.", callback)
+    else: raise NotImplementedError("file type not supported yet(docx, pdf, txt supported)")

     # is it English
     eng = is_english(sections)
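The chunk() hunks above tighten one dispatch idiom: mutually exclusive elif branches per file extension, plus a loud failure for anything else. A condensed, runnable sketch of that shape, with the parser calls replaced by placeholders:

import re

def chunk(filename, binary=None):
    # Route on the extension, case-insensitively; exactly one branch runs.
    if re.search(r"\.docx?$", filename, re.IGNORECASE):
        sections = ["...docx text..."]   # Docx()(filename, binary) runs here
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        sections = ["...pdf text..."]    # Pdf()(...) runs here
    elif re.search(r"\.txt$", filename, re.IGNORECASE):
        txt = binary.decode("utf-8") if binary else open(filename).read()
        sections = [l for l in txt.split("\n") if l]
    else:
        raise NotImplementedError(
            "file type not supported yet(docx, pdf, txt supported)")
    return sections

Chaining the branches with elif is what makes the trailing else reachable, so unsupported extensions now raise instead of silently producing empty sections.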
@@ -1,12 +1,8 @@
 import copy
 import re
-from collections import Counter
-from rag.app import callback__, bullets_category, BULLET_PATTERN, is_english, tokenize
-from rag.nlp import huqie, stemmer
-from rag.parser.docx_parser import HuDocxParser
+from rag.app import callback__, tokenize
+from rag.nlp import huqie
 from rag.parser.pdf_parser import HuParser
-from nltk.tokenize import word_tokenize
 import numpy as np
 from rag.utils import num_tokens_from_string

@@ -18,24 +14,19 @@ class Pdf(HuParser):
                           zoomin,
                           from_page,
                           to_page)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.2, "OCR finished.", callback)

         from timeit import default_timer as timer
         start = timer()
         self._layouts_paddle(zoomin)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: Layout analysis finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.5, "Layout analysis finished.", callback)
         print("paddle layouts:", timer() - start)
         self._table_transformer_job(zoomin)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: Table analysis finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.7, "Table analysis finished.", callback)
         self._text_merge()
         column_width = np.median([b["x1"] - b["x0"] for b in self.boxes])
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: Text merging finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.77, "Text merging finished", callback)
         tbls = self._extract_table_figure(True, zoomin, False)

         # clean mess
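Pdf.__call__ above is a fixed sequence of stages (OCR, layout analysis via Paddle, table transformer, text merging), each followed by a progress report and a timing printout for the slow ones. A generic sketch of that shape; the stage internals are ragflow's, and the names below are illustrative:

from timeit import default_timer as timer

def run_stages(stages, callback=None):
    # stages: ordered (progress_after, label, fn) triples.
    for progress, label, fn in stages:
        start = timer()
        fn()
        print(label, "took {:.2f}s".format(timer() - start))
        if callback:
            callback(progress, label + " finished")

# Usage mirroring the hunk's milestones (do_ocr etc. are hypothetical):
# run_stages([(0.2, "OCR", do_ocr), (0.5, "Layout analysis", do_layout),
#             (0.7, "Table analysis", do_tables), (0.77, "Text merging", do_merge)])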
@@ -71,6 +62,7 @@ class Pdf(HuParser):
                 b_["top"] = b["top"]
                 self.boxes.pop(i)

+        callback__(0.8, "Parsing finished", callback)
         for b in self.boxes: print(b["text"], b.get("layoutno"))

         print(tbls)
@@ -85,6 +77,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
         pdf_parser = Pdf()
         cks, tbls = pdf_parser(filename if not binary else binary,
                                from_page=from_page, to_page=to_page, callback=callback)
+    else: raise NotImplementedError("file type not supported yet(pdf supported)")
     doc = {
         "docnm_kwd": filename
     }
@@ -18,24 +18,20 @@ class Pdf(HuParser):
                           zoomin,
                           from_page,
                           to_page)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.2, "OCR finished.", callback)

         from timeit import default_timer as timer
         start = timer()
         self._layouts_paddle(zoomin)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: Layout analysis finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.47, "Layout analysis finished", callback)
         print("paddle layouts:", timer() - start)
         self._table_transformer_job(zoomin)
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: Table analysis finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.68, "Table analysis finished", callback)
         self._text_merge()
         column_width = np.median([b["x1"] - b["x0"] for b in self.boxes])
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 4,
-                   "Page {}~{}: Text merging finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.75, "Text merging finished.", callback)
         tbls = self._extract_table_figure(True, zoomin, False)

         # clean mess
@@ -105,6 +101,7 @@ class Pdf(HuParser):
                     break
         if not abstr: i = 0

+        callback__(0.8, "Page {}~{}: Text merging finished".format(from_page, min(to_page, self.total_page)), callback)
         for b in self.boxes: print(b["text"], b.get("layoutno"))
         print(tbls)
@@ -126,6 +123,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
         pdf_parser = Pdf()
         paper = pdf_parser(filename if not binary else binary,
                            from_page=from_page, to_page=to_page, callback=callback)
+    else: raise NotImplementedError("file type not supported yet(pdf supported)")
     doc = {
         "docnm_kwd": paper["title"] if paper["title"] else filename,
         "authors_tks": paper["authors"]
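The doc skeleton built right after this hunk falls back to the file name when the PDF yields no title. A sketch of just that piece, with the field names taken from the diff and the tokenized variants omitted:

def paper_doc(paper, filename):
    # Prefer the extracted title; an empty value falls back to the filename.
    return {
        "docnm_kwd": paper["title"] if paper["title"] else filename,
        "authors_tks": paper["authors"],
    }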
@@ -42,10 +42,8 @@ class Ppt(object):
                     txt = self.__extract(shape)
                     if txt: texts.append(txt)
                 txts.append("\n".join(texts))
-                callback__((i+1)/self.total_page/2, "", callback)

-        callback__((min(to_page, self.total_page) - from_page) / self.total_page,
-                   "Page {}~{}: Text extraction finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.5, "Text extraction finished.", callback)
         import aspose.slides as slides
         import aspose.pydrawing as drawing
         imgs = []
@@ -55,8 +53,7 @@ class Ppt(object):
                 slide.get_thumbnail(0.5, 0.5).save(buffered, drawing.imaging.ImageFormat.jpeg)
                 imgs.append(buffered.getvalue())
         assert len(imgs) == len(txts), "Slides text and image do not match: {} vs. {}".format(len(imgs), len(txts))
-        callback__((min(to_page, self.total_page) - from_page) / self.total_page,
-                   "Page {}~{}: Image extraction finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.9, "Image extraction finished", callback)
         self.is_english = is_english(txts)
         return [(txts[i], imgs[i]) for i in range(len(txts))]
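The Ppt parser pairs every slide's text with a rendered thumbnail so each downstream chunk can carry an image. A self-contained sketch of that pass, using the same aspose.slides calls the hunk itself imports (the 0.5 scale factors come straight from it):

from io import BytesIO

def slide_images(path):
    import aspose.slides as slides
    import aspose.pydrawing as drawing

    imgs = []
    with slides.Presentation(path) as pres:
        for slide in pres.slides:
            buffered = BytesIO()
            # Render at half resolution, as in the commit, keeping raw bytes.
            slide.get_thumbnail(0.5, 0.5).save(
                buffered, drawing.imaging.ImageFormat.jpeg)
            imgs.append(buffered.getvalue())
    return imgs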
@@ -73,7 +70,7 @@ class Pdf(HuParser):

     def __call__(self, filename, binary=None, from_page=0, to_page=100000, zoomin=3, callback=None):
         self.__images__(filename if not binary else binary, zoomin, from_page, to_page)
-        callback__((min(to_page, self.total_page)-from_page) / self.total_page, "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)
+        callback__(0.8, "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)
         assert len(self.boxes) == len(self.page_images), "{} vs. {}".format(len(self.boxes), len(self.page_images))
         res = []
         #################### More precisely ###################
@@ -92,6 +89,7 @@ class Pdf(HuParser):
         for i in range(len(self.boxes)):
             lines = "\n".join([b["text"] for b in self.boxes[i] if not self.__garbage(b["text"])])
             res.append((lines, self.page_images[i]))
+        callback__(0.9, "Page {}~{}: Parsing finished".format(from_page, min(to_page, self.total_page)), callback)
         return res

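This Pdf.__call__ keeps OCR boxes grouped per page and drops noise through a private __garbage() check before pairing text with page images. The heuristic below is only a stand-in for that check; the pairing itself matches the hunk:

import re

def is_garbage(text):
    # Illustrative stand-in for Pdf.__garbage(): treat near-empty strings
    # and pure punctuation/digit runs as OCR noise.
    t = text.strip()
    return len(t) <= 1 or re.fullmatch(r"[\W\d]+", t) is not None

def page_chunks(boxes_per_page, page_images):
    res = []
    for boxes, image in zip(boxes_per_page, page_images):
        lines = "\n".join(b["text"] for b in boxes if not is_garbage(b["text"]))
        res.append((lines, image))
    return res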
@@ -104,13 +102,13 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
     res = []
     if re.search(r"\.pptx?$", filename, re.IGNORECASE):
         ppt_parser = Ppt()
-        for txt,img in ppt_parser(filename if not binary else binary, from_page, to_page, callback):
+        for txt,img in ppt_parser(filename if not binary else binary, from_page, 1000000, callback):
             d = copy.deepcopy(doc)
             d["image"] = img
             tokenize(d, txt, ppt_parser.is_english)
             res.append(d)
         return res
-    if re.search(r"\.pdf$", filename, re.IGNORECASE):
+    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()
         for txt,img in pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback):
             d = copy.deepcopy(doc)
@@ -118,7 +116,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
             tokenize(d, txt, pdf_parser.is_english)
             res.append(d)
         return res
-    callback__(-1, "This kind of presentation document did not support yet!", callback)
+    raise NotImplementedError("file type not supported yet(pptx, pdf supported)")


 if __name__== "__main__":
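Both branches of this chunk() share the same assembly step: clone the document-level metadata for every (text, image) pair so chunks never alias each other's dicts. In isolation:

import copy

def build_chunks(doc, pairs, tokenize, eng):
    res = []
    for txt, img in pairs:
        d = copy.deepcopy(doc)  # per-chunk copy; chunks must not share state
        d["image"] = img
        tokenize(d, txt, eng)   # fills content_ltks / content_sm_ltks in place
        res.append(d)
    return res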