Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 20:42:30 +08:00
Add app to rag module: presentation & laws (#43)
rag/app/__init__.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import re


def callback__(progress, msg, func):
    # Forward progress notifications to the caller-supplied callback, if any.
    if not func:
        return
    func(progress, msg)


# Bullet/numbering patterns, grouped by document style: Chinese statute
# headings, decimal outlines, Chinese enumerations, and English headings.
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百]+编",
    r"第[零一二三四五六七八九十百]+章",
    r"第[零一二三四五六七八九十百]+节",
    r"第[零一二三四五六七八九十百]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    r"[0-9]{,3}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]]


def bullets_category(sections):
    # Count how many sections match each pattern group; return the index
    # of the group with the most hits (-1 if nothing matches).
    hits = [0] * len(BULLET_PATTERN)
    for i, pro in enumerate(BULLET_PATTERN):
        for sec in sections:
            for p in pro:
                if re.match(p, sec):
                    hits[i] += 1
                    break
    maximum = 0
    res = -1
    for i, h in enumerate(hits):
        if h <= maximum:
            continue
        res = i
        maximum = h
    return res
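
A quick, hedged sketch of how bullets_category behaves; the sample headings below are invented for illustration and come from no real document:

# All three hypothetical headings match group 0 (the Chinese statute
# patterns 编/章/节/条), so group 0 is returned.
sample = ["第一章 总则", "第一条 为了规范借贷行为", "第二条 本法适用于全国"]
assert bullets_category(sample) == 0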

rag/app/laws.py (new file, 192 lines)
@@ -0,0 +1,192 @@
import copy
import re
from io import BytesIO

from docx import Document
import numpy as np

from rag.app import callback__, bullets_category, BULLET_PATTERN
from rag.nlp import huqie
from rag.parser.pdf_parser import HuParser


class Docx(object):
    def __init__(self):
        pass

    def __clean(self, line):
        # \u3000 is the full-width (ideographic) space.
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        lines = [self.__clean(p.text) for p in self.doc.paragraphs]
        return [l for l in lines if l]

class Pdf(HuParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page)
        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
                   "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)

        from timeit import default_timer as timer
        start = timer()
        self._layouts_paddle(zoomin)
        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
                   "Page {}~{}: Layout analysis finished".format(from_page, min(to_page, self.total_page)), callback)
        print("paddle layouts:", timer() - start)
        bxs = self.sort_Y_firstly(self.boxes, np.median(self.mean_height) / 3)

        # Is the document English? Judge by the share of boxes that start
        # with a Latin letter.
        eng = 0
        for b in bxs:
            if re.match(r"[a-zA-Z]", b["text"].strip()):
                eng += 1
        eng = eng / len(bxs) > 0.8

        # Merge boxes vertically.
        i = 0
        while i + 1 < len(bxs):
            b = bxs[i]
            b_ = bxs[i + 1]
            # Drop page-footer noise (page numbers, separators) at page breaks.
            if b["page_number"] < b_["page_number"] and re.match(r"[0-9 •一—-]+$", b["text"]):
                bxs.pop(i)
                continue
            # Features in favor of concatenating the two boxes.
            concatting_feats = [
                b["text"].strip()[-1] in ",;:'\",、‘“;:",
                len(b["text"].strip()) > 1 and b["text"].strip()[-2] in ",;:'\",‘“、;:",
                b_["text"].strip()[0] in "。;?!?”)),,、:",
            ]
            # Features against concatenating.
            feats = [
                b.get("layoutno", 0) != b_.get("layoutno", 0),
                b["text"].strip()[-1] in "。?!?",
                eng and b["text"].strip()[-1] in ".!?",
                b["page_number"] == b_["page_number"] and b_["top"] -
                b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5,
                b["page_number"] < b_["page_number"] and abs(
                    b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4
            ]
            if any(feats) and not any(concatting_feats):
                i += 1
                continue
            # Merge the lower box up into the current one.
            b["bottom"] = b_["bottom"]
            b["text"] += b_["text"]
            b["x0"] = min(b["x0"], b_["x0"])
            b["x1"] = max(b["x1"], b_["x1"])
            bxs.pop(i + 1)

        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
                   "Page {}~{}: Text extraction finished".format(from_page, min(to_page, self.total_page)), callback)

        return [b["text"] + self._line_tag(b, zoomin) for b in bxs]

def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
    doc = {
        "docnm_kwd": filename,
        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    pdf_parser = None
    sections = []
    if re.search(r"\.docx?$", filename, re.IGNORECASE):
        for txt in Docx()(filename, binary):
            sections.append(txt)
    if re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
        for txt in pdf_parser(filename if not binary else binary,
                              from_page=from_page, to_page=to_page, callback=callback):
            sections.append(txt)
    if re.search(r"\.txt$", filename, re.IGNORECASE):
        txt = ""
        if binary:
            txt = binary.decode("utf-8")
        else:
            with open(filename, "r") as f:
                while True:
                    l = f.readline()
                    if not l:
                        break
                    txt += l
        sections = txt.split("\n")
        sections = [l for l in sections if l]

    # Is the document English? Judge by the share of sections that start
    # with a Latin letter.
    eng = 0
    for sec in sections:
        if re.match(r"[a-zA-Z]", sec.strip()):
            eng += 1
    eng = eng / len(sections) > 0.8

    # Remove the 'Contents' (table of contents) part.
    i = 0
    while i < len(sections):
        if not re.match(r"(Contents|目录|目次)$", re.sub(r"( | |\u3000)+", "", sections[i].split("@@")[0])):
            i += 1
            continue
        sections.pop(i)
        if i >= len(sections):
            break
        prefix = sections[i].strip()[:3] if not eng else " ".join(sections[i].strip().split(" ")[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = sections[i].strip()[:3] if not eng else " ".join(sections[i].strip().split(" ")[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        # Drop every TOC entry up to where the body repeats the first one.
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, sections[j]):
                continue
            for k in range(i, j):
                sections.pop(i)
            break

    # Assign each section a hierarchy level according to its bullet pattern.
    bull = bullets_category(sections)
    projs = [len(BULLET_PATTERN[bull])] * len(sections)
    for i, sec in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, sec.strip()):
                projs[i] = j
                break
    readed = [0] * len(sections)
    cks = []
    for pr in range(len(BULLET_PATTERN[bull]) - 1, 1, -1):
        for i in range(len(sections)):
            if readed[i] or projs[i] < pr:
                continue
            # Find the father, grandfather, great-grandfather... headings.
            p = projs[i]
            readed[i] = 1
            ck = [sections[i]]
            for j in range(i - 1, -1, -1):
                if projs[j] >= p:
                    continue
                ck.append(sections[j])
                readed[j] = 1
                p = projs[j]
                if p == 0:
                    break
            cks.append(ck[::-1])

    res = []
    # Wrap the chunks up into ES documents.
    for ck in cks:
        print("\n-".join(ck))
        ck = "\n".join(ck)
        d = copy.deepcopy(doc)
        if pdf_parser:
            d["image"] = pdf_parser.crop(ck)
            ck = pdf_parser.remove_tag(ck)
        d["content_ltks"] = huqie.qie(ck)
        d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
        res.append(d)
    return res


if __name__ == "__main__":
    import sys
    chunk(sys.argv[1])
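
A hedged usage sketch for the laws chunker; the file name and progress callback below are illustrative stand-ins for whatever the task executor actually passes:

# Hypothetical invocation: chunk the first 32 pages of a statute PDF,
# printing progress as the parser reports it.
def progress(prog, msg):
    print("{:>6.1%}  {}".format(max(prog, 0), msg))

docs = chunk("statute.pdf", from_page=0, to_page=32, callback=progress)
print(len(docs), "chunks produced")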

rag/app/presentation.py (new file, 127 lines)
@@ -0,0 +1,127 @@
import copy
import re
from io import BytesIO

from pptx import Presentation

from rag.app import callback__
from rag.nlp import huqie
from rag.parser.pdf_parser import HuParser


class Ppt(object):
    def __init__(self):
        super().__init__()

    def __extract(self, shape):
        if shape.shape_type == 19:  # table
            tb = shape.table
            rows = []
            for i in range(1, len(tb.rows)):
                rows.append("; ".join([tb.cell(0, j).text + ": " + tb.cell(i, j).text
                                       for j in range(len(tb.columns)) if tb.cell(i, j)]))
            return "\n".join(rows)

        if shape.has_text_frame:
            return shape.text_frame.text

        if shape.shape_type == 6:  # group shape: recurse into its members
            texts = []
            for p in shape.shapes:
                t = self.__extract(p)
                if t:
                    texts.append(t)
            return "\n".join(texts)

    def __call__(self, fnm, from_page, to_page, callback=None):
        ppt = Presentation(fnm) if isinstance(
            fnm, str) else Presentation(
            BytesIO(fnm))
        txts = []
        self.total_page = len(ppt.slides)
        for i, slide in enumerate(ppt.slides[from_page: to_page]):
            texts = []
            for shape in slide.shapes:
                txt = self.__extract(shape)
                if txt:
                    texts.append(txt)
            txts.append("\n".join(texts))
            callback__((i + 1) / self.total_page / 2, "", callback)

        callback__((min(to_page, self.total_page) - from_page) / self.total_page,
                   "Page {}~{}: Text extraction finished".format(from_page, min(to_page, self.total_page)), callback)
        import aspose.slides as slides
        import aspose.pydrawing as drawing
        imgs = []
        with slides.Presentation(BytesIO(fnm)) as presentation:
            for i, slide in enumerate(presentation.slides[from_page: to_page]):
                buffered = BytesIO()
                slide.get_thumbnail(0.5, 0.5).save(buffered, drawing.imaging.ImageFormat.jpeg)
                imgs.append(buffered.getvalue())
        assert len(imgs) == len(txts), "Slides text and image do not match: {} vs. {}".format(len(imgs), len(txts))
        callback__((min(to_page, self.total_page) - from_page) / self.total_page,
                   "Page {}~{}: Image extraction finished".format(from_page, min(to_page, self.total_page)), callback)

        return [(txts[i], imgs[i]) for i in range(len(txts))]

class Pdf(HuParser):
    def __init__(self):
        super().__init__()

    def __garbage(self, txt):
        txt = txt.lower().strip()
        if re.match(r"[0-9\.,%/-]+$", txt):
            return True
        if len(txt) < 3:
            return True
        return False

    def __call__(self, filename, binary=None, from_page=0, to_page=100000, zoomin=3, callback=None):
        self.__images__(filename if not binary else binary, zoomin, from_page, to_page)
        callback__((min(to_page, self.total_page) - from_page) / self.total_page,
                   "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)
        assert len(self.boxes) == len(self.page_images), "{} vs. {}".format(len(self.boxes), len(self.page_images))
        res = []
        #################### More precise ###################
        # self._layouts_paddle(zoomin)
        # self._text_merge()
        # pages = {}
        # for b in self.boxes:
        #     if self.__garbage(b["text"]): continue
        #     if b["page_number"] not in pages: pages[b["page_number"]] = []
        #     pages[b["page_number"]].append(b["text"])
        # for i, lines in pages.items():
        #     res.append(("\n".join(lines), self.page_images[i - 1]))
        # return res
        #####################################################

        for i in range(len(self.boxes)):
            lines = "\n".join([b["text"] for b in self.boxes[i] if not self.__garbage(b["text"])])
            res.append((lines, self.page_images[i]))
        return res

def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
    doc = {
        "docnm_kwd": filename,
        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    res = []
    if re.search(r"\.pptx?$", filename, re.IGNORECASE):
        for txt, img in Ppt()(filename if not binary else binary, from_page, to_page, callback):
            d = copy.deepcopy(doc)
            d["content_ltks"] = huqie.qie(txt)
            d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
            d["image"] = img
            res.append(d)
        return res
    if re.search(r"\.pdf$", filename, re.IGNORECASE):
        for txt, img in Pdf()(filename if not binary else binary, from_page=from_page,
                              to_page=to_page, callback=callback):
            d = copy.deepcopy(doc)
            d["content_ltks"] = huqie.qie(txt)
            d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
            d["image"] = img
            res.append(d)
        return res
    callback__(-1, "This kind of presentation document is not supported yet!", callback)


if __name__ == "__main__":
    import sys
    print(chunk(sys.argv[1]))
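
A similarly hedged sketch for the presentation chunker, with an invented file name; each returned document pairs tokenized slide text with a JPEG thumbnail, so a caller can index text and previews together:

# Hypothetical invocation over raw bytes, as a caller that fetched the
# file from object storage would do.
with open("deck.pptx", "rb") as f:
    docs = chunk("deck.pptx", binary=f.read(),
                 callback=lambda prog, msg: print(prog, msg))
for d in docs:
    print(d["docnm_kwd"], len(d["image"]), "bytes of thumbnail")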