fix PlainPdf bugs (#152)

This commit is contained in:
KevinHuSh
2024-03-26 15:11:07 +08:00
committed by GitHub
parent 75f7c6da2f
commit da21320b88
13 changed files with 36 additions and 33 deletions

View File

@ -68,7 +68,7 @@ class Pdf(PdfParser):
callback(0.8, "Text extraction finished")
return [(b["text"], self._line_tag(b, zoomin)) for b in self.boxes]
return [(b["text"], self._line_tag(b, zoomin)) for b in self.boxes], None
def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):
@ -91,7 +91,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
elif re.search(r"\.pdf$", filename, re.IGNORECASE):
pdf_parser = Pdf() if kwargs.get("parser_config",{}).get("layout_recognize", True) else PlainParser()
for txt, poss in pdf_parser(filename if not binary else binary,
from_page=from_page, to_page=to_page, callback=callback):
from_page=from_page, to_page=to_page, callback=callback)[0]:
sections.append(txt + poss)
elif re.search(r"\.txt$", filename, re.IGNORECASE):

View File

@ -136,7 +136,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
"title": filename,
"authors": " ",
"abstract": "",
"sections": pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page),
"sections": pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page)[0],
"tables": []
}
else:

View File

@ -66,7 +66,7 @@ class Pdf(PdfParser):
class PlainPdf(PlainParser):
def __call__(self, filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
self.pdf = pdf2_read(filename if not binary else BytesIO(filename))
self.pdf = pdf2_read(filename if not binary else BytesIO(binary))
page_txt = []
for page in self.pdf.pages[from_page: to_page]:
page_txt.append(page.extract_text())

View File

@ -40,7 +40,7 @@ def remote_call(filename, binary):
"encrypt_type": "base64",
"filename": filename,
"langtype": '',
"fileori": base64.b64encode(binary.stream.read()).decode('utf-8')
"fileori": base64.b64encode(binary).decode('utf-8')
},
"c": "resume_parse_module",
"m": "resume_parse"

View File

@ -20,10 +20,10 @@ from openai import OpenAI
from FlagEmbedding import FlagModel
import torch
import numpy as np
from huggingface_hub import snapshot_download
from rag.utils import num_tokens_from_string
flag_model = FlagModel("BAAI/bge-large-zh-v1.5",
flag_model = FlagModel(snapshot_download("BAAI/bge-large-zh-v1.5", local_files_only=True),
query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
use_fp16=torch.cuda.is_available())

View File

@ -53,7 +53,7 @@ class EsQueryer:
if not self.isChinese(txt):
tks = huqie.qie(txt).split(" ")
q = tks
q = copy.deepcopy(tks)
for i in range(1, len(tks)):
q.append("\"%s %s\"^2" % (tks[i - 1], tks[i]))
if not q:
@ -138,7 +138,7 @@ class EsQueryer:
def toDict(tks):
d = {}
if isinstance(tks, type("")):
if isinstance(tks, str):
tks = tks.split(" ")
for t, c in self.tw.weights(tks):
if t not in d:

View File

@ -234,13 +234,13 @@ class Dealer:
assert len(ans_v[0]) == len(chunk_v[0]), "The dimension of query and chunk do not match: {} vs. {}".format(
len(ans_v[0]), len(chunk_v[0]))
chunks_tks = [huqie.qie(ck).split(" ") for ck in chunks]
chunks_tks = [huqie.qie(self.qryr.rmWWW(ck)).split(" ") for ck in chunks]
cites = {}
for i, a in enumerate(pieces_):
sim, tksim, vtsim = self.qryr.hybrid_similarity(ans_v[i],
chunk_v,
huqie.qie(
pieces_[i]).split(" "),
self.qryr.rmWWW(pieces_[i])).split(" "),
chunks_tks,
tkweight, vtweight)
mx = np.max(sim) * 0.99

View File

@ -150,9 +150,10 @@ class Dealer:
return 6
def ner(t):
if re.match(r"[0-9,.]{2,}$", t): return 2
if re.match(r"[a-z]{1,2}$", t): return 0.01
if not self.ne or t not in self.ne:
return 1
if re.match(r"[0-9,.]+$", t): return 2
m = {"toxic": 2, "func": 1, "corp": 3, "loca": 3, "sch": 3, "stock": 3,
"firstnm": 1}
return m[self.ne[t]]
@ -170,11 +171,11 @@ class Dealer:
return 1
def freq(t):
if re.match(r"[0-9\. -]+$", t):
return 10000
if re.match(r"[0-9. -]{2,}$", t):
return 3
s = huqie.freq(t)
if not s and re.match(r"[a-z\. -]+$", t):
return 10
if not s and re.match(r"[a-z. -]+$", t):
return 300
if not s:
s = 0
@ -188,12 +189,12 @@ class Dealer:
return max(s, 10)
def df(t):
if re.match(r"[0-9\. -]+$", t):
return 100000
if re.match(r"[0-9. -]{2,}$", t):
return 5
if t in self.df:
return self.df[t] + 3
elif re.match(r"[a-z\. -]+$", t):
return 3
elif re.match(r"[a-z. -]+$", t):
return 300
elif len(t) >= 4:
s = [tt for tt in huqie.qieqie(t).split(" ") if len(tt) > 1]
if len(s) > 1:

View File

@ -87,7 +87,9 @@ def dispatch():
if r["parser_id"] == "paper": page_size = r["parser_config"].get("task_page_size", 22)
if r["parser_id"] == "one": page_size = 1000000000
if not do_layout: page_size = 1000000000
for s,e in r["parser_config"].get("pages", [(1, 100000)]):
page_ranges = r["parser_config"].get("pages")
if not page_ranges: page_ranges = [(1, 100000)]
for s,e in page_ranges:
s -= 1
s = max(0, s)
e = min(e-1, pages)