solve task execution issues (#90)

KevinHuSh
2024-03-01 19:48:01 +08:00
committed by GitHub
parent 7f174fb9d3
commit 8a726fb04b
16 changed files with 89 additions and 87 deletions

View File

@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
-    hierarchical_merge, make_colon_as_title, naive_merge, random_choices
+    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser
@@ -90,25 +90,16 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     make_colon_as_title(sections)
     bull = bullets_category([t for t in random_choices([t for t,_ in sections], k=100)])
     if bull >= 0: cks = hierarchical_merge(bull, sections, 3)
-    else: cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))
+    else:
+        sections = [s.split("@") for s in sections]
+        sections = [(pr[0], "@"+pr[1]) for pr in sections if len(pr)==2]
+        cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))
     sections = [t for t, _ in sections]
     # is it English
-    eng = lang.lower() == "english"#is_english(random_choices(sections, k=218))
+    eng = lang.lower() == "english"#is_english(random_choices([t for t, _ in sections], k=218))

-    res = []
-    # add tables
-    for img, rows in tbls:
-        bs = 10
-        de = ";" if eng else ""
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r, eng)
-            d["image"] = img
-            res.append(d)
-            print("TABLE", d["content_with_weight"])
+    res = tokenize_table(tbls, doc, eng)

     # wrap up to es documents
     for ck in cks:
         d = copy.deepcopy(doc)
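Note on the reworked else: branch above: naive_merge now receives (text, position) pairs instead of raw strings, keeping only sections that split cleanly on an "@"-separated position tag. A minimal sketch of the assumed input shape (the tag format below is illustrative, not taken from the PDF parser):

    sections = ["Chapter 1 overview@3-0.1-0.9", "stray line without a tag"]
    pairs = [s.split("@") for s in sections]
    # keep only entries that split into exactly (text, position); re-prefix "@"
    pairs = [(pr[0], "@" + pr[1]) for pr in pairs if len(pr) == 2]
    print(pairs)  # [('Chapter 1 overview', '@3-0.1-0.9')]

One consequence worth knowing: untagged sections, and sections whose text itself contains an extra "@", are silently dropped rather than merged.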

View File

@@ -2,7 +2,7 @@
 import copy
 import re
 from api.db import ParserType
-from rag.nlp import huqie, tokenize
+from rag.nlp import huqie, tokenize, tokenize_table
 from deepdoc.parser import PdfParser
 from rag.utils import num_tokens_from_string
@@ -81,18 +81,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # is it English
     eng = lang.lower() == "english"#pdf_parser.is_english
-    res = []
-    # add tables
-    for img, rows in tbls:
-        bs = 10
-        de = ";" if eng else ""
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r, eng)
-            d["image"] = img
-            res.append(d)
+    res = tokenize_table(tbls, doc, eng)

     i = 0
     chunk = []

View File

@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.app import laws
-from rag.nlp import huqie, is_english, tokenize, naive_merge
+from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
 from deepdoc.parser import PdfParser
 from rag.settings import cron_logger
@@ -72,17 +72,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         pdf_parser = Pdf()
         sections, tbls = pdf_parser(filename if not binary else binary,
                                     from_page=from_page, to_page=to_page, callback=callback)
-        # add tables
-        for img, rows in tbls:
-            bs = 10
-            de = ";" if eng else ""
-            for i in range(0, len(rows), bs):
-                d = copy.deepcopy(doc)
-                r = de.join(rows[i:i + bs])
-                r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-                tokenize(d, r, eng)
-                d["image"] = img
-                res.append(d)
+        res = tokenize_table(tbls, doc, eng)
     elif re.search(r"\.txt$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         txt = ""
@@ -106,6 +96,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # wrap up to es documents
     for ck in cks:
         print("--", ck)
+        if not ck:continue
         d = copy.deepcopy(doc)
         if pdf_parser:
             d["image"] = pdf_parser.crop(ck)

View File

@@ -15,7 +15,7 @@
 import re
 from collections import Counter
 from api.db import ParserType
-from rag.nlp import huqie, tokenize
+from rag.nlp import huqie, tokenize, tokenize_table
 from deepdoc.parser import PdfParser
 import numpy as np
 from rag.utils import num_tokens_from_string
@@ -158,18 +158,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     eng = lang.lower() == "english"#pdf_parser.is_english
     print("It's English.....", eng)
-    res = []
-    # add tables
-    for img, rows in paper["tables"]:
-        bs = 10
-        de = ";" if eng else ""
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r)
-            d["image"] = img
-            res.append(d)
+    res = tokenize_table(paper["tables"], doc, eng)

     if paper["abstract"]:
         d = copy.deepcopy(doc)
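Worth noting: unlike the other three removed copies of this loop, this one called tokenize(d, r) without the eng argument. Per the def tokenize(d, t, eng) signature visible in the rag/nlp hunk below, that call was missing a required positional argument and would raise a TypeError as soon as a paper PDF contained a table; routing everything through the shared helper fixes that crash along with the duplication.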

View File

@@ -20,7 +20,7 @@
 from deepdoc.parser import PdfParser, PptParser

 class Ppt(PptParser):
     def __call__(self, fnm, from_page, to_page, callback=None):
-        txts = super.__call__(fnm, from_page, to_page)
+        txts = super().__call__(fnm, from_page, to_page)
         callback(0.5, "Text extraction finished.")
         import aspose.slides as slides
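The one-character fix above is the crash fix for the PPT parser: bare super is the built-in type object, not a proxy bound to the parent class, so super.__call__(fnm, ...) amounts to calling super(fnm, from_page, to_page) and raises a TypeError before any text is extracted. A standalone reproduction (class names are illustrative):

    class Base:
        def __call__(self, x):
            return x * 2

    class Child(Base):
        def __call__(self, x):
            # broken form: super.__call__(x) == super(x) -> TypeError,
            # because bare `super` is the builtin type, not a bound proxy
            return super().__call__(x)  # fixed form, as in the commit

    print(Child()(21))  # 42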

View File

@@ -79,7 +79,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
     resume = remote_call(filename, binary)
     if len(resume.keys()) < 7:
         callback(-1, "Resume is not successfully parsed.")
-        return []
+        raise Exception("Resume parser remote call fail!")

     callback(0.6, "Done parsing. Chunking...")
     print(json.dumps(resume, ensure_ascii=False, indent=2))
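Raising here instead of returning [] changes the failure semantics: an empty list looks to the caller like a successfully parsed resume with nothing to index, whereas the exception propagates to the task executor's except Exception handler (visible in the last file of this diff), which reports the failure through callback(-1, ...) so the task is marked failed rather than silently completing empty.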

View File

@@ -1,4 +1,4 @@
+import copy
 from nltk.stem import PorterStemmer

 stemmer = PorterStemmer()
@@ -80,6 +80,20 @@ def tokenize(d, t, eng):
     d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])


+def tokenize_table(tbls, doc, eng, batch_size=10):
+    res = []
+    # add tables
+    for img, rows in tbls:
+        de = "; " if eng else " "
+        for i in range(0, len(rows), batch_size):
+            d = copy.deepcopy(doc)
+            r = de.join(rows[i:i + batch_size])
+            tokenize(d, r, eng)
+            d["image"] = img
+            res.append(d)
+    return res
+
+
 def remove_contents_table(sections, eng=False):
     i = 0
     while i < len(sections):
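The new helper above is what all four parsers now delegate to: each table's rows are joined in batches of batch_size, tokenized once per batch, and emitted as one ES document per batch with the table image attached. A hedged usage sketch, with a stub standing in for the real huqie-based tokenize:

    import copy

    def tokenize(d, text, eng):
        # stub for rag.nlp.tokenize: the real one fills content_ltks etc.
        d["content_with_weight"] = text

    def tokenize_table(tbls, doc, eng, batch_size=10):
        res = []
        for img, rows in tbls:
            de = "; " if eng else " "
            for i in range(0, len(rows), batch_size):
                d = copy.deepcopy(doc)  # fresh document per batch
                tokenize(d, de.join(rows[i:i + batch_size]), eng)
                d["image"] = img
                res.append(d)
        return res

    tbls = [("<table image>", ["row %d" % i for i in range(25)])]
    docs = tokenize_table(tbls, {"docnm_kwd": "demo.pdf"}, eng=True)
    print(len(docs))  # 3 documents: rows 0-9, 10-19, 20-24

One behavioral difference versus the removed per-file loops: the re.sub cleanup of trailing "——来自/in ..." source attributions is gone from the shared path.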
@@ -201,10 +215,12 @@ def naive_merge(sections, chunk_token_num=128, delimiter="\n。"):
        tnum = num_tokens_from_string(t)
        if tnum < 8: pos = ""
        if tk_nums[-1] > chunk_token_num:
-            cks.append(t + pos)
+            if t.find(pos) < 0: t += pos
+            cks.append(t)
            tk_nums.append(tnum)
        else:
-            cks[-1] += t + pos
+            if cks[-1].find(pos) < 0: t += pos
+            cks[-1] += t
            tk_nums[-1] += tnum

    for sec, pos in sections:
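The naive_merge change stops position tags from being appended blindly: a tag is only added when the chunk being built doesn't already contain it, and since find("") always returns 0, an empty pos (as set for sections under 8 tokens) is never appended at all. A small standalone trace of the guard (the tag format is illustrative):

    cks = ["first section"]
    pos = "@@3\t0.1\t0.9"

    t = "second section"
    if cks[-1].find(pos) < 0: t += pos   # tag not present yet -> appended
    cks[-1] += t

    t = "third section"                  # same position tag again
    if cks[-1].find(pos) < 0: t += pos   # already present -> skipped
    cks[-1] += t

    print(cks[0].count(pos))  # 1, not 2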

View File

@@ -1,6 +1,8 @@
 # -*- coding: utf-8 -*-
 import json
+import re
+from copy import deepcopy

 from elasticsearch_dsl import Q, Search
 from typing import List, Optional, Dict, Union
 from dataclasses import dataclass
@@ -98,7 +100,7 @@
         del s["highlight"]
         q_vec = s["knn"]["query_vector"]
         es_logger.info("【Q】: {}".format(json.dumps(s)))
-        res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src)
+        res = self.es.search(deepcopy(s), idxnm=idxnm, timeout="600s", src=src)
         es_logger.info("TOTAL: {}".format(self.es.getTotal(res)))
         if self.es.getTotal(res) == 0 and "knn" in s:
             bqry, _ = self.qryr.question(qst, min_match="10%")
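The deepcopy(s) guard matters because s is inspected again right after the call for the zero-hit fallback ("knn" in s) and then reused to build a relaxed retry query; if the search path mutates the request dict in flight, the retry would run against a corrupted body. A generic illustration of the hazard (the mutating callee is hypothetical, not the real ES client):

    from copy import deepcopy

    def search(body):
        # hypothetical client that consumes fields while building the request
        body.pop("knn", None)
        return {"hits": {"total": 0}}

    s = {"query": {"bool": {}}, "knn": {"query_vector": [0.1, 0.2]}}
    res = search(deepcopy(s))  # the copy is sacrificed...
    assert "knn" in s          # ...and s stays intact for the retry path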

View File

@@ -90,7 +90,7 @@ def dispatch():
             tsks.append(task)
         else:
             tsks.append(new_task())
-        print(tsks)
         bulk_insert_into_db(Task, tsks, True)
         set_dispatching(r["id"])
         tmf.write(str(r["update_time"]) + "\n")

View File

@@ -114,7 +114,7 @@ def build(row):
                               kb_id=row["kb_id"], parser_config=row["parser_config"], tenant_id=row["tenant_id"])
     except Exception as e:
         if re.search("(No such file|not found)", str(e)):
-            callback(-1, "Can not find file <%s>" % row["doc_name"])
+            callback(-1, "Can not find file <%s>" % row["name"])
         else:
             callback(-1, f"Internal server error: %s" %
                      str(e).replace("'", ""))
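The task row handed to build() carries the document's name under the name key, so row["doc_name"] presumably raised a KeyError inside this very error handler, replacing the useful "file not found" report with a second, confusing failure. A defensive variant (not what the commit does) would also tolerate a missing key:

    callback(-1, "Can not find file <%s>" % row.get("name", "unknown"))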