Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Use consistent log file names, introduced initLogger (#3403)
### What problem does this PR solve?

Use consistent log file names; introduce `initLogger`.

### Type of change

- [ ] Bug Fix (non-breaking change which fixes an issue)
- [ ] New Feature (non-breaking change which adds functionality)
- [ ] Documentation Update
- [x] Refactoring
- [ ] Performance Improvement
- [ ] Other (please describe):
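The `initLogger` helper introduced by this PR is not itself visible in the hunks below; they only show each call site moving from the shared `api.utils.log_utils.logger` object to the standard `logging` module. As a rough sketch of the pattern (the name `init_logger`, its signature, the `log_dir` default, and the handler choices are illustrative assumptions, not the actual ragflow implementation), such an initializer would configure the root logger once, under a consistent file name, so that plain `logging.debug(...)` calls work everywhere:

```python
# Hypothetical sketch of an initLogger-style helper -- the actual
# implementation introduced by this PR is not shown in the diff below,
# and its real name, signature, and handlers may differ.
import logging
import logging.handlers
import os


def init_logger(basename: str, log_dir: str = "logs",
                level: int = logging.INFO) -> None:
    """Configure the root logger once, with a consistent file name,
    so plain logging.debug()/logging.info() calls work in every
    module without importing a shared logger object."""
    os.makedirs(log_dir, exist_ok=True)
    logfile = os.path.join(log_dir, f"{basename}.log")

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s %(message)s")

    # Rotate so long-running workers do not grow one unbounded file.
    file_handler = logging.handlers.RotatingFileHandler(
        logfile, maxBytes=50 * 1024 * 1024, backupCount=5)
    file_handler.setFormatter(formatter)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)

    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(file_handler)
    root.addHandler(console_handler)
```

A worker process would call something like `init_logger("ragflow_server")` once at startup; the `logging.debug(...)`/`logging.info(...)` calls in the diff then inherit that configuration, and downgrading the per-chunk timing logs from `info` to `debug` keeps the shared log file quiet by default.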
@@ -10,6 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from tika import parser
 import re
 from io import BytesIO
@@ -20,7 +21,6 @@ from rag.nlp import bullets_category, is_english,remove_contents_table, \
     tokenize_chunks
 from rag.nlp import rag_tokenizer
 from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
-from api.utils.log_utils import logger


 class Pdf(PdfParser):
@@ -39,7 +39,7 @@ class Pdf(PdfParser):
         start = timer()
         self._layouts_rec(zoomin)
         callback(0.67, "Layout analysis finished")
-        logger.info("layouts: {}".format(timer() - start))
+        logging.debug("layouts: {}".format(timer() - start))
         self._table_transformer_job(zoomin)
         callback(0.68, "Table analysis finished")
         self._text_merge()

@@ -11,6 +11,7 @@
 # limitations under the License.
 #

+import logging
 from email import policy
 from email.parser import BytesParser
 from rag.app.naive import chunk as naive_chunk
@@ -18,7 +19,6 @@ import re
 from rag.nlp import rag_tokenizer, naive_merge, tokenize_chunks
 from deepdoc.parser import HtmlParser, TxtParser
 from timeit import default_timer as timer
-from api.utils.log_utils import logger
 import io

@@ -86,7 +86,7 @@ def chunk(
     )

     main_res.extend(tokenize_chunks(chunks, doc, eng, None))
-    logger.info("naive_merge({}): {}".format(filename, timer() - st))
+    logging.debug("naive_merge({}): {}".format(filename, timer() - st))
     # get the attachment info
     for part in msg.iter_attachments():
         content_disposition = part.get("Content-Disposition")
@@ -21,7 +21,6 @@ from rag.nlp import bullets_category, remove_contents_table, hierarchical_merge,
     make_colon_as_title, tokenize_chunks, docx_question_level
 from rag.nlp import rag_tokenizer
 from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
-from api.utils.log_utils import logger


 class Docx(DocxParser):
@@ -122,7 +121,7 @@ class Pdf(PdfParser):
         start = timer()
         self._layouts_rec(zoomin)
         callback(0.67, "Layout analysis finished")
-        logger.info("layouts:".format(
+        logging.debug("layouts:".format(
             ))
         self._naive_vertical_merge()

@@ -14,6 +14,7 @@
 # limitations under the License.
 #

+import logging
 import copy
 import re

@@ -24,7 +25,6 @@ from rag.utils import num_tokens_from_string
 from deepdoc.parser import PdfParser, PlainParser, DocxParser
 from docx import Document
 from PIL import Image
-from api.utils.log_utils import logger


 class Pdf(PdfParser):
@@ -48,11 +48,11 @@ class Pdf(PdfParser):
         # for bb in self.boxes:
         #     for b in bb:
         #         print(b)
-        logger.info("OCR: {}".format(timer() - start))
+        logging.debug("OCR: {}".format(timer() - start))

         self._layouts_rec(zoomin)
         callback(0.65, "Layout analysis finished.")
-        logger.info("layouts: {}".format(timer() - start))
+        logging.debug("layouts: {}".format(timer() - start))
         self._table_transformer_job(zoomin)
         callback(0.67, "Table analysis finished.")
         self._text_merge()

@@ -10,6 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from tika import parser
 from io import BytesIO
 from docx import Document
@@ -19,7 +20,6 @@ from deepdoc.parser.pdf_parser import PlainParser
 from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, \
     naive_merge_docx, tokenize_chunks_docx
 from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser, MarkdownParser, TxtParser
-from api.utils.log_utils import logger
 from rag.utils import num_tokens_from_string
 from PIL import Image
 from functools import reduce
@@ -41,13 +41,13 @@ class Docx(DocxParser):
         try:
             image_blob = related_part.image.blob
         except UnrecognizedImageError:
-            logger.info("Unrecognized image format. Skipping image.")
+            logging.info("Unrecognized image format. Skipping image.")
             return None
         except UnexpectedEndOfFileError:
-            logger.info("EOF was unexpectedly encountered while reading an image stream. Skipping image.")
+            logging.info("EOF was unexpectedly encountered while reading an image stream. Skipping image.")
             return None
         except InvalidImageStreamError:
-            logger.info("The recognized image stream appears to be corrupted. Skipping image.")
+            logging.info("The recognized image stream appears to be corrupted. Skipping image.")
             return None
         try:
             image = Image.open(BytesIO(image_blob)).convert('RGB')
@@ -133,7 +133,7 @@ class Pdf(PdfParser):
             callback
         )
         callback(msg="OCR finished")
-        logger.info("OCR({}~{}): {}".format(from_page, to_page, timer() - start))
+        logging.info("OCR({}~{}): {}".format(from_page, to_page, timer() - start))

         start = timer()
         self._layouts_rec(zoomin)
@@ -147,7 +147,7 @@ class Pdf(PdfParser):
         self._concat_downward()
         # self._filter_forpages()

-        logger.info("layouts cost: {}s".format(timer() - start))
+        logging.info("layouts cost: {}s".format(timer() - start))
         return [(b["text"], self._line_tag(b, zoomin))
                 for b in self.boxes], tbls

@@ -216,7 +216,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             return chunks

         res.extend(tokenize_chunks_docx(chunks, doc, eng, images))
-        logger.info("naive_merge({}): {}".format(filename, timer() - st))
+        logging.info("naive_merge({}): {}".format(filename, timer() - st))
         return res

     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
@@ -280,7 +280,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             return chunks

         res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
-        logger.info("naive_merge({}): {}".format(filename, timer() - st))
+        logging.info("naive_merge({}): {}".format(filename, timer() - st))
         return res


@@ -10,6 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 from tika import parser
 from io import BytesIO
 import re
@@ -18,7 +19,6 @@ from deepdoc.parser.utils import get_text
 from rag.app import laws
 from rag.nlp import rag_tokenizer, tokenize
 from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser
-from api.utils.log_utils import logger


 class Pdf(PdfParser):
@@ -38,7 +38,7 @@ class Pdf(PdfParser):
         start = timer()
         self._layouts_rec(zoomin, drop=False)
         callback(0.63, "Layout analysis finished.")
-        logger.info("layouts cost: {}s".format(timer() - start))
+        logging.debug("layouts cost: {}s".format(timer() - start))
         self._table_transformer_job(zoomin)
         callback(0.65, "Table analysis finished.")
         self._text_merge()

@@ -10,6 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import copy
 import re

@@ -17,7 +18,6 @@ from api.db import ParserType
 from rag.nlp import rag_tokenizer, tokenize, tokenize_table, add_positions, bullets_category, title_frequency, tokenize_chunks
 from deepdoc.parser import PdfParser, PlainParser
 import numpy as np
-from api.utils.log_utils import logger


 class Pdf(PdfParser):
@@ -41,7 +41,7 @@ class Pdf(PdfParser):
         start = timer()
         self._layouts_rec(zoomin)
         callback(0.63, "Layout analysis finished")
-        logger.info(f"layouts cost: {timer() - start}s")
+        logging.debug(f"layouts cost: {timer() - start}s")
         self._table_transformer_job(zoomin)
         callback(0.68, "Table analysis finished")
         self._text_merge()
@@ -53,7 +53,7 @@ class Pdf(PdfParser):

         # clean mess
         if column_width < self.page_images[0].size[0] / zoomin / 2:
-            logger.info("two_column................... {} {}".format(column_width,
+            logging.debug("two_column................... {} {}".format(column_width,
                             self.page_images[0].size[0] / zoomin / 2))
             self.boxes = self.sort_X_by_page(self.boxes, column_width / 2)
         for b in self.boxes:
@@ -115,8 +115,8 @@ class Pdf(PdfParser):
                 from_page, min(
                     to_page, self.total_page)))
         for b in self.boxes:
-            logger.info("{} {}".format(b["text"], b.get("layoutno")))
-        logger.info("{}".format(tbls))
+            logging.debug("{} {}".format(b["text"], b.get("layoutno")))
+        logging.debug("{}".format(tbls))

         return {
             "title": title,
@@ -157,7 +157,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
     doc["authors_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["authors_tks"])
     # is it English
     eng = lang.lower() == "english"  # pdf_parser.is_english
-    logger.info("It's English.....{}".format(eng))
+    logging.debug("It's English.....{}".format(eng))

     res = tokenize_table(paper["tables"], doc, eng)

@@ -184,7 +184,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
         if lvl <= most_level and i > 0 and lvl != levels[i - 1]:
             sid += 1
         sec_ids.append(sid)
-        logger.info("{} {} {} {}".format(lvl, sorted_sections[i][0], most_level, sid))
+        logging.debug("{} {} {} {}".format(lvl, sorted_sections[i][0], most_level, sid))

     chunks = []
     last_sid = -2

@@ -10,6 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import re
 from copy import deepcopy
 from io import BytesIO
@@ -19,7 +20,6 @@ from openpyxl import load_workbook
 from deepdoc.parser.utils import get_text
 from rag.nlp import is_english, random_choices, qbullets_category, add_positions, has_qbullet, docx_question_level
 from rag.nlp import rag_tokenizer, tokenize_table, concat_img
-from api.utils.log_utils import logger
 from deepdoc.parser import PdfParser, ExcelParser, DocxParser
 from docx import Document
 from PIL import Image
@@ -82,7 +82,7 @@ class Pdf(PdfParser):
             callback
         )
         callback(msg="OCR finished")
-        logger.info("OCR({}~{}): {}".format(from_page, to_page, timer() - start))
+        logging.debug("OCR({}~{}): {}".format(from_page, to_page, timer() - start))
         start = timer()
         self._layouts_rec(zoomin, drop=False)
         callback(0.63, "Layout analysis finished.")
@@ -94,7 +94,7 @@ class Pdf(PdfParser):
         #self._naive_vertical_merge()
         # self._concat_downward()
         #self._filter_forpages()
-        logger.info("layouts: {}".format(timer() - start))
+        logging.debug("layouts: {}".format(timer() - start))
         sections = [b["text"] for b in self.boxes]
         bull_x0_list = []
         q_bull, reg = qbullets_category(sections)

@@ -10,6 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import base64
 import datetime
 import json
@@ -20,7 +21,6 @@ from api.db.services.knowledgebase_service import KnowledgebaseService
 from rag.nlp import rag_tokenizer
 from deepdoc.parser.resume import refactor
 from deepdoc.parser.resume import step_one, step_two
-from api.utils.log_utils import logger
 from rag.utils import rmSpace

 forbidden_select_fields4resume = [
@@ -64,7 +64,7 @@ def remote_call(filename, binary):
         resume = step_two.parse(resume)
         return resume
     except Exception:
-        logger.exception("Resume parser error")
+        logging.exception("Resume parser error")
         return {}


@@ -86,7 +86,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
         callback(-1, "Resume is not successfully parsed.")
         raise Exception("Resume parser remote call fail!")
     callback(0.6, "Done parsing. Chunking...")
-    logger.info("chunking resume: " + json.dumps(resume, ensure_ascii=False, indent=2))
+    logging.debug("chunking resume: " + json.dumps(resume, ensure_ascii=False, indent=2))

     field_map = {
         "name_kwd": "姓名/名字",
@@ -158,7 +158,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
         resume[n] = rag_tokenizer.fine_grained_tokenize(resume[n])
         doc[n] = resume[n]

-    logger.info("chunked resume to " + str(doc))
+    logging.debug("chunked resume to " + str(doc))
     KnowledgebaseService.update_parser_config(
         kwargs["kb_id"], {"field_map": field_map})
     return [doc]