Use consistent log file names, introduce initLogger (#3403)

### What problem does this PR solve?

Use consistent log file names and introduce initLogger. Call sites stop importing the shared `logger` object from `api.utils.log_utils` and use the standard `logging` module instead, so logging is configured in one place.
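
The diff below only shows call sites switching to the stdlib `logging` module; `initLogger` itself lives in `api/utils/log_utils.py` and is not part of this excerpt. As a minimal sketch of what such an initializer typically does (the log directory, rotation limits, and format string here are assumptions, not the PR's actual values):

```python
import logging
import os
from logging.handlers import RotatingFileHandler


def initLogger(log_name: str, log_dir: str = "logs") -> None:
    """Configure the root logger once so every module that calls
    logging.debug()/logging.exception() writes to one consistently
    named file. Sketch only; paths and limits are assumed."""
    os.makedirs(log_dir, exist_ok=True)
    logfile = os.path.join(log_dir, f"{log_name}.log")  # consistent per-service name

    handler = RotatingFileHandler(logfile, maxBytes=10 * 1024 * 1024, backupCount=5)
    handler.setFormatter(logging.Formatter(
        "%(asctime)-15s %(levelname)s %(filename)s:%(lineno)d %(message)s"))

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(handler)
```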

### Type of change

- [ ] Bug Fix (non-breaking change which fixes an issue)
- [ ] New Feature (non-breaking change which adds functionality)
- [ ] Documentation Update
- [x] Refactoring
- [ ] Performance Improvement
- [ ] Other (please describe):
commit 30f6421760 (parent ab4384e011)
Author: Zhichang Yu
Date: 2024-11-14 17:13:48 +08:00
Committed by: GitHub

75 changed files with 396 additions and 402 deletions

View File

@@ -11,13 +11,13 @@
 # limitations under the License.
 #
+import logging
 import re
 import json
 import os
 import pandas as pd
 from rag.nlp import rag_tokenizer
 from . import regions
-from api.utils.log_utils import logger
 current_file_path = os.path.dirname(os.path.abspath(__file__))
@@ -71,7 +71,7 @@ GOOD_CORP = set([corpNorm(rmNoise(c), False) for c in GOOD_CORP])
 for c,v in CORP_TAG.items():
     cc = corpNorm(rmNoise(c), False)
     if not cc:
-        logger.info(c)
+        logging.debug(c)
 CORP_TAG = {corpNorm(rmNoise(c), False):v for c,v in CORP_TAG.items()}
 def is_good(nm):
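
This first file shows the pattern repeated across all 75 changed files: drop the shared `logger` object imported from `api.utils.log_utils` and call the stdlib `logging` module directly, so records propagate to whatever handlers `initLogger` installed on the root logger. A small illustrative sketch (the helper below is hypothetical, not from the diff):

```python
import logging


def normalize_corp(entry: str) -> str:
    # Hypothetical helper mirroring the loop above: entries that
    # normalize to an empty string are now logged at DEBUG rather
    # than INFO, keeping routine noise out of the default output.
    cleaned = entry.strip().lower()
    if not cleaned:
        logging.debug("empty corporation entry: %r", entry)
    return cleaned
```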

View File

@@ -10,7 +10,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import logging
 import re
 import copy
 import time
@@ -23,7 +23,6 @@ from deepdoc.parser.resume.entities import degrees, schools, corporations
 from rag.nlp import rag_tokenizer, surname
 from xpinyin import Pinyin
 from contextlib import contextmanager
-from api.utils.log_utils import logger
 class TimeoutException(Exception): pass
@@ -164,7 +163,7 @@ def forEdu(cv):
             y, m, d = getYMD(edu_end_dt)
             cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
         except Exception as e:
-            logger.exception("forEdu {} {} {}".format(e, edu_end_dt, cv.get("work_exp_flt")))
+            logging.exception("forEdu {} {} {}".format(e, edu_end_dt, cv.get("work_exp_flt")))
     if sch:
         cv["school_name_kwd"] = sch
     if (len(cv.get("degree_kwd", [])) >= 1 and "本科" in cv["degree_kwd"]) \
@@ -276,7 +275,7 @@ def forWork(cv):
         try:
             duas.append((datetime.datetime.strptime(ed, "%Y-%m-%d") - datetime.datetime.strptime(st, "%Y-%m-%d")).days)
         except Exception:
-            logger.exception("forWork {} {}".format(n.get("start_time"), n.get("end_time")))
+            logging.exception("forWork {} {}".format(n.get("start_time"), n.get("end_time")))
         if n.get("scale"):
             r = re.search(r"^([0-9]+)", str(n["scale"]))
@@ -333,7 +332,7 @@ def forWork(cv):
             y, m, d = getYMD(work_st_tm)
             cv["work_exp_flt"] = min(int(str(datetime.date.today())[0:4]) - int(y), cv.get("work_exp_flt", 1000))
         except Exception as e:
-            logger.exception("forWork {} {} {}".format(e, work_st_tm, cv.get("work_exp_flt")))
+            logging.exception("forWork {} {} {}".format(e, work_st_tm, cv.get("work_exp_flt")))
     cv["job_num_int"] = 0
     if duas:
@@ -464,7 +463,7 @@ def parse(cv):
                 cv[f"{t}_kwd"] = nms
                 cv[f"{t}_tks"] = rag_tokenizer.tokenize(" ".join(nms))
             except Exception:
-                logger.exception("parse {} {}".format(str(traceback.format_exc()), cv[k]))
+                logging.exception("parse {} {}".format(str(traceback.format_exc()), cv[k]))
                 cv[k] = []
     # tokenize fields
@@ -565,7 +564,7 @@ def parse(cv):
             cv["work_start_dt"] = "%s-%02d-%02d 00:00:00" % (y, int(m), int(d))
             cv["work_exp_flt"] = int(str(datetime.date.today())[0:4]) - int(y)
         except Exception as e:
-            logger.exception("parse {} ==> {}".format(e, cv.get("work_start_time")))
+            logging.exception("parse {} ==> {}".format(e, cv.get("work_start_time")))
     if "work_exp_flt" not in cv and cv.get("work_experience", 0): cv["work_exp_flt"] = int(cv["work_experience"]) / 12.
     keys = list(cv.keys())
@@ -580,7 +579,7 @@ def parse(cv):
     cv["tob_resume_id"] = str(cv["tob_resume_id"])
     cv["id"] = cv["tob_resume_id"]
-    logger.info("CCCCCCCCCCCCCCC")
+    logging.debug("CCCCCCCCCCCCCCC")
     return dealWithInt64(cv)