Use consistent log file names, introduced initLogger (#3403)

### What problem does this PR solve?

Use consistent log file names and introduce an `initLogger` helper.
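The `initLogger` helper itself is not visible in the excerpts below, which only show call sites switching to the stdlib `logging` module. As a rough sketch of the idea (consistent, per-service log file names configured once on the root logger), something like the following would do; the signature, format string, and rotation settings here are assumptions for illustration, not the PR's actual code:

```python
# Hypothetical sketch only; the real initLogger added by this PR is not shown in these diffs.
import logging
import os
from logging.handlers import RotatingFileHandler


def initLogger(service_name: str, log_dir: str = "logs", level: int = logging.INFO):
    """Configure the root logger once so that plain logging.info(...) calls
    anywhere in the codebase end up in a consistently named file,
    e.g. logs/ragflow_server.log."""
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.abspath(os.path.join(log_dir, f"{service_name}.log"))

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(process)d %(name)s %(message)s")

    # Rotating file handler keeps the file name stable across restarts.
    file_handler = RotatingFileHandler(
        log_path, maxBytes=10 * 1024 * 1024, backupCount=5)
    file_handler.setFormatter(formatter)

    # Mirror everything to the console as well.
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)

    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(file_handler)
    root.addHandler(console_handler)
```

A service entry point would then call something like `initLogger("ragflow_server")` before any module starts logging.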

### Type of change

- [ ] Bug Fix (non-breaking change which fixes an issue)
- [ ] New Feature (non-breaking change which adds functionality)
- [ ] Documentation Update
- [x] Refactoring
- [ ] Performance Improvement
- [ ] Other (please describe):
Zhichang Yu authored on 2024-11-14 17:13:48 +08:00; committed by GitHub.
Parent ab4384e011, commit 30f6421760.
75 changed files with 396 additions and 402 deletions
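Every file below follows the same mechanical pattern: the module-level `from api.utils.log_utils import logger` import is dropped, a plain `import logging` is added, and each `logger.xxx(...)` call becomes `logging.xxx(...)`, so all modules write through whatever handlers were configured on the root logger at startup. A minimal before/after illustration of the pattern (not an excerpt from the diff):

```python
import logging


def connect(url: str):
    # Before this PR, each module imported a pre-configured object:
    #     from api.utils.log_utils import logger
    #     logger.exception("Fail to connect %s" % url)
    # After it, the stdlib root logger is used directly:
    try:
        raise ConnectionError("simulated failure")  # stand-in for the real connection call
    except Exception:
        logging.exception("Fail to connect %s" % url)


connect("http://example.com")
```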

View File

@@ -1,3 +1,4 @@
+import logging
import os
import time
from io import BytesIO
@@ -24,7 +25,7 @@ class RAGFlowAzureSasBlob(object):
try:
self.conn = ContainerClient.from_container_url(self.container_url + "?" + self.sas_token)
except Exception:
-logger.exception("Fail to connect %s " % self.container_url)
+logging.exception("Fail to connect %s " % self.container_url)
def __close__(self):
del self.conn
@@ -39,7 +40,7 @@ class RAGFlowAzureSasBlob(object):
try:
return self.conn.upload_blob(name=fnm, data=BytesIO(binary), length=len(binary))
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}")
+logging.exception(f"Fail put {bucket}/{fnm}")
self.__open__()
time.sleep(1)
@@ -47,7 +48,7 @@ class RAGFlowAzureSasBlob(object):
try:
self.conn.delete_blob(fnm)
except Exception:
-logger.exception(f"Fail rm {bucket}/{fnm}")
+logging.exception(f"Fail rm {bucket}/{fnm}")
def get(self, bucket, fnm):
for _ in range(1):
@@ -55,7 +56,7 @@ class RAGFlowAzureSasBlob(object):
r = self.conn.download_blob(fnm)
return r.read()
except Exception:
-logger.exception(f"fail get {bucket}/{fnm}")
+logging.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
@@ -64,7 +65,7 @@ class RAGFlowAzureSasBlob(object):
try:
return self.conn.get_blob_client(fnm).exists()
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}")
+logging.exception(f"Fail put {bucket}/{fnm}")
return False
def get_presigned_url(self, bucket, fnm, expires):
@@ -72,7 +73,7 @@ class RAGFlowAzureSasBlob(object):
try:
return self.conn.get_presigned_url("GET", bucket, fnm, expires)
except Exception:
-logger.exception(f"fail get {bucket}/{fnm}")
+logging.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return

View File

@@ -1,3 +1,4 @@
+import logging
import os
import time
from rag import settings
@@ -28,7 +29,7 @@ class RAGFlowAzureSpnBlob(object):
credentials = ClientSecretCredential(tenant_id=self.tenant_id, client_id=self.client_id, client_secret=self.secret, authority=AzureAuthorityHosts.AZURE_CHINA)
self.conn = FileSystemClient(account_url=self.account_url, file_system_name=self.container_name, credential=credentials)
except Exception:
-logger.exception("Fail to connect %s" % self.account_url)
+logging.exception("Fail to connect %s" % self.account_url)
def __close__(self):
del self.conn
@@ -47,7 +48,7 @@ class RAGFlowAzureSpnBlob(object):
f.append_data(binary, offset=0, length=len(binary))
return f.flush_data(len(binary))
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}")
+logging.exception(f"Fail put {bucket}/{fnm}")
self.__open__()
time.sleep(1)
@@ -55,7 +56,7 @@ class RAGFlowAzureSpnBlob(object):
try:
self.conn.delete_file(fnm)
except Exception:
-logger.exception(f"Fail rm {bucket}/{fnm}")
+logging.exception(f"Fail rm {bucket}/{fnm}")
def get(self, bucket, fnm):
for _ in range(1):
@@ -64,7 +65,7 @@ class RAGFlowAzureSpnBlob(object):
r = client.download_file()
return r.read()
except Exception:
-logger.exception(f"fail get {bucket}/{fnm}")
+logging.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
@@ -74,7 +75,7 @@ class RAGFlowAzureSpnBlob(object):
client = self.conn.get_file_client(fnm)
return client.exists()
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}")
+logging.exception(f"Fail put {bucket}/{fnm}")
return False
def get_presigned_url(self, bucket, fnm, expires):
@@ -82,7 +83,7 @@ class RAGFlowAzureSpnBlob(object):
try:
return self.conn.get_presigned_url("GET", bucket, fnm, expires)
except Exception:
-logger.exception(f"fail get {bucket}/{fnm}")
+logging.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return

View File

@@ -1,3 +1,4 @@
+import logging
import re
import json
import time
@@ -8,7 +9,6 @@ import copy
from elasticsearch import Elasticsearch
from elasticsearch_dsl import UpdateByQuery, Q, Search, Index
from elastic_transport import ConnectionTimeout
-from api.utils.log_utils import logger
from rag import settings
from rag.utils import singleton
from api.utils.file_utils import get_project_base_directory
@@ -22,7 +22,7 @@ from rag.nlp import is_english, rag_tokenizer
class ESConnection(DocStoreConnection):
def __init__(self):
self.info = {}
-logger.info(f"Use Elasticsearch {settings.ES['hosts']} as the doc engine.")
+logging.info(f"Use Elasticsearch {settings.ES['hosts']} as the doc engine.")
for _ in range(24):
try:
self.es = Elasticsearch(
@@ -36,25 +36,25 @@ class ESConnection(DocStoreConnection):
self.info = self.es.info()
break
except Exception as e:
-logger.warn(f"{str(e)}. Waiting Elasticsearch {settings.ES['hosts']} to be healthy.")
+logging.warn(f"{str(e)}. Waiting Elasticsearch {settings.ES['hosts']} to be healthy.")
time.sleep(5)
if not self.es.ping():
msg = f"Elasticsearch {settings.ES['hosts']} didn't become healthy in 120s."
-logger.error(msg)
+logging.error(msg)
raise Exception(msg)
v = self.info.get("version", {"number": "8.11.3"})
v = v["number"].split(".")[0]
if int(v) < 8:
msg = f"Elasticsearch version must be greater than or equal to 8, current version: {v}"
-logger.error(msg)
+logging.error(msg)
raise Exception(msg)
fp_mapping = os.path.join(get_project_base_directory(), "conf", "mapping.json")
if not os.path.exists(fp_mapping):
msg = f"Elasticsearch mapping file not found at {fp_mapping}"
-logger.error(msg)
+logging.error(msg)
raise Exception(msg)
self.mapping = json.load(open(fp_mapping, "r"))
-logger.info(f"Elasticsearch {settings.ES['hosts']} is healthy.")
+logging.info(f"Elasticsearch {settings.ES['hosts']} is healthy.")
"""
Database operations
@@ -79,13 +79,13 @@ class ESConnection(DocStoreConnection):
settings=self.mapping["settings"],
mappings=self.mapping["mappings"])
except Exception:
-logger.exception("ES create index error %s" % (indexName))
+logging.exception("ES create index error %s" % (indexName))
def deleteIdx(self, indexName: str, knowledgebaseId: str):
try:
return self.es.indices.delete(indexName, allow_no_indices=True)
except Exception:
-logger.exception("ES delete index error %s" % (indexName))
+logging.exception("ES delete index error %s" % (indexName))
def indexExist(self, indexName: str, knowledgebaseId: str) -> bool:
s = Index(indexName, self.es)
@@ -93,7 +93,7 @@ class ESConnection(DocStoreConnection):
try:
return s.exists()
except Exception as e:
-logger.exception("ES indexExist")
+logging.exception("ES indexExist")
if str(e).find("Timeout") > 0 or str(e).find("Conflict") > 0:
continue
return False
@@ -178,7 +178,7 @@ class ESConnection(DocStoreConnection):
s = s[offset:limit]
q = s.to_dict()
print(json.dumps(q), flush=True)
-# logger.info("ESConnection.search [Q]: " + json.dumps(q))
+logging.debug("ESConnection.search [Q]: " + json.dumps(q))
for i in range(3):
try:
@@ -190,14 +190,14 @@ class ESConnection(DocStoreConnection):
_source=True)
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
-logger.info("ESConnection.search res: " + str(res))
+logging.debug("ESConnection.search res: " + str(res))
return res
except Exception as e:
-logger.exception("ES search [Q]: " + str(q))
+logging.exception("ES search [Q]: " + str(q))
if str(e).find("Timeout") > 0:
continue
raise e
-logger.error("ES search timeout for 3 times!")
+logging.error("ES search timeout for 3 times!")
raise Exception("ES search timeout.")
def get(self, chunkId: str, indexName: str, knowledgebaseIds: list[str]) -> dict | None:
@@ -213,11 +213,11 @@ class ESConnection(DocStoreConnection):
chunk["id"] = chunkId
return chunk
except Exception as e:
-logger.exception(f"ES get({chunkId}) got exception")
+logging.exception(f"ES get({chunkId}) got exception")
if str(e).find("Timeout") > 0:
continue
raise e
-logger.error("ES search timeout for 3 times!")
+logging.error("ES search timeout for 3 times!")
raise Exception("ES search timeout.")
def insert(self, documents: list[dict], indexName: str, knowledgebaseId: str) -> list[str]:
@@ -247,7 +247,7 @@ class ESConnection(DocStoreConnection):
res.append(str(item[action]["_id"]) + ":" + str(item[action]["error"]))
return res
except Exception as e:
-logger.warning("Fail to bulk: " + str(e))
+logging.warning("Fail to bulk: " + str(e))
if re.search(r"(Timeout|time out)", str(e), re.IGNORECASE):
time.sleep(3)
continue
@@ -264,7 +264,7 @@ class ESConnection(DocStoreConnection):
self.es.update(index=indexName, id=chunkId, doc=doc)
return True
except Exception as e:
-logger.exception(
+logging.exception(
f"ES failed to update(index={indexName}, id={id}, doc={json.dumps(condition, ensure_ascii=False)})")
if str(e).find("Timeout") > 0:
continue
@@ -304,7 +304,7 @@ class ESConnection(DocStoreConnection):
_ = ubq.execute()
return True
except Exception as e:
-logger.error("ES update exception: " + str(e) + "[Q]:" + str(bqry.to_dict()))
+logging.error("ES update exception: " + str(e) + "[Q]:" + str(bqry.to_dict()))
if str(e).find("Timeout") > 0 or str(e).find("Conflict") > 0:
continue
return False
@@ -326,7 +326,7 @@ class ESConnection(DocStoreConnection):
qry.must.append(Q("term", **{k: v}))
else:
raise Exception("Condition value must be int, str or list.")
-logger.info("ESConnection.delete [Q]: " + json.dumps(qry.to_dict()))
+logging.debug("ESConnection.delete [Q]: " + json.dumps(qry.to_dict()))
for _ in range(10):
try:
res = self.es.delete_by_query(
@@ -335,7 +335,7 @@ class ESConnection(DocStoreConnection):
refresh=True)
return res["deleted"]
except Exception as e:
-logger.warning("Fail to delete: " + str(filter) + str(e))
+logging.warning("Fail to delete: " + str(filter) + str(e))
if re.search(r"(Timeout|time out)", str(e), re.IGNORECASE):
time.sleep(3)
continue
@@ -419,7 +419,7 @@ class ESConnection(DocStoreConnection):
"""
def sql(self, sql: str, fetch_size: int, format: str):
-logger.info(f"ESConnection.sql get sql: {sql}")
+logging.debug(f"ESConnection.sql get sql: {sql}")
sql = re.sub(r"[ `]+", " ", sql)
sql = sql.replace("%", "")
replaces = []
@@ -436,7 +436,7 @@ class ESConnection(DocStoreConnection):
for p, r in replaces:
sql = sql.replace(p, r, 1)
-logger.info(f"ESConnection.sql to es: {sql}")
+logging.debug(f"ESConnection.sql to es: {sql}")
for i in range(3):
try:
@@ -444,10 +444,10 @@ class ESConnection(DocStoreConnection):
request_timeout="2s")
return res
except ConnectionTimeout:
-logger.exception("ESConnection.sql timeout [Q]: " + sql)
+logging.exception("ESConnection.sql timeout [Q]: " + sql)
continue
except Exception:
-logger.exception("ESConnection.sql got exception [Q]: " + sql)
+logging.exception("ESConnection.sql got exception [Q]: " + sql)
return None
-logger.error("ESConnection.sql timeout for 3 times!")
+logging.error("ESConnection.sql timeout for 3 times!")
return None

View File

@@ -1,3 +1,4 @@
+import logging
import os
import re
import json
@@ -7,7 +8,6 @@ import infinity
from infinity.common import ConflictType, InfinityException
from infinity.index import IndexInfo, IndexType
from infinity.connection_pool import ConnectionPool
-from api.utils.log_utils import logger
from rag import settings
from rag.utils import singleton
import polars as pl
@@ -56,7 +56,7 @@ class InfinityConnection(DocStoreConnection):
host, port = infinity_uri.split(":")
infinity_uri = infinity.common.NetworkAddress(host, int(port))
self.connPool = None
-logger.info(f"Use Infinity {infinity_uri} as the doc engine.")
+logging.info(f"Use Infinity {infinity_uri} as the doc engine.")
for _ in range(24):
try:
connPool = ConnectionPool(infinity_uri)
@@ -66,13 +66,13 @@ class InfinityConnection(DocStoreConnection):
self.connPool = connPool
break
except Exception as e:
-logger.warn(f"{str(e)}. Waiting Infinity {infinity_uri} to be healthy.")
+logging.warn(f"{str(e)}. Waiting Infinity {infinity_uri} to be healthy.")
time.sleep(5)
if self.connPool is None:
msg = f"Infinity {infinity_uri} didn't become healthy in 120s."
-logger.error(msg)
+logging.error(msg)
raise Exception(msg)
-logger.info(f"Infinity {infinity_uri} is healthy.")
+logging.info(f"Infinity {infinity_uri} is healthy.")
"""
Database operations
@@ -148,7 +148,7 @@ class InfinityConnection(DocStoreConnection):
)
break
self.connPool.release_conn(inf_conn)
-logger.info(
+logging.info(
f"INFINITY created table {table_name}, vector size {vectorSize}"
)
@@ -158,7 +158,7 @@ class InfinityConnection(DocStoreConnection):
db_instance = inf_conn.get_database(self.dbName)
db_instance.drop_table(table_name, ConflictType.Ignore)
self.connPool.release_conn(inf_conn)
-logger.info(f"INFINITY dropped table {table_name}")
+logging.info(f"INFINITY dropped table {table_name}")
def indexExist(self, indexName: str, knowledgebaseId: str) -> bool:
table_name = f"{indexName}_{knowledgebaseId}"
@@ -169,7 +169,7 @@ class InfinityConnection(DocStoreConnection):
self.connPool.release_conn(inf_conn)
return True
except Exception as e:
-logger.warn(f"INFINITY indexExist {str(e)}")
+logging.warn(f"INFINITY indexExist {str(e)}")
return False
"""
@@ -216,7 +216,7 @@ class InfinityConnection(DocStoreConnection):
)
if len(filter_cond) != 0:
filter_fulltext = f"({filter_cond}) AND {filter_fulltext}"
-# logger.info(f"filter_fulltext: {filter_fulltext}")
+logging.debug(f"filter_fulltext: {filter_fulltext}")
minimum_should_match = "0%"
if "minimum_should_match" in matchExpr.extra_options:
minimum_should_match = (
@@ -279,7 +279,7 @@ class InfinityConnection(DocStoreConnection):
df_list.append(kb_res)
self.connPool.release_conn(inf_conn)
res = pl.concat(df_list)
-logger.info("INFINITY search tables: " + str(table_list))
+logging.debug("INFINITY search tables: " + str(table_list))
return res
def get(
@@ -334,18 +334,18 @@ class InfinityConnection(DocStoreConnection):
str_filter = f"id IN ({str_ids})"
table_instance.delete(str_filter)
# for doc in documents:
-# logger.info(f"insert position_list: {doc['position_list']}")
-# logger.info(f"InfinityConnection.insert {json.dumps(documents)}")
+# logging.info(f"insert position_list: {doc['position_list']}")
+# logging.info(f"InfinityConnection.insert {json.dumps(documents)}")
table_instance.insert(documents)
self.connPool.release_conn(inf_conn)
-logger.info(f"inserted into {table_name} {str_ids}.")
+logging.debug(f"inserted into {table_name} {str_ids}.")
return []
def update(
self, condition: dict, newValue: dict, indexName: str, knowledgebaseId: str
) -> bool:
# if 'position_list' in newValue:
-# logger.info(f"upsert position_list: {newValue['position_list']}")
+# logging.info(f"upsert position_list: {newValue['position_list']}")
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
table_name = f"{indexName}_{knowledgebaseId}"
@@ -366,7 +366,7 @@ class InfinityConnection(DocStoreConnection):
try:
table_instance = db_instance.get_table(table_name)
except Exception:
-logger.warning(
+logging.warning(
f"Skipped deleting `{filter}` from table {table_name} since the table doesn't exist."
)
return 0

View File

@@ -1,9 +1,9 @@
+import logging
import time
from minio import Minio
from io import BytesIO
from rag import settings
from rag.utils import singleton
-from api.utils.log_utils import logger
@singleton
@@ -26,7 +26,7 @@ class RAGFlowMinio(object):
secure=False
)
except Exception:
-logger.exception(
+logging.exception(
"Fail to connect %s " % settings.MINIO["host"])
def __close__(self):
@@ -55,7 +55,7 @@ class RAGFlowMinio(object):
)
return r
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}:")
+logging.exception(f"Fail put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
@@ -63,7 +63,7 @@ class RAGFlowMinio(object):
try:
self.conn.remove_object(bucket, fnm)
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}:")
+logging.exception(f"Fail put {bucket}/{fnm}:")
def get(self, bucket, fnm):
for _ in range(1):
@@ -71,7 +71,7 @@ class RAGFlowMinio(object):
r = self.conn.get_object(bucket, fnm)
return r.read()
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}:")
+logging.exception(f"Fail put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
return
@@ -81,7 +81,7 @@ class RAGFlowMinio(object):
if self.conn.stat_object(bucket, fnm):return True
return False
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}:")
+logging.exception(f"Fail put {bucket}/{fnm}:")
return False
@@ -90,7 +90,7 @@ class RAGFlowMinio(object):
try:
return self.conn.get_presigned_url("GET", bucket, fnm, expires)
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}:")
+logging.exception(f"Fail put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
return

View File

@@ -1,7 +1,7 @@
+import logging
import json
import valkey as redis
-import logging
from rag import settings
from rag.utils import singleton

View File

@@ -1,3 +1,4 @@
+import logging
import boto3
import os
from botocore.exceptions import ClientError
@@ -40,7 +41,7 @@ class RAGFlowS3(object):
config=config
)
except Exception:
-logger.exception(
+logging.exception(
"Fail to connect %s" % self.endpoint)
def __close__(self):
@@ -49,11 +50,11 @@ class RAGFlowS3(object):
def bucket_exists(self, bucket):
try:
-logger.debug(f"head_bucket bucketname {bucket}")
+logging.debug(f"head_bucket bucketname {bucket}")
self.conn.head_bucket(Bucket=bucket)
exists = True
except ClientError:
-logger.exception(f"head_bucket error {bucket}")
+logging.exception(f"head_bucket error {bucket}")
exists = False
return exists
@@ -62,7 +63,7 @@ class RAGFlowS3(object):
if not self.bucket_exists(bucket):
self.conn.create_bucket(Bucket=bucket)
-logger.debug(f"create bucket {bucket} ********")
+logging.debug(f"create bucket {bucket} ********")
r = self.conn.upload_fileobj(BytesIO(binary), bucket, fnm)
return r
@@ -74,17 +75,17 @@ class RAGFlowS3(object):
return []
def put(self, bucket, fnm, binary):
-logger.debug(f"bucket name {bucket}; filename :{fnm}:")
+logging.debug(f"bucket name {bucket}; filename :{fnm}:")
for _ in range(1):
try:
if not self.bucket_exists(bucket):
self.conn.create_bucket(Bucket=bucket)
-logger.info(f"create bucket {bucket} ********")
+logging.info(f"create bucket {bucket} ********")
r = self.conn.upload_fileobj(BytesIO(binary), bucket, fnm)
return r
except Exception:
-logger.exception(f"Fail put {bucket}/{fnm}")
+logging.exception(f"Fail put {bucket}/{fnm}")
self.__open__()
time.sleep(1)
@@ -92,7 +93,7 @@ class RAGFlowS3(object):
try:
self.conn.delete_object(Bucket=bucket, Key=fnm)
except Exception:
-logger.exception(f"Fail rm {bucket}/{fnm}")
+logging.exception(f"Fail rm {bucket}/{fnm}")
def get(self, bucket, fnm):
for _ in range(1):
@@ -101,7 +102,7 @@ class RAGFlowS3(object):
object_data = r['Body'].read()
return object_data
except Exception:
-logger.exception(f"fail get {bucket}/{fnm}")
+logging.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
@@ -128,7 +129,7 @@ class RAGFlowS3(object):
return r
except Exception:
-logger.exception(f"fail get url {bucket}/{fnm}")
+logging.exception(f"fail get url {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return