Rework logging (#3358)

Unified all log files into one.

### What problem does this PR solve?

Unified all log files into one.

### Type of change

- [x] Refactoring
Zhichang Yu authored 2024-11-12 17:35:13 +08:00; committed by GitHub
parent 567a7563e7
commit a2a5631da4
75 changed files with 481 additions and 853 deletions
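
The diffs below replace the per-subsystem loggers (`azure_logger`, `doc_store_logger`, `minio_logger`, `s3_logger`) with a single `logger` imported from `api.utils.log_utils`. That module itself is not part of this excerpt; as a rough sketch of what a unified, file-backed logger of this kind could look like (the module path and the `logger` name come from the imports below, while the handler, format, env var, and file name are assumptions):

```python
# api/utils/log_utils.py -- illustrative sketch only, not the commit's actual module
import logging
import os
from logging.handlers import RotatingFileHandler

# RAGFLOW_LOG_FILE is a hypothetical setting; the real default may differ
LOG_FILE = os.environ.get("RAGFLOW_LOG_FILE", "ragflow.log")

logger = logging.getLogger("ragflow")
logger.setLevel(logging.INFO)

if not logger.handlers:  # guard against duplicate handlers on repeated imports
    handler = RotatingFileHandler(LOG_FILE, maxBytes=10 * 1024 * 1024, backupCount=5)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s"))
    logger.addHandler(handler)
```

Every storage and doc-store backend then does `from api.utils.log_utils import logger`, so all records land in one file instead of per-backend log files.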

rag/utils/azure_sas_conn.py

@@ -2,7 +2,6 @@ import os
import time
from io import BytesIO
from rag import settings
from rag.settings import azure_logger
from rag.utils import singleton
from azure.storage.blob import ContainerClient
@@ -19,14 +18,13 @@ class RAGFlowAzureSasBlob(object):
try:
if self.conn:
self.__close__()
except Exception as e:
except Exception:
pass
try:
self.conn = ContainerClient.from_container_url(self.container_url + "?" + self.sas_token)
except Exception as e:
azure_logger.error(
"Fail to connect %s " % self.container_url + str(e))
except Exception:
logger.exception("Fail to connect %s " % self.container_url)
def __close__(self):
del self.conn
@@ -40,24 +38,24 @@ class RAGFlowAzureSasBlob(object):
for _ in range(3):
try:
return self.conn.upload_blob(name=fnm, data=BytesIO(binary), length=len(binary))
except Exception as e:
azure_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}")
self.__open__()
time.sleep(1)
def rm(self, bucket, fnm):
try:
self.conn.delete_blob(fnm)
except Exception as e:
azure_logger.error(f"Fail rm {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail rm {bucket}/{fnm}")
def get(self, bucket, fnm):
for _ in range(1):
try:
r = self.conn.download_blob(fnm)
return r.read()
except Exception as e:
azure_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
@@ -65,16 +63,16 @@ class RAGFlowAzureSasBlob(object):
def obj_exist(self, bucket, fnm):
try:
return self.conn.get_blob_client(fnm).exists()
except Exception as e:
azure_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}")
return False
def get_presigned_url(self, bucket, fnm, expires):
for _ in range(10):
try:
return self.conn.get_presigned_url("GET", bucket, fnm, expires)
except Exception as e:
azure_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
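
A pattern repeated throughout this commit: `xxx_logger.error(msg + str(e))` becomes `logger.exception(msg)`. `Logger.exception` logs at ERROR level and automatically appends the active traceback, so the handler no longer needs to bind the exception (`except Exception as e:` shrinks to `except Exception:`) or concatenate `str(e)`. A minimal before/after sketch (names are illustrative):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

try:
    1 / 0
except Exception as e:
    logger.error("old style: " + str(e))  # message only, traceback is lost

try:
    1 / 0
except Exception:
    logger.exception("new style")  # same ERROR level, plus the full traceback
```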

rag/utils/azure_spn_conn.py

@@ -1,7 +1,6 @@
import os
import time
from rag import settings
from rag.settings import azure_logger
from rag.utils import singleton
from azure.identity import ClientSecretCredential, AzureAuthorityHosts
from azure.storage.filedatalake import FileSystemClient
@@ -22,15 +21,14 @@ class RAGFlowAzureSpnBlob(object):
try:
if self.conn:
self.__close__()
except Exception as e:
except Exception:
pass
try:
credentials = ClientSecretCredential(tenant_id=self.tenant_id, client_id=self.client_id, client_secret=self.secret, authority=AzureAuthorityHosts.AZURE_CHINA)
self.conn = FileSystemClient(account_url=self.account_url, file_system_name=self.container_name, credential=credentials)
except Exception as e:
azure_logger.error(
"Fail to connect %s " % self.account_url + str(e))
except Exception:
logger.exception("Fail to connect %s" % self.account_url)
def __close__(self):
del self.conn
@@ -48,16 +46,16 @@ class RAGFlowAzureSpnBlob(object):
f = self.conn.create_file(fnm)
f.append_data(binary, offset=0, length=len(binary))
return f.flush_data(len(binary))
except Exception as e:
azure_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}")
self.__open__()
time.sleep(1)
def rm(self, bucket, fnm):
try:
self.conn.delete_file(fnm)
except Exception as e:
azure_logger.error(f"Fail rm {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail rm {bucket}/{fnm}")
def get(self, bucket, fnm):
for _ in range(1):
@@ -65,8 +63,8 @@ class RAGFlowAzureSpnBlob(object):
client = self.conn.get_file_client(fnm)
r = client.download_file()
return r.read()
except Exception as e:
azure_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
@@ -75,16 +73,16 @@ class RAGFlowAzureSpnBlob(object):
try:
client = self.conn.get_file_client(fnm)
return client.exists()
except Exception as e:
azure_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}")
return False
def get_presigned_url(self, bucket, fnm, expires):
for _ in range(10):
try:
return self.conn.get_presigned_url("GET", bucket, fnm, expires)
except Exception as e:
azure_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return

rag/utils/es_conn.py

@@ -9,7 +9,7 @@ import copy
from elasticsearch import Elasticsearch
from elasticsearch_dsl import UpdateByQuery, Q, Search, Index
from elastic_transport import ConnectionTimeout
from rag.settings import doc_store_logger
from api.utils.log_utils import logger
from rag import settings
from rag.utils import singleton
from api.utils.file_utils import get_project_base_directory
@@ -17,7 +17,7 @@ import polars as pl
from rag.utils.doc_store_conn import DocStoreConnection, MatchExpr, OrderByExpr, MatchTextExpr, MatchDenseExpr, FusionExpr
from rag.nlp import is_english, rag_tokenizer
doc_store_logger.info("Elasticsearch sdk version: "+str(elasticsearch.__version__))
logger.info("Elasticsearch sdk version: "+str(elasticsearch.__version__))
@singleton
@@ -34,10 +34,10 @@ class ESConnection(DocStoreConnection):
)
if self.es:
self.info = self.es.info()
doc_store_logger.info("Connect to es.")
logger.info("Connect to es.")
break
except Exception as e:
doc_store_logger.error("Fail to connect to es: " + str(e))
except Exception:
logger.exception("Fail to connect to es")
time.sleep(1)
if not self.es.ping():
raise Exception("Can't connect to ES cluster")
@@ -70,14 +70,14 @@ class ESConnection(DocStoreConnection):
return IndicesClient(self.es).create(index=indexName,
settings=self.mapping["settings"],
mappings=self.mapping["mappings"])
except Exception as e:
doc_store_logger.error("ES create index error %s ----%s" % (indexName, str(e)))
except Exception:
logger.exception("ES create index error %s" % (indexName))
def deleteIdx(self, indexName: str, knowledgebaseId: str):
try:
return self.es.indices.delete(indexName, allow_no_indices=True)
except Exception as e:
doc_store_logger.error("ES delete index error %s ----%s" % (indexName, str(e)))
except Exception:
logger.exception("ES delete index error %s" % (indexName))
def indexExist(self, indexName: str, knowledgebaseId: str) -> bool:
s = Index(indexName, self.es)
@@ -85,7 +85,7 @@ class ESConnection(DocStoreConnection):
try:
return s.exists()
except Exception as e:
doc_store_logger.error("ES indexExist: " + str(e))
logger.exception("ES indexExist")
if str(e).find("Timeout") > 0 or str(e).find("Conflict") > 0:
continue
return False
@@ -159,7 +159,7 @@ class ESConnection(DocStoreConnection):
if limit > 0:
s = s[offset:limit]
q = s.to_dict()
doc_store_logger.info("ESConnection.search [Q]: " + json.dumps(q))
# logger.info("ESConnection.search [Q]: " + json.dumps(q))
for i in range(3):
try:
@@ -171,18 +171,14 @@ class ESConnection(DocStoreConnection):
_source=True)
if str(res.get("timed_out", "")).lower() == "true":
raise Exception("Es Timeout.")
doc_store_logger.info("ESConnection.search res: " + str(res))
logger.info("ESConnection.search res: " + str(res))
return res
except Exception as e:
doc_store_logger.error(
"ES search exception: " +
str(e) +
"\n[Q]: " +
str(q))
logger.exception("ES search [Q]: " + str(q))
if str(e).find("Timeout") > 0:
continue
raise e
doc_store_logger.error("ES search timeout for 3 times!")
logger.error("ES search timeout for 3 times!")
raise Exception("ES search timeout.")
def get(self, chunkId: str, indexName: str, knowledgebaseIds: list[str]) -> dict | None:
@@ -198,15 +194,11 @@ class ESConnection(DocStoreConnection):
chunk["id"] = chunkId
return chunk
except Exception as e:
doc_store_logger.error(
"ES get exception: " +
str(e) +
"[Q]: " +
chunkId)
logger.exception(f"ES get({chunkId}) got exception")
if str(e).find("Timeout") > 0:
continue
raise e
doc_store_logger.error("ES search timeout for 3 times!")
logger.error("ES search timeout for 3 times!")
raise Exception("ES search timeout.")
def insert(self, documents: list[dict], indexName: str, knowledgebaseId: str) -> list[str]:
@@ -236,7 +228,7 @@ class ESConnection(DocStoreConnection):
res.append(str(item[action]["_id"]) + ":" + str(item[action]["error"]))
return res
except Exception as e:
doc_store_logger.warning("Fail to bulk: " + str(e))
logger.warning("Fail to bulk: " + str(e))
if re.search(r"(Timeout|time out)", str(e), re.IGNORECASE):
time.sleep(3)
continue
@@ -253,9 +245,7 @@ class ESConnection(DocStoreConnection):
self.es.update(index=indexName, id=chunkId, doc=doc)
return True
except Exception as e:
doc_store_logger.error(
"ES update exception: " + str(e) + " id:" + str(id) +
json.dumps(newValue, ensure_ascii=False))
logger.exception(f"ES failed to update(index={indexName}, id={id}, doc={json.dumps(condition, ensure_ascii=False)})")
if str(e).find("Timeout") > 0:
continue
else:
@@ -292,8 +282,7 @@ class ESConnection(DocStoreConnection):
_ = ubq.execute()
return True
except Exception as e:
doc_store_logger.error("ES update exception: " +
str(e) + "[Q]:" + str(bqry.to_dict()))
logger.error("ES update exception: " + str(e) + "[Q]:" + str(bqry.to_dict()))
if str(e).find("Timeout") > 0 or str(e).find("Conflict") > 0:
continue
return False
@@ -315,7 +304,7 @@ class ESConnection(DocStoreConnection):
qry.must.append(Q("term", **{k: v}))
else:
raise Exception("Condition value must be int, str or list.")
doc_store_logger.info("ESConnection.delete [Q]: " + json.dumps(qry.to_dict()))
logger.info("ESConnection.delete [Q]: " + json.dumps(qry.to_dict()))
for _ in range(10):
try:
res = self.es.delete_by_query(
@@ -324,7 +313,7 @@ class ESConnection(DocStoreConnection):
refresh=True)
return res["deleted"]
except Exception as e:
doc_store_logger.warning("Fail to delete: " + str(filter) + str(e))
logger.warning("Fail to delete: " + str(filter) + str(e))
if re.search(r"(Timeout|time out)", str(e), re.IGNORECASE):
time.sleep(3)
continue
@@ -407,7 +396,7 @@ class ESConnection(DocStoreConnection):
SQL
"""
def sql(self, sql: str, fetch_size: int, format: str):
doc_store_logger.info(f"ESConnection.sql get sql: {sql}")
logger.info(f"ESConnection.sql get sql: {sql}")
sql = re.sub(r"[ `]+", " ", sql)
sql = sql.replace("%", "")
replaces = []
@@ -424,17 +413,17 @@ class ESConnection(DocStoreConnection):
for p, r in replaces:
sql = sql.replace(p, r, 1)
doc_store_logger.info(f"ESConnection.sql to es: {sql}")
logger.info(f"ESConnection.sql to es: {sql}")
for i in range(3):
try:
res = self.es.sql.query(body={"query": sql, "fetch_size": fetch_size}, format=format, request_timeout="2s")
return res
except ConnectionTimeout:
doc_store_logger.error("ESConnection.sql timeout [Q]: " + sql)
logger.exception("ESConnection.sql timeout [Q]: " + sql)
continue
except Exception as e:
doc_store_logger.error(f"ESConnection.sql failure: {sql} => " + str(e))
except Exception:
logger.exception("ESConnection.sql got exception [Q]: " + sql)
return None
doc_store_logger.error("ESConnection.sql timeout for 3 times!")
logger.error("ESConnection.sql timeout for 3 times!")
return None

rag/utils/infinity_conn.py

@@ -7,7 +7,7 @@ from infinity.common import ConflictType, InfinityException
from infinity.index import IndexInfo, IndexType
from infinity.connection_pool import ConnectionPool
from rag import settings
from rag.settings import doc_store_logger
from api.utils.log_utils import logger
from rag.utils import singleton
import polars as pl
from polars.series.series import Series
@@ -22,7 +22,6 @@ from rag.utils.doc_store_conn import (
OrderByExpr,
)
def equivalent_condition_to_str(condition: dict) -> str:
assert "_id" not in condition
cond = list()
@@ -56,7 +55,7 @@ class InfinityConnection(DocStoreConnection):
host, port = infinity_uri.split(":")
infinity_uri = infinity.common.NetworkAddress(host, int(port))
self.connPool = ConnectionPool(infinity_uri)
doc_store_logger.info(f"Connected to infinity {infinity_uri}.")
logger.info(f"Connected to infinity {infinity_uri}.")
"""
Database operations
@@ -71,7 +70,7 @@ class InfinityConnection(DocStoreConnection):
TODO: Infinity-sdk provides health() to wrap `show global variables` and `show tables`
"""
inf_conn = self.connPool.get_conn()
res = infinity.show_current_node()
res = inf_conn.show_current_node()
self.connPool.release_conn(inf_conn)
color = "green" if res.error_code == 0 else "red"
res2 = {
@@ -132,7 +131,7 @@ class InfinityConnection(DocStoreConnection):
)
break
self.connPool.release_conn(inf_conn)
doc_store_logger.info(
logger.info(
f"INFINITY created table {table_name}, vector size {vectorSize}"
)
@@ -142,7 +141,7 @@ class InfinityConnection(DocStoreConnection):
db_instance = inf_conn.get_database(self.dbName)
db_instance.drop_table(table_name, ConflictType.Ignore)
self.connPool.release_conn(inf_conn)
doc_store_logger.info(f"INFINITY dropped table {table_name}")
logger.info(f"INFINITY dropped table {table_name}")
def indexExist(self, indexName: str, knowledgebaseId: str) -> bool:
table_name = f"{indexName}_{knowledgebaseId}"
@@ -152,8 +151,8 @@ class InfinityConnection(DocStoreConnection):
_ = db_instance.get_table(table_name)
self.connPool.release_conn(inf_conn)
return True
except Exception as e:
doc_store_logger.error("INFINITY indexExist: " + str(e))
except Exception:
logger.exception("INFINITY indexExist")
return False
"""
@@ -263,7 +262,7 @@ class InfinityConnection(DocStoreConnection):
df_list.append(kb_res)
self.connPool.release_conn(inf_conn)
res = pl.concat(df_list)
doc_store_logger.info("INFINITY search tables: " + str(table_list))
logger.info("INFINITY search tables: " + str(table_list))
return res
def get(
@@ -318,8 +317,8 @@ class InfinityConnection(DocStoreConnection):
str_filter = f"id IN ({str_ids})"
table_instance.delete(str_filter)
# for doc in documents:
# doc_store_logger.info(f"insert position_list: {doc['position_list']}")
# doc_store_logger.info(f"InfinityConnection.insert {json.dumps(documents)}")
# logger.info(f"insert position_list: {doc['position_list']}")
# logger.info(f"InfinityConnection.insert {json.dumps(documents)}")
table_instance.insert(documents)
self.connPool.release_conn(inf_conn)
doc_store_logger.info(f"inserted into {table_name} {str_ids}.")
@@ -329,7 +328,7 @@ class InfinityConnection(DocStoreConnection):
self, condition: dict, newValue: dict, indexName: str, knowledgebaseId: str
) -> bool:
# if 'position_list' in newValue:
# doc_store_logger.info(f"update position_list: {newValue['position_list']}")
# logger.info(f"upsert position_list: {newValue['position_list']}")
inf_conn = self.connPool.get_conn()
db_instance = inf_conn.get_database(self.dbName)
table_name = f"{indexName}_{knowledgebaseId}"
@@ -350,7 +349,7 @@ class InfinityConnection(DocStoreConnection):
try:
table_instance = db_instance.get_table(table_name)
except Exception:
doc_store_logger.warning(
logger.warning(
f"Skipped deleting `{filter}` from table {table_name} since the table doesn't exist."
)
return 0

rag/utils/minio_conn.py

@@ -1,10 +1,9 @@
import os
import time
from minio import Minio
from io import BytesIO
from rag import settings
from rag.settings import minio_logger
from rag.utils import singleton
from api.utils.log_utils import logger
@singleton
@@ -17,7 +16,7 @@ class RAGFlowMinio(object):
try:
if self.conn:
self.__close__()
except Exception as e:
except Exception:
pass
try:
@@ -26,9 +25,9 @@ class RAGFlowMinio(object):
secret_key=settings.MINIO["password"],
secure=False
)
except Exception as e:
minio_logger.error(
"Fail to connect %s " % settings.MINIO["host"] + str(e))
except Exception:
logger.exception(
"Fail to connect %s " % settings.MINIO["host"])
def __close__(self):
del self.conn
@@ -55,24 +54,24 @@ class RAGFlowMinio(object):
len(binary)
)
return r
except Exception as e:
minio_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
def rm(self, bucket, fnm):
try:
self.conn.remove_object(bucket, fnm)
except Exception as e:
minio_logger.error(f"Fail rm {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}:")
def get(self, bucket, fnm):
for _ in range(1):
try:
r = self.conn.get_object(bucket, fnm)
return r.read()
except Exception as e:
minio_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
return
@@ -81,8 +80,8 @@ class RAGFlowMinio(object):
try:
if self.conn.stat_object(bucket, fnm):return True
return False
except Exception as e:
minio_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}:")
return False
@@ -90,8 +89,8 @@ class RAGFlowMinio(object):
for _ in range(10):
try:
return self.conn.get_presigned_url("GET", bucket, fnm, expires)
except Exception as e:
minio_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}:")
self.__open__()
time.sleep(1)
return

rag/utils/redis_conn.py

@@ -110,9 +110,8 @@ class RedisDB:
#pipeline.expire(queue, exp)
pipeline.execute()
return True
except Exception as e:
print(e)
logging.warning("[EXCEPTION]producer" + str(queue) + "||" + str(e))
except Exception:
logging.exception("producer" + str(queue) + " got exception")
return False
def queue_consumer(self, queue_name, group_name, consumer_name, msg_id=b">") -> Payload:
@@ -143,7 +142,7 @@ class RedisDB:
if 'key' in str(e):
pass
else:
logging.warning("[EXCEPTION]consumer: " + str(queue_name) + "||" + str(e))
logging.exception("consumer: " + str(queue_name) + " got exception")
return None
def get_unacked_for(self, consumer_name, queue_name, group_name):
@@ -160,7 +159,7 @@ class RedisDB:
except Exception as e:
if 'key' in str(e):
return
logging.warning("[EXCEPTION]xpending_range: " + consumer_name + "||" + str(e))
logging.exception("xpending_range: " + consumer_name + " got exception")
self.__open__()
REDIS_CONN = RedisDB()
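
Unlike the other backends, `redis_conn.py` keeps module-level `logging.exception(...)` calls, which go through the root logger rather than a named one; they still capture the traceback, but they only reach the unified log file if the root logger is configured with (or propagates to) the same handler, which this excerpt does not show. A small sketch of the module-level form:

```python
import logging

logging.basicConfig(level=logging.WARNING)  # configures the root logger

try:
    raise KeyError("missing")
except Exception:
    # equivalent to logging.getLogger().exception(...): ERROR level + traceback
    logging.exception("consumer got exception")
```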

rag/utils/s3_conn.py

@@ -4,7 +4,6 @@ from botocore.exceptions import ClientError
from botocore.client import Config
import time
from io import BytesIO
from rag.settings import s3_logger
from rag.utils import singleton
@singleton
@@ -21,7 +20,7 @@ class RAGFlowS3(object):
try:
if self.conn:
self.__close__()
except Exception as e:
except Exception:
pass
try:
@@ -40,9 +39,9 @@ class RAGFlowS3(object):
aws_secret_access_key=self.secret_key,
config=config
)
except Exception as e:
s3_logger.error(
"Fail to connect %s " % self.endpoint + str(e))
except Exception:
logger.exception(
"Fail to connect %s" % self.endpoint)
def __close__(self):
del self.conn
@@ -50,11 +49,11 @@ class RAGFlowS3(object):
def bucket_exists(self, bucket):
try:
s3_logger.error(f"head_bucket bucketname {bucket}")
logger.debug(f"head_bucket bucketname {bucket}")
self.conn.head_bucket(Bucket=bucket)
exists = True
except ClientError as e:
s3_logger.error(f"head_bucket error {bucket}: " + str(e))
except ClientError:
logger.exception(f"head_bucket error {bucket}")
exists = False
return exists
@@ -63,7 +62,7 @@ class RAGFlowS3(object):
if not self.bucket_exists(bucket):
self.conn.create_bucket(Bucket=bucket)
s3_logger.error(f"create bucket {bucket} ********")
logger.debug(f"create bucket {bucket} ********")
r = self.conn.upload_fileobj(BytesIO(binary), bucket, fnm)
return r
@@ -75,25 +74,25 @@ class RAGFlowS3(object):
return []
def put(self, bucket, fnm, binary):
s3_logger.error(f"bucket name {bucket}; filename :{fnm}:")
logger.debug(f"bucket name {bucket}; filename :{fnm}:")
for _ in range(1):
try:
if not self.bucket_exists(bucket):
self.conn.create_bucket(Bucket=bucket)
s3_logger.error(f"create bucket {bucket} ********")
logger.info(f"create bucket {bucket} ********")
r = self.conn.upload_fileobj(BytesIO(binary), bucket, fnm)
return r
except Exception as e:
s3_logger.error(f"Fail put {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail put {bucket}/{fnm}")
self.__open__()
time.sleep(1)
def rm(self, bucket, fnm):
try:
self.conn.delete_object(Bucket=bucket, Key=fnm)
except Exception as e:
s3_logger.error(f"Fail rm {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"Fail rm {bucket}/{fnm}")
def get(self, bucket, fnm):
for _ in range(1):
@@ -101,8 +100,8 @@ class RAGFlowS3(object):
r = self.conn.get_object(Bucket=bucket, Key=fnm)
object_data = r['Body'].read()
return object_data
except Exception as e:
s3_logger.error(f"fail get {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"fail get {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return
@@ -128,8 +127,8 @@ class RAGFlowS3(object):
ExpiresIn=expires)
return r
except Exception as e:
s3_logger.error(f"fail get url {bucket}/{fnm}: " + str(e))
except Exception:
logger.exception(f"fail get url {bucket}/{fnm}")
self.__open__()
time.sleep(1)
return