optimize srv broker and executor logic (#630)

### What problem does this PR solve?

Optimize the task broker and executor to reduce memory usage and deployment
complexity.

### Type of change
- [x] Performance Improvement
- [x] Refactoring

### Change Log
- Enhance the Redis utils for the message queue (use a Redis Stream)
- Modify the task broker logic to go through the message queue (1. get the
parse event from the message queue; 2. use a ThreadPoolExecutor as an async executor)
- Rename the `process_duation` column of the document and task tables to
`process_duration` (likely just a spelling mistake)
- Reformat some code style (just what I noticed)
- Add requirement_dev.txt for developers
- Add redis container on docker compose

---------

Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
This commit is contained in:
Fakai Zhao
2024-05-07 11:43:33 +08:00
committed by GitHub
parent c6b6c748ae
commit de839fc3f0
20 changed files with 414 additions and 300 deletions

View File

@ -5,6 +5,27 @@ import logging
from rag import settings
from rag.utils import singleton
class Payload:
    """A single message pulled from a Redis Stream consumer group.

    Keeps the connection and addressing info (stream, group, entry id)
    needed to XACK the entry later, and exposes the JSON-decoded body
    via get_message().
    """

    def __init__(self, consumer, queue_name, group_name, msg_id, message):
        self.__consumer = consumer
        self.__queue_name = queue_name
        self.__group_name = group_name
        self.__msg_id = msg_id
        # Stream entry fields carry the JSON body under the "message" key
        # (the format queue_product writes).
        self.__message = json.loads(message["message"])

    def ack(self):
        """Acknowledge this entry in its consumer group; True on success."""
        try:
            self.__consumer.xack(self.__queue_name, self.__group_name, self.__msg_id)
        except Exception as e:
            logging.warning(f"[EXCEPTION]ack{self.__queue_name}||{e}")
            return False
        return True

    def get_message(self):
        """Return the decoded message payload."""
        return self.__message
@singleton
class RedisDB:
def __init__(self):
@ -17,7 +38,8 @@ class RedisDB:
self.REDIS = redis.StrictRedis(host=self.config["host"].split(":")[0],
port=int(self.config.get("host", ":6379").split(":")[1]),
db=int(self.config.get("db", 1)),
password=self.config["password"])
password=self.config.get("password"),
decode_responses=True)
except Exception as e:
logging.warning("Redis can't be connected.")
return self.REDIS
@ -70,5 +92,48 @@ class RedisDB:
self.__open__()
return False
def queue_product(self, queue, message, exp=settings.SVR_QUEUE_RETENTION) -> bool:
    """Publish `message` onto the stream `queue` via XADD.

    The body is JSON-encoded under the "message" field — the layout that
    Payload expects on the consumer side. The stream's TTL is refreshed
    to `exp` seconds on every publish so an active queue never expires.
    Returns True on success, False on any Redis error (best-effort).
    """
    try:
        pipe = self.REDIS.pipeline()
        pipe.xadd(queue, {"message": json.dumps(message)})
        pipe.expire(queue, exp)
        pipe.execute()
    except Exception as e:
        logging.warning(f"[EXCEPTION]producer{queue}||{e}")
        return False
    return True
REDIS_CONN = RedisDB()
def queue_consumer(self, queue_name, group_name, consumer_name, msg_id=b">") -> Payload:
    """Fetch at most one entry from `queue_name` for this consumer group.

    Lazily creates the consumer group (mkstream, starting at the stream
    tail "$") when it does not exist yet, then blocks up to 10 seconds on
    XREADGROUP for a single entry. Returns a Payload wrapping the entry,
    or None when nothing arrived or an error occurred.
    """
    try:
        existing = self.REDIS.xinfo_groups(queue_name)
        if all(g["name"] != group_name for g in existing):
            self.REDIS.xgroup_create(queue_name, group_name, id="$", mkstream=True)
        replies = self.REDIS.xreadgroup(
            groupname=group_name,
            consumername=consumer_name,
            count=1,
            block=10000,
            streams={queue_name: msg_id},
        )
        if not replies:
            return None
        _stream, entries = replies[0]
        entry_id, fields = entries[0]
        return Payload(self.REDIS, queue_name, group_name, entry_id, fields)
    except Exception as e:
        # A not-yet-created stream surfaces as an error mentioning "key"
        # (expected before the first produce), so stay quiet for those.
        if "key" not in str(e):
            logging.warning(f"[EXCEPTION]consumer{queue_name}||{e}")
        return None
REDIS_CONN = RedisDB()