optimize srv broker and executor logic (#630)

### What problem does this PR solve?

Optimize the task broker and executor to reduce memory usage and deployment
complexity.

### Type of change
- [x] Performance Improvement
- [x] Refactoring

### Change Log
- Enhance Redis utils for the message queue (using Redis Streams)
- Modify the task broker logic to work via the message queue (1. get parse events from
the message queue; 2. run them asynchronously with a ThreadPoolExecutor); see the sketch after this list
- Rename the process_duation column of the document and task tables to
process_duration (likely just a spelling mistake)
- Reformat some code style (just what I saw)
- Add requirement_dev.txt for developers
- Add a Redis container to docker compose
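
The enqueue/consume round trip is roughly as follows. This is a minimal sketch assuming the helpers introduced in this PR (REDIS_CONN.queue_product / queue_consumer and the SVR_* constants from rag/settings); the placement of the ThreadPoolExecutor is illustrative only, since that hunk is not shown below.

```python
from concurrent.futures import ThreadPoolExecutor

from rag.settings import SVR_QUEUE_NAME, SVR_CONSUMER_GROUP_NAME, SVR_CONSUMER_NAME
from rag.utils.redis_conn import REDIS_CONN


def enqueue_task(task: dict) -> bool:
    # Broker side: push one parse-task event onto the Redis stream instead of
    # having the executor poll the database for newly created tasks.
    return REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=task)


def consume_one():
    # Executor side: queue_consumer blocks (up to 10s) waiting for one event,
    # then the message is acknowledged and handed to the processing pipeline.
    payload = REDIS_CONN.queue_consumer(
        SVR_QUEUE_NAME, SVR_CONSUMER_GROUP_NAME, SVR_CONSUMER_NAME)
    if not payload:
        return None
    msg = payload.get_message()
    payload.ack()
    return msg


# Hypothetical: dispatch enqueue work through a thread pool, as mentioned in
# the change log; the exact wiring of the ThreadPoolExecutor is an assumption.
with ThreadPoolExecutor(max_workers=4) as pool:
    ok = pool.submit(enqueue_task, {"id": "task-1", "doc_id": "doc-1"}).result()
```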

---------

Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
Fakai Zhao
2024-05-07 11:43:33 +08:00
committed by GitHub
parent c6b6c748ae
commit de839fc3f0
20 changed files with 414 additions and 300 deletions

View File

@@ -95,8 +95,7 @@ class OpenAIEmbed(Base):
def encode(self, texts: list, batch_size=32):
res = self.client.embeddings.create(input=texts,
model=self.model_name)
return np.array([d.embedding for d in res.data]
), res.usage.total_tokens
return np.array([d.embedding for d in res.data]), res.usage.total_tokens
def encode_queries(self, text):
res = self.client.embeddings.create(input=[text],

View File

@@ -9,12 +9,11 @@ from elasticsearch_dsl import Q
from rag.nlp import rag_tokenizer, term_weight, synonym
class EsQueryer:
def __init__(self, es):
self.tw = term_weight.Dealer()
self.es = es
self.syn = synonym.Dealer(None)
self.syn = synonym.Dealer()
self.flds = ["ask_tks^10", "ask_small_tks"]
@staticmethod

View File

@@ -47,3 +47,9 @@ cron_logger = getLogger("cron_logger")
cron_logger.setLevel(20)
chunk_logger = getLogger("chunk_logger")
database_logger = getLogger("database")
SVR_QUEUE_NAME = "rag_flow_svr_queue"
SVR_QUEUE_RETENTION = 60*60
SVR_QUEUE_MAX_LEN = 1024
SVR_CONSUMER_NAME = "rag_flow_svr_consumer"
SVR_CONSUMER_GROUP_NAME = "rag_flow_svr_consumer_group"

View File

@@ -1,189 +0,0 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import time
import random
from datetime import datetime
from api.db.db_models import Task
from api.db.db_utils import bulk_insert_into_db
from api.db.services.file2document_service import File2DocumentService
from api.db.services.task_service import TaskService
from deepdoc.parser import PdfParser
from deepdoc.parser.excel_parser import RAGFlowExcelParser
from rag.settings import cron_logger
from rag.utils.minio_conn import MINIO
from rag.utils import findMaxTm
import pandas as pd
from api.db import FileType, TaskStatus
from api.db.services.document_service import DocumentService
from api.settings import database_logger
from api.utils import get_format_time, get_uuid
from api.utils.file_utils import get_project_base_directory
from rag.utils.redis_conn import REDIS_CONN
from api.db.db_models import init_database_tables as init_web_db
from api.db.init_data import init_web_data
def collect(tm):
docs = DocumentService.get_newly_uploaded(tm)
if len(docs) == 0:
return pd.DataFrame()
docs = pd.DataFrame(docs)
mtm = docs["update_time"].max()
cron_logger.info("TOTAL:{}, To:{}".format(len(docs), mtm))
return docs
def set_dispatching(docid):
try:
DocumentService.update_by_id(
docid, {"progress": random.random() * 1 / 100.,
"progress_msg": "Task dispatched...",
"process_begin_at": get_format_time()
})
except Exception as e:
cron_logger.error("set_dispatching:({}), {}".format(docid, str(e)))
def dispatch():
tm_fnm = os.path.join(
get_project_base_directory(),
"rag/res",
f"broker.tm")
tm = findMaxTm(tm_fnm)
rows = collect(tm)
if len(rows) == 0:
return
tmf = open(tm_fnm, "a+")
for _, r in rows.iterrows():
try:
tsks = TaskService.query(doc_id=r["id"])
if tsks:
for t in tsks:
TaskService.delete_by_id(t.id)
except Exception as e:
cron_logger.exception(e)
def new_task():
nonlocal r
return {
"id": get_uuid(),
"doc_id": r["id"]
}
tsks = []
try:
bucket, name = File2DocumentService.get_minio_address(doc_id=r["id"])
file_bin = MINIO.get(bucket, name)
if r["type"] == FileType.PDF.value:
do_layout = r["parser_config"].get("layout_recognize", True)
pages = PdfParser.total_page_number(r["name"], file_bin)
page_size = r["parser_config"].get("task_page_size", 12)
if r["parser_id"] == "paper":
page_size = r["parser_config"].get("task_page_size", 22)
if r["parser_id"] == "one":
page_size = 1000000000
if not do_layout:
page_size = 1000000000
page_ranges = r["parser_config"].get("pages")
if not page_ranges:
page_ranges = [(1, 100000)]
for s, e in page_ranges:
s -= 1
s = max(0, s)
e = min(e - 1, pages)
for p in range(s, e, page_size):
task = new_task()
task["from_page"] = p
task["to_page"] = min(p + page_size, e)
tsks.append(task)
elif r["parser_id"] == "table":
rn = RAGFlowExcelParser.row_number(
r["name"], file_bin)
for i in range(0, rn, 3000):
task = new_task()
task["from_page"] = i
task["to_page"] = min(i + 3000, rn)
tsks.append(task)
else:
tsks.append(new_task())
bulk_insert_into_db(Task, tsks, True)
set_dispatching(r["id"])
except Exception as e:
cron_logger.exception(e)
tmf.write(str(r["update_time"]) + "\n")
tmf.close()
def update_progress():
docs = DocumentService.get_unfinished_docs()
for d in docs:
try:
tsks = TaskService.query(doc_id=d["id"], order_by=Task.create_time)
if not tsks:
continue
msg = []
prg = 0
finished = True
bad = 0
status = TaskStatus.RUNNING.value
for t in tsks:
if 0 <= t.progress < 1:
finished = False
prg += t.progress if t.progress >= 0 else 0
msg.append(t.progress_msg)
if t.progress == -1:
bad += 1
prg /= len(tsks)
if finished and bad:
prg = -1
status = TaskStatus.FAIL.value
elif finished:
status = TaskStatus.DONE.value
msg = "\n".join(msg)
info = {
"process_duation": datetime.timestamp(
datetime.now()) -
d["process_begin_at"].timestamp(),
"run": status}
if prg != 0:
info["progress"] = prg
if msg:
info["progress_msg"] = msg
DocumentService.update_by_id(d["id"], info)
except Exception as e:
cron_logger.error("fetch task exception:" + str(e))
if __name__ == "__main__":
peewee_logger = logging.getLogger('peewee')
peewee_logger.propagate = False
peewee_logger.addHandler(database_logger.handlers[0])
peewee_logger.setLevel(database_logger.level)
# init db
init_web_db()
init_web_data()
while True:
dispatch()
time.sleep(1)
update_progress()
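
The polling dispatch() loop above is deleted in this PR. Per the change log, its work is presumably moved to the point where the task rows are created, which would look roughly like the sketch below; the call site and the helper name dispatch_tasks are assumptions, only the queue helpers come from this diff.

```python
from api.db.db_models import Task
from api.db.db_utils import bulk_insert_into_db
from rag.settings import SVR_QUEUE_NAME
from rag.utils.redis_conn import REDIS_CONN


def dispatch_tasks(tsks):
    # Persist the task rows, then publish one stream event per task so the
    # executor's collect() picks them up without any broker polling loop.
    bulk_insert_into_db(Task, tsks, True)
    for t in tsks:
        REDIS_CONN.queue_product(SVR_QUEUE_NAME, message=t)
```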

View File

@@ -28,7 +28,7 @@ from functools import partial
from api.db.services.file2document_service import File2DocumentService
from rag.utils.minio_conn import MINIO
from api.db.db_models import close_connection
from rag.settings import database_logger
from rag.settings import database_logger, SVR_QUEUE_NAME
from rag.settings import cron_logger, DOC_MAXIMUM_SIZE
from multiprocessing import Pool
import numpy as np
@@ -93,20 +93,29 @@ def set_progress(task_id, from_page=0, to_page=-1,
sys.exit()
def collect(comm, mod, tm):
tasks = TaskService.get_tasks(tm, mod, comm)
#print(tasks)
if len(tasks) == 0:
time.sleep(1)
def collect():
try:
payload = REDIS_CONN.queue_consumer(SVR_QUEUE_NAME, "rag_flow_svr_task_broker", "rag_flow_svr_task_consumer")
if not payload:
time.sleep(1)
return pd.DataFrame()
except Exception as e:
cron_logger.error("Get task event from queue exception:" + str(e))
return pd.DataFrame()
msg = payload.get_message()
payload.ack()
if not msg: return pd.DataFrame()
if TaskService.do_cancel(msg["id"]):
return pd.DataFrame()
tasks = TaskService.get_tasks(msg["id"])
assert tasks, "{} empty task!".format(msg["id"])
tasks = pd.DataFrame(tasks)
mtm = tasks["update_time"].max()
cron_logger.info("TOTAL:{}, To:{}".format(len(tasks), mtm))
return tasks
def get_minio_binary(bucket, name):
global MINIO
return MINIO.get(bucket, name)
@@ -122,13 +131,10 @@ def build(row):
row["from_page"],
row["to_page"])
chunker = FACTORY[row["parser_id"].lower()]
pool = Pool(processes=1)
try:
st = timer()
bucket, name = File2DocumentService.get_minio_address(doc_id=row["doc_id"])
thr = pool.apply_async(get_minio_binary, args=(bucket, name))
binary = thr.get(timeout=90)
pool.terminate()
binary = get_minio_binary(bucket, name)
cron_logger.info(
"From minio({}) {}/{}".format(timer()-st, row["location"], row["name"]))
cks = chunker.chunk(row["name"], binary=binary, from_page=row["from_page"],
@@ -147,7 +153,6 @@ def build(row):
else:
callback(-1, f"Internal server error: %s" %
str(e).replace("'", ""))
pool.terminate()
traceback.print_exc()
cron_logger.error(
@@ -238,20 +243,13 @@ def embedding(docs, mdl, parser_config={}, callback=None):
return tk_count
def main(comm, mod):
tm_fnm = os.path.join(
get_project_base_directory(),
"rag/res",
f"{comm}-{mod}.tm")
tm = findMaxTm(tm_fnm)
rows = collect(comm, mod, tm)
def main():
rows = collect()
if len(rows) == 0:
return
tmf = open(tm_fnm, "a+")
for _, r in rows.iterrows():
callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
#callback(random.random()/10., "Task has been received.")
try:
embd_mdl = LLMBundle(r["tenant_id"], LLMType.EMBEDDING, llm_name=r["embd_id"], lang=r["language"])
except Exception as e:
@@ -265,7 +263,6 @@ def main(comm, mod):
if cks is None:
continue
if not cks:
tmf.write(str(r["update_time"]) + "\n")
callback(1., "No chunk! Done!")
continue
# TODO: exception handler
@@ -305,8 +302,6 @@ def main(comm, mod):
"Chunk doc({}), token({}), chunks({}), elapsed:{}".format(
r["id"], tk_count, len(cks), timer()-st))
tmf.write(str(r["update_time"]) + "\n")
tmf.close()
if __name__ == "__main__":
@@ -315,8 +310,6 @@ if __name__ == "__main__":
peewee_logger.addHandler(database_logger.handlers[0])
peewee_logger.setLevel(database_logger.level)
#from mpi4py import MPI
#comm = MPI.COMM_WORLD
while True:
main(int(sys.argv[2]), int(sys.argv[1]))
main()
close_connection()

View File

@@ -5,6 +5,27 @@ import logging
from rag import settings
from rag.utils import singleton
class Payload:
def __init__(self, consumer, queue_name, group_name, msg_id, message):
self.__consumer = consumer
self.__queue_name = queue_name
self.__group_name = group_name
self.__msg_id = msg_id
self.__message = json.loads(message['message'])
def ack(self):
try:
self.__consumer.xack(self.__queue_name, self.__group_name, self.__msg_id)
return True
except Exception as e:
logging.warning("[EXCEPTION]ack" + str(self.__queue_name) + "||" + str(e))
return False
def get_message(self):
return self.__message
@singleton
class RedisDB:
def __init__(self):
@@ -17,7 +38,8 @@ class RedisDB:
self.REDIS = redis.StrictRedis(host=self.config["host"].split(":")[0],
port=int(self.config.get("host", ":6379").split(":")[1]),
db=int(self.config.get("db", 1)),
password=self.config["password"])
password=self.config.get("password"),
decode_responses=True)
except Exception as e:
logging.warning("Redis can't be connected.")
return self.REDIS
@@ -70,5 +92,48 @@ class RedisDB:
self.__open__()
return False
def queue_product(self, queue, message, exp=settings.SVR_QUEUE_RETENTION) -> bool:
try:
payload = {"message": json.dumps(message)}
pipeline = self.REDIS.pipeline()
pipeline.xadd(queue, payload)
pipeline.expire(queue, exp)
pipeline.execute()
return True
except Exception as e:
logging.warning("[EXCEPTION]producer" + str(queue) + "||" + str(e))
return False
def queue_consumer(self, queue_name, group_name, consumer_name, msg_id=b">") -> Payload:
try:
group_info = self.REDIS.xinfo_groups(queue_name)
if not any(e["name"] == group_name for e in group_info):
self.REDIS.xgroup_create(
queue_name,
group_name,
id="$",
mkstream=True
)
args = {
"groupname": group_name,
"consumername": consumer_name,
"count": 1,
"block": 10000,
"streams": {queue_name: msg_id},
}
messages = self.REDIS.xreadgroup(**args)
if not messages:
return None
stream, element_list = messages[0]
msg_id, payload = element_list[0]
res = Payload(self.REDIS, queue_name, group_name, msg_id, payload)
return res
except Exception as e:
if 'key' in str(e):
pass
else:
logging.warning("[EXCEPTION]consumer" + str(queue_name) + "||" + str(e))
return None
REDIS_CONN = RedisDB()
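
For reference, queue_product and queue_consumer above are thin wrappers over redis-py's Stream commands (XADD, XGROUP CREATE, XREADGROUP, XACK). A standalone sketch of the equivalent raw calls, assuming a local Redis on the default port and the queue names added to rag/settings in this PR:

```python
import json
import redis

r = redis.StrictRedis(host="localhost", port=6379, db=1, decode_responses=True)

STREAM, GROUP, CONSUMER = ("rag_flow_svr_queue",
                           "rag_flow_svr_consumer_group",
                           "rag_flow_svr_consumer")

# Create the consumer group once (XGROUP CREATE ... MKSTREAM).
try:
    r.xgroup_create(STREAM, GROUP, id="$", mkstream=True)
except redis.ResponseError:
    pass  # group already exists

# Producer: one stream entry whose "message" field holds the JSON task (XADD),
# with the retention/TTL applied to the whole stream key.
r.xadd(STREAM, {"message": json.dumps({"id": "task-1"})})
r.expire(STREAM, 60 * 60)

# Consumer: read one undelivered entry for this group, then acknowledge it.
resp = r.xreadgroup(GROUP, CONSUMER, {STREAM: ">"}, count=1, block=10000)
if resp:
    _, entries = resp[0]
    msg_id, fields = entries[0]
    task = json.loads(fields["message"])
    r.xack(STREAM, GROUP, msg_id)
```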