From 312f1a04773e32f0425b7beb2c9e7cd29eec1c3c Mon Sep 17 00:00:00 2001
From: Kevin Hu
Date: Wed, 20 Aug 2025 17:29:15 +0800
Subject: [PATCH] Fix: enlarge raptor timeout limits. (#9600)

### What problem does this PR solve?

RAPTOR summarization calls could exceed the previous 60-second limit, so the `_chat` and `summarize` timeouts are raised to 180 seconds (60*3). The PDF parser's `_naive_vertical_merge()` step is also re-enabled.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
---
 rag/app/naive.py | 2 +-
 rag/raptor.py    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/rag/app/naive.py b/rag/app/naive.py
index 173ef0aef..8c0708173 100644
--- a/rag/app/naive.py
+++ b/rag/app/naive.py
@@ -289,7 +289,7 @@ class Pdf(PdfParser):
             return [(b["text"], self._line_tag(b, zoomin)) for b in self.boxes], tbls, figures
         else:
             tbls = self._extract_table_figure(True, zoomin, True, True)
-            # self._naive_vertical_merge()
+            self._naive_vertical_merge()
             self._concat_downward()
             # self._filter_forpages()
             logging.info("layouts cost: {}s".format(timer() - first_start))
diff --git a/rag/raptor.py b/rag/raptor.py
index 961c5ab13..f2fbecd72 100644
--- a/rag/raptor.py
+++ b/rag/raptor.py
@@ -42,7 +42,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
         self._prompt = prompt
         self._max_token = max_token
 
-    @timeout(60)
+    @timeout(60*3)
     async def _chat(self, system, history, gen_conf):
         response = get_llm_cache(self._llm_model.llm_name, system, history, gen_conf)
         if response:
@@ -86,7 +86,7 @@ class RecursiveAbstractiveProcessing4TreeOrganizedRetrieval:
         layers = [(0, len(chunks))]
         start, end = 0, len(chunks)
 
-        @timeout(60)
+        @timeout(60*3)
         async def summarize(ck_idx: list[int]):
             nonlocal chunks
             texts = [chunks[i][0] for i in ck_idx]
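For reviewers unfamiliar with the `timeout` decorator being tuned above: it is defined elsewhere in the repository, and its exact behaviour is not shown in this patch. Below is a minimal, hypothetical sketch (assuming an `asyncio.wait_for`-style implementation; only the name `timeout` and the seconds argument come from the call sites above, everything else is illustrative) of how such a budget applies to async methods like `_chat` and `summarize`:

```python
# Hypothetical sketch only -- not RAGFlow's actual decorator.
import asyncio
import functools


def timeout(seconds: float):
    """Fail an async call that does not finish within `seconds`."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            # asyncio.wait_for cancels the wrapped coroutine and raises
            # TimeoutError once the budget is exhausted. After this patch the
            # RAPTOR budget is 60*3 = 180 seconds instead of 60.
            return await asyncio.wait_for(func(*args, **kwargs), timeout=seconds)
        return wrapper
    return decorator
```

Under such a scheme, an LLM summarization call that previously had to return within 60 seconds now has 180 seconds before the timeout fires.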