From 5fa6f2f151c4ff39aa9e9175e97f0778c1fbc2be Mon Sep 17 00:00:00 2001
From: Stephen Hu
Date: Tue, 15 Jul 2025 14:04:58 +0800
Subject: [PATCH] Update embedding_model.py (#8836)

### What problem does this PR solve?

Remove unnecessary conversion for BGE encode_queries

### Type of change

- [x] Performance Improvement
---
 rag/llm/embedding_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py
index 16a415bf7..98f9bcbc1 100644
--- a/rag/llm/embedding_model.py
+++ b/rag/llm/embedding_model.py
@@ -114,7 +114,7 @@ class DefaultEmbedding(Base):
 
     def encode_queries(self, text: str):
         token_count = num_tokens_from_string(text)
-        return self._model.encode_queries([text]).tolist()[0], token_count
+        return self._model.encode_queries([text], convert_to_numpy=False)[0][0].cpu().numpy(), token_count
 
 
 class OpenAIEmbed(Base):
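
For context, a minimal sketch of the caller-facing difference, using a stand-in tensor rather than the repository's BGE model (the variable names and the 1024-dim shape below are illustrative assumptions, not part of the patch): the old code materialized the embedding as a Python list via `tolist()`, while the patched code keeps the tensor and exposes it as a numpy array.

```python
# Illustrative sketch only -- a stand-in tensor, not ragflow's DefaultEmbedding.
# The 1024-dim shape is an assumption (typical of BGE-large style models).
import torch

emb = torch.randn(1, 1024)  # pretend encoder output for a single query

# Old behaviour: convert the whole embedding to a plain Python list of floats.
old_result = emb.tolist()[0]        # list[float]; element-by-element copy

# New behaviour: index the query, move to CPU, and view it as a numpy array.
new_result = emb[0].cpu().numpy()   # numpy.ndarray; no intermediate Python list

print(type(old_result), type(new_result))  # <class 'list'> <class 'numpy.ndarray'>
```

The practical effect is that `encode_queries` now returns a numpy array rather than a Python list, skipping the list conversion on every query.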