Mirror of https://github.com/infiniflow/ragflow.git, synced 2025-12-08 12:32:30 +08:00
### What problem does this PR solve?

Fixes #10933

This PR fixes a `TypeError` in the Gemini model provider where the `total_token_count_from_response()` function could receive a `None` response object, causing the error:

    TypeError: argument of type 'NoneType' is not iterable

**Root Cause:** The function attempted to use the `in` operator to check dictionary keys (lines 48, 54, 60) without first validating that `resp` was not `None`. When Gemini's `chat_streamly()` method returns `None`, this triggers the error.

**Solution:**

1. Added a null check at the beginning of the function to return `0` if `resp is None`
2. Added `isinstance(resp, dict)` checks before all `in` operations to ensure type safety
3. This defensive-programming approach prevents the `TypeError` while maintaining backward compatibility

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)

### Changes Made

**File:** `rag/utils/__init__.py`

- Lines 36-38: Added the `if resp is None: return 0` check
- Line 52: Added `isinstance(resp, dict)` before `'usage' in resp`
- Line 58: Added `isinstance(resp, dict)` before `'usage' in resp`
- Line 64: Added `isinstance(resp, dict)` before `'meta' in resp`

### Testing

- [x] Code compiles without errors
- [x] Follows existing code style and conventions
- [x] Change is minimal and focused on the specific issue

### Additional Notes

This fix ensures robust handling of various response types from LLM providers, particularly Gemini, whose `chat_streamly()` method can return `None`.

---------

Signed-off-by: Zhang Zhefang <zhangzhefang@example.com>
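For reference, a minimal sketch of the failure mode described above (not code from the repository): Python's `in` operator raises exactly this `TypeError` when its right-hand operand is `None`.

```python
resp = None  # e.g. what Gemini's chat_streamly() handed back

try:
    "usage" in resp  # the unguarded membership test the old code performed
except TypeError as e:
    print(e)  # argument of type 'NoneType' is not iterable
```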
83 lines · 2.7 KiB · Python
```python
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

import tiktoken

from common.file_utils import get_project_base_directory

tiktoken_cache_dir = get_project_base_directory()
os.environ["TIKTOKEN_CACHE_DIR"] = tiktoken_cache_dir
# encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")
encoder = tiktoken.get_encoding("cl100k_base")
```
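tiktoken consults the `TIKTOKEN_CACHE_DIR` environment variable when loading BPE files, so pinning it to the project base directory before the first `get_encoding()` call lets a pre-shipped cache be used instead of a network fetch. As a point of reference (not part of the file), the module-level `encoder` round-trips text like any tiktoken encoding:

```python
ids = encoder.encode("hello world")          # list of integer token IDs under cl100k_base
assert encoder.decode(ids) == "hello world"  # decoding restores the original text
```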
```python
def num_tokens_from_string(string: str) -> int:
    """Returns the number of tokens in a text string."""
    try:
        code_list = encoder.encode(string)
        return len(code_list)
    except Exception:
        return 0
```
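The broad `except Exception` means token counting can never crash a caller; it degrades to `0` instead. A quick illustrative sketch (passing a non-string is just one hypothetical way to hit the fallback):

```python
print(num_tokens_from_string("hello world"))  # token count, e.g. 2 under cl100k_base
print(num_tokens_from_string(None))           # encoder.encode(None) raises internally, so prints 0
```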
```python
def total_token_count_from_response(resp):
    """
    Extract token count from LLM response in various formats.

    Handles None responses and different response structures from various LLM providers.
    Returns 0 if token count cannot be determined.
    """
    # The null check added by this PR: a None response yields 0 instead of a TypeError.
    if resp is None:
        return 0

    # Attribute style: resp.usage.total_tokens (e.g. OpenAI-SDK-like objects).
    if hasattr(resp, "usage") and hasattr(resp.usage, "total_tokens"):
        try:
            return resp.usage.total_tokens
        except Exception:
            pass

    # Attribute style: resp.usage_metadata.total_tokens.
    if hasattr(resp, "usage_metadata") and hasattr(resp.usage_metadata, "total_tokens"):
        try:
            return resp.usage_metadata.total_tokens
        except Exception:
            pass

    # Dict style: resp["usage"]["total_tokens"]; isinstance guards the `in` checks.
    if isinstance(resp, dict) and 'usage' in resp and 'total_tokens' in resp['usage']:
        try:
            return resp["usage"]["total_tokens"]
        except Exception:
            pass

    # Dict style with separate input/output counts.
    if isinstance(resp, dict) and 'usage' in resp and 'input_tokens' in resp['usage'] and 'output_tokens' in resp['usage']:
        try:
            return resp["usage"]["input_tokens"] + resp["usage"]["output_tokens"]
        except Exception:
            pass

    # Dict style with counts nested under resp["meta"]["tokens"].
    if isinstance(resp, dict) and 'meta' in resp and 'tokens' in resp['meta'] and 'input_tokens' in resp['meta']['tokens'] and 'output_tokens' in resp['meta']['tokens']:
        try:
            return resp["meta"]["tokens"]["input_tokens"] + resp["meta"]["tokens"]["output_tokens"]
        except Exception:
            pass

    return 0
```
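The ordering matters: attribute-style checks run before dict lookups, so an object exposing both shapes resolves via its attributes first. Below is a hedged sketch exercising every branch, including the `None` case this PR fixes; it assumes the module imports as `rag.utils`, and the `SimpleNamespace` stand-ins are illustrative, not real provider response types:

```python
from types import SimpleNamespace

from rag.utils import total_token_count_from_response

# Attribute style: resp.usage.total_tokens (OpenAI-SDK-like objects)
openai_like = SimpleNamespace(usage=SimpleNamespace(total_tokens=42))
assert total_token_count_from_response(openai_like) == 42

# Attribute style: resp.usage_metadata.total_tokens
metadata_like = SimpleNamespace(usage_metadata=SimpleNamespace(total_tokens=17))
assert total_token_count_from_response(metadata_like) == 17

# Dict styles, in the order the function checks them
assert total_token_count_from_response({"usage": {"total_tokens": 10}}) == 10
assert total_token_count_from_response({"usage": {"input_tokens": 3, "output_tokens": 4}}) == 7
assert total_token_count_from_response({"meta": {"tokens": {"input_tokens": 1, "output_tokens": 2}}}) == 3

# The case this PR fixes: a None response yields 0 instead of a TypeError
assert total_token_count_from_response(None) == 0

# Unrecognized shapes also fall through to 0
assert total_token_count_from_response("not a usage payload") == 0
```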
```python
def truncate(string: str, max_len: int) -> str:
    """Returns the text truncated to at most max_len tokens."""
    return encoder.decode(encoder.encode(string)[:max_len])
```
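`truncate` clips in token space rather than character space: it encodes the string, slices the first `max_len` token IDs, and decodes them back to text. A short illustrative example (the exact cut point depends on the `cl100k_base` vocabulary):

```python
text = "The quick brown fox jumps over the lazy dog."

print(num_tokens_from_string(text))  # total tokens in the sentence
print(truncate(text, 5))             # text reconstructed from only the first 5 tokens
```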