Mirror of https://github.com/infiniflow/ragflow.git (synced 2025-12-08 20:42:30 +08:00)
Refa: remove temperature since some LLMs fail to support it. (#8981)
### What problem does this PR solve?

Some LLMs reject requests that set the `temperature` parameter, so the hard-coded `gen_conf = {"temperature": 0.3}` in `GraphExtractor` is removed and an empty generation config is passed instead.

### Type of change

- [x] Refactoring
```diff
@@ -105,10 +105,9 @@ class GraphExtractor(Extractor):
             **self._prompt_variables,
             self._input_text_key: content,
         }
-        gen_conf = {"temperature": 0.3}
         hint_prompt = perform_variable_replacements(self._extraction_prompt, variables=variables)
         async with chat_limiter:
-            response = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf))
+            response = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], {}))
         token_count += num_tokens_from_string(hint_prompt + response)

         results = response or ""
@@ -118,7 +117,7 @@ class GraphExtractor(Extractor):
         for i in range(self._max_gleanings):
             history.append({"role": "user", "content": CONTINUE_PROMPT})
             async with chat_limiter:
-                response = await trio.to_thread.run_sync(lambda: self._chat("", history, gen_conf))
+                response = await trio.to_thread.run_sync(lambda: self._chat("", history, {}))
             token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + response)
             results += response or ""
```
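The PR takes the simplest route: drop the generation config entirely, letting each provider fall back to its own default temperature. If one instead wanted to keep a default temperature for models that accept it, a minimal sketch of an alternative is below. This is not what the PR does; the `build_gen_conf` helper and the deny-list of model names are hypothetical, for illustration only.

```python
# Hypothetical sketch: keep a default temperature where supported, drop it
# otherwise. The helper and the deny-list are illustrative, not part of ragflow.

# Assumption: example model names that reject the temperature parameter.
MODELS_WITHOUT_TEMPERATURE = {"o1", "o1-mini", "o3-mini"}


def build_gen_conf(model_name: str, temperature: float = 0.3) -> dict:
    """Return a generation config, omitting temperature for models that reject it."""
    if model_name in MODELS_WITHOUT_TEMPERATURE:
        return {}
    return {"temperature": temperature}


# Usage inside the extractor would then look like:
#     gen_conf = build_gen_conf(self._llm_model_name)
#     response = self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf)
```

The trade-off is maintenance cost: a per-model deny-list must track every provider's quirks, whereas passing `{}` works everywhere at the price of losing explicit control over sampling temperature.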