Feat: change default models (#7777)
### What problem does this PR solve?

Change the default models to built-in models.

https://github.com/infiniflow/ragflow/issues/7774

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
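For context, here is a minimal, standalone sketch of the pattern the updated tests in the diff below exercise: building a `Chat.LLM` configuration and passing it to `create_chat` so the assistant uses an explicitly named built-in model instead of the server-side default. The API key, base URL, and the `list_datasets` lookup of an existing knowledge base are placeholders and assumptions, not taken from this commit; only the `Chat.LLM(...)` and `create_chat(..., llm=llm)` calls mirror the test code.

```python
from ragflow_sdk import RAGFlow
from ragflow_sdk.modules.chat import Chat

# Placeholders: point these at your own RAGFlow server and API key.
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://127.0.0.1:9380")

# Assumes a dataset (knowledge base) named "test_kb" already exists and
# contains at least one parsed document with chunks.
kb = rag.list_datasets(name="test_kb")[0]

# Pin the chat model explicitly using the "<model>@<factory>" form used in
# the tests, rather than relying on the deployment's default chat model.
llm = Chat.LLM(
    rag,
    {
        "model_name": "glm-4-flash@ZHIPU-AI",
        "temperature": 0.1,
        "top_p": 0.3,
        "presence_penalty": 0.4,
        "frequency_penalty": 0.7,
        "max_tokens": 512,
    },
)

# Create a chat assistant bound to the dataset and the pinned model.
chat = rag.create_chat("my_chat", dataset_ids=[kb.id], llm=llm)
print(chat.id)
```

Pinning the model this way keeps the SDK tests deterministic when the default model configured on the server changes, which appears to be the motivation for this commit.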
@@ -14,8 +14,9 @@
 # limitations under the License.
 #
 
-from ragflow_sdk import RAGFlow
 from common import HOST_ADDRESS
+from ragflow_sdk import RAGFlow
+from ragflow_sdk.modules.chat import Chat
 
 
 def test_create_chat_with_name(get_api_key_fixture):
@@ -31,7 +32,18 @@ def test_create_chat_with_name(get_api_key_fixture):
     docs = kb.upload_documents(documents)
     for doc in docs:
         doc.add_chunk("This is a test to add chunk")
-    rag.create_chat("test_create_chat", dataset_ids=[kb.id])
+    llm = Chat.LLM(
+        rag,
+        {
+            "model_name": "glm-4-flash@ZHIPU-AI",
+            "temperature": 0.1,
+            "top_p": 0.3,
+            "presence_penalty": 0.4,
+            "frequency_penalty": 0.7,
+            "max_tokens": 512,
+        },
+    )
+    rag.create_chat("test_create_chat", dataset_ids=[kb.id], llm=llm)
 
 
 def test_update_chat_with_name(get_api_key_fixture):
@@ -47,7 +59,18 @@ def test_update_chat_with_name(get_api_key_fixture):
     docs = kb.upload_documents(documents)
     for doc in docs:
         doc.add_chunk("This is a test to add chunk")
-    chat = rag.create_chat("test_update_chat", dataset_ids=[kb.id])
+    llm = Chat.LLM(
+        rag,
+        {
+            "model_name": "glm-4-flash@ZHIPU-AI",
+            "temperature": 0.1,
+            "top_p": 0.3,
+            "presence_penalty": 0.4,
+            "frequency_penalty": 0.7,
+            "max_tokens": 512,
+        },
+    )
+    chat = rag.create_chat("test_update_chat", dataset_ids=[kb.id], llm=llm)
     chat.update({"name": "new_chat"})
 
 
@@ -64,7 +87,18 @@ def test_delete_chats_with_success(get_api_key_fixture):
     docs = kb.upload_documents(documents)
     for doc in docs:
         doc.add_chunk("This is a test to add chunk")
-    chat = rag.create_chat("test_delete_chat", dataset_ids=[kb.id])
+    llm = Chat.LLM(
+        rag,
+        {
+            "model_name": "glm-4-flash@ZHIPU-AI",
+            "temperature": 0.1,
+            "top_p": 0.3,
+            "presence_penalty": 0.4,
+            "frequency_penalty": 0.7,
+            "max_tokens": 512,
+        },
+    )
+    chat = rag.create_chat("test_delete_chat", dataset_ids=[kb.id], llm=llm)
     rag.delete_chats(ids=[chat.id])
 
 
@@ -81,6 +115,17 @@ def test_list_chats_with_success(get_api_key_fixture):
     docs = kb.upload_documents(documents)
     for doc in docs:
         doc.add_chunk("This is a test to add chunk")
-    rag.create_chat("test_list_1", dataset_ids=[kb.id])
-    rag.create_chat("test_list_2", dataset_ids=[kb.id])
+    llm = Chat.LLM(
+        rag,
+        {
+            "model_name": "glm-4-flash@ZHIPU-AI",
+            "temperature": 0.1,
+            "top_p": 0.3,
+            "presence_penalty": 0.4,
+            "frequency_penalty": 0.7,
+            "max_tokens": 512,
+        },
+    )
+    rag.create_chat("test_list_1", dataset_ids=[kb.id], llm=llm)
+    rag.create_chat("test_list_2", dataset_ids=[kb.id], llm=llm)
     rag.list_chats()