refine templates of graph (#1368)

### What problem does this PR solve?

#918 
### Type of change

- [x] New Feature (non-breaking change which adds functionality)
Author: KevinHuSh
Date: 2024-07-04 10:33:49 +08:00
Committed by: GitHub
Parent commit: 3ccb62910b
Commit: 4122695a1a
8 changed files with 984 additions and 252 deletions

View File

@@ -36,7 +36,7 @@ def templates():
 @login_required
 def canvas_list():
     return get_json_result(data=sorted([c.to_dict() for c in \
-        UserCanvasService.query(user_id=current_user.id)], key=lambda x: x["update_time"])
+        UserCanvasService.query(user_id=current_user.id)], key=lambda x: x["update_time"]*-1)
     )
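
Note on the `canvas_list` change above: negating the sort key sorts canvases by `update_time` in descending order, so the most recently updated canvas comes first. A minimal, self-contained sketch of the effect (made-up records, not the project's `UserCanvasService` data):

```python
# Sorting by the negated timestamp yields newest-first order,
# equivalent to passing reverse=True with the plain key.
canvases = [
    {"id": "a", "update_time": 1719800000},
    {"id": "b", "update_time": 1720000000},
    {"id": "c", "update_time": 1719900000},
]

newest_first = sorted(canvases, key=lambda x: x["update_time"] * -1)
print([c["id"] for c in newest_first])  # ['b', 'c', 'a']
```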
@@ -148,7 +148,7 @@ def reset():
     req = request.json
     try:
         user_canvas = UserCanvasService.get_by_id(req["canvas_id"])
-        canvas = Canvas(req["dsl"], current_user.id)
+        canvas = Canvas(user_canvas.dsl, current_user.id)
         canvas.reset()
         req["dsl"] = json.loads(str(canvas))
         UserCanvasService.update_by_id(req["canvas_id"], dsl=req["dsl"])
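
Note on the `reset` change above: the canvas is now rebuilt from the DSL persisted with the `user_canvas` record instead of whatever DSL the client posted. A rough sketch of the difference, using an invented stub in place of the project's `Canvas` class:

```python
# Illustrative stub only -- not the project's Canvas implementation.
class Canvas:
    def __init__(self, dsl, user_id):
        self.dsl = dsl          # definition the reset starts from
        self.user_id = user_id

    def reset(self):
        # clears runtime state while keeping self.dsl
        self.history = []
        self.path = []

stored_dsl = '{"components": {}}'  # DSL saved with the canvas record
posted_dsl = '{"components": {}}'  # DSL sent in the request body (may be stale)

# Before: canvas = Canvas(posted_dsl, "user-1")  # trusted the client copy
canvas = Canvas(stored_dsl, "user-1")            # after: reset from the stored DSL
canvas.reset()
```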

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,7 @@
 {
     "components": {
         "begin": {
-            "obj": {
+            "obj":{
                 "component_name": "Begin",
                 "params": {
                     "prologue": "Hi! How can I help you?"
@@ -16,14 +16,7 @@
                 "params": {}
             },
             "downstream": ["categorize:0"],
-            "upstream": [
-                "begin",
-                "generate:casual",
-                "generate:answer",
-                "generate:complain",
-                "generate:ask_contact",
-                "message:get_contact"
-            ]
+            "upstream": ["begin", "generate:0", "generate:casual", "generate:answer", "generate:complain", "generate:ask_contact", "message:get_contact"]
         },
         "categorize:0": {
             "obj": {
@@ -55,12 +48,7 @@
                     "message_history_window_size": 8
                 }
             },
-            "downstream": [
-                "retrieval:0",
-                "generate:casual",
-                "generate:complain",
-                "message:get_contact"
-            ],
+            "downstream": ["retrieval:0", "generate:casual", "generate:complain", "message:get_contact"],
             "upstream": ["answer:0"]
         },
         "generate:casual": {
@@ -146,7 +134,7 @@
             "upstream": ["relevant:0"]
         },
         "message:get_contact": {
-            "obj": {
+            "obj":{
                 "component_name": "Message",
                 "params": {
                     "messages": [
@@ -164,6 +152,6 @@
     "history": [],
     "messages": [],
     "path": [],
-    "reference": {},
+    "reference": [],
     "answer": []
 }

View File

@@ -205,6 +205,6 @@
     "history": [],
     "messages": [],
     "path": [],
-    "reference": {},
+    "reference": [],
     "answer": []
 }
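
The `"reference": {}` to `"reference": []` change in the two template hunks above matters for any consumer that appends per-turn references to that field; a list supports `append`, an empty dict does not. A small sketch of the distinction (the consumer code here is assumed, not taken from the repo):

```python
import json

# Template state as initialized after this change ("reference" is a list).
state = json.loads('{"history": [], "messages": [], "path": [], "reference": [], "answer": []}')

# A list accepts per-turn reference entries in order:
state["reference"].append({"doc": "example.pdf", "chunks": []})

# With the old initial value of {} the same call would raise:
#   AttributeError: 'dict' object has no attribute 'append'
```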

View File

@@ -0,0 +1,39 @@
+{
+    "components": {
+        "begin": {
+            "obj":{
+                "component_name": "Begin",
+                "params": {
+                    "prologue": "Hi there! Please enter the text you want to translate in format like: 'text you want to translate' => target language. For an example: 您好! => English"
+                }
+            },
+            "downstream": ["answer:0"],
+            "upstream": []
+        },
+        "answer:0": {
+            "obj": {
+                "component_name": "Answer",
+                "params": {}
+            },
+            "downstream": ["generate:0"],
+            "upstream": ["begin", "generate:0"]
+        },
+        "generate:0": {
+            "obj": {
+                "component_name": "Generate",
+                "params": {
+                    "llm_id": "deepseek-chat",
+                    "prompt": "You are an professional interpreter.\n- Role: an professional interpreter.\n- Input format: content need to be translated => target language. \n- Answer format: => translated content in target language. \n- Examples:\n - user: 您好! => English. assistant: => How are you doing!\n - user: You look good today. => Japanese. assistant: => 今日は調子がいいですね 。\n",
+                    "temperature": 0.5
+                }
+            },
+            "downstream": ["answer:0"],
+            "upstream": ["answer:0"]
+        }
+    },
+    "history": [],
+    "messages": [],
+    "reference": {},
+    "path": [],
+    "answer": []
+}
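
Since the new translation template above is plain JSON, a quick consistency check is to load it and confirm every `downstream`/`upstream` entry names a declared component. The file path and the check below are hypothetical, not part of this PR:

```python
import json

# Hypothetical location of the template shown above -- adjust as needed.
with open("interpreter_template.json", encoding="utf-8") as f:
    dsl = json.load(f)

components = dsl["components"]
for name, comp in components.items():
    for direction in ("downstream", "upstream"):
        for target in comp.get(direction, []):
            # "begin" is itself a component, so any dangling edge is a template bug.
            assert target in components, f"{name}.{direction} -> unknown component '{target}'"
print("graph edges are consistent")
```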

View File

@@ -108,6 +108,7 @@ class AzureGptV4(Base):
         )
         return res.choices[0].message.content.strip(), res.usage.total_tokens
+
 
 class QWenCV(Base):
     def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese", **kwargs):
         import dashscope