Feat: user defined prompt. (#9972)
### What problem does this PR solve?

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
@@ -16,6 +16,7 @@
 import base64
 import json
 import logging
+import re
 import time
 from concurrent.futures import ThreadPoolExecutor
 from copy import deepcopy
@@ -300,9 +301,11 @@ class Canvas(Graph):
                     yield decorate("message", {"content": m})
                     _m += m
                 cpn_obj.set_output("content", _m)
+                cite = re.search(r"\[ID:[ 0-9]+\]", _m)
             else:
                 yield decorate("message", {"content": cpn_obj.output("content")})
-                yield decorate("message_end", {"reference": self.get_reference()})
+                cite = re.search(r"\[ID:[ 0-9]+\]", cpn_obj.output("content"))
+            yield decorate("message_end", {"reference": self.get_reference() if cite else None})

             while partials:
                 _cpn_obj = self.get_component_obj(partials[0])
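The `message_end` event now carries the retrieval reference only when the generated answer actually contains `[ID:n]` citation markers. A minimal standalone sketch of that gating check, for illustration only:

```python
import re

# Citation markers produced by the citation prompt look like "[ID:0]" or
# "[ID: 12]"; the reference payload is attached to the final stream event
# only when at least one marker survives in the answer text.
def should_attach_reference(answer: str) -> bool:
    return re.search(r"\[ID:[ 0-9]+\]", answer) is not None

assert should_attach_reference("Paris is the capital of France [ID:3].")
assert not should_attach_reference("Hello! How can I help you today?")
```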
@@ -155,12 +155,12 @@ class Agent(LLM, ToolBase):
         if not self.tools:
             return LLM._invoke(self, **kwargs)

-        prompt, msg = self._prepare_prompt_variables()
+        prompt, msg, user_defined_prompt = self._prepare_prompt_variables()

         downstreams = self._canvas.get_component(self._id)["downstream"] if self._canvas.get_component(self._id) else []
         ex = self.exception_handler()
         if any([self._canvas.get_component_obj(cid).component_name.lower()=="message" for cid in downstreams]) and not self._param.output_structure and not (ex and ex["goto"]):
-            self.set_output("content", partial(self.stream_output_with_tools, prompt, msg))
+            self.set_output("content", partial(self.stream_output_with_tools, prompt, msg, user_defined_prompt))
             return

         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
@@ -182,11 +182,11 @@ class Agent(LLM, ToolBase):
         self.set_output("use_tools", use_tools)
         return ans

-    def stream_output_with_tools(self, prompt, msg):
+    def stream_output_with_tools(self, prompt, msg, user_defined_prompt={}):
         _, msg = message_fit_in([{"role": "system", "content": prompt}, *msg], int(self.chat_mdl.max_length * 0.97))
         answer_without_toolcall = ""
         use_tools = []
-        for delta_ans,_ in self._react_with_tools_streamly(prompt, msg, use_tools):
+        for delta_ans,_ in self._react_with_tools_streamly(prompt, msg, use_tools, user_defined_prompt):
             if delta_ans.find("**ERROR**") >= 0:
                 if self.get_exception_default_value():
                     self.set_output("content", self.get_exception_default_value())
@@ -209,7 +209,7 @@ class Agent(LLM, ToolBase):
         ]):
             yield delta_ans

-    def _react_with_tools_streamly(self, prompt, history: list[dict], use_tools):
+    def _react_with_tools_streamly(self, prompt, history: list[dict], use_tools, user_defined_prompt={}):
         token_count = 0
         tool_metas = self.tool_meta
         hist = deepcopy(history)
@@ -230,7 +230,7 @@ class Agent(LLM, ToolBase):
             # last_calling,
             # last_calling != name
             #]):
-            #    self.toolcall_session.get_tool_obj(name).add2system_prompt(f"The chat history with other agents are as following: \n" + self.get_useful_memory(user_request, str(args["user_prompt"])))
+            #    self.toolcall_session.get_tool_obj(name).add2system_prompt(f"The chat history with other agents are as following: \n" + self.get_useful_memory(user_request, str(args["user_prompt"]),user_defined_prompt))
             last_calling = name
             tool_response = self.toolcall_session.tool_call(name, args)
             use_tools.append({
@@ -239,7 +239,7 @@ class Agent(LLM, ToolBase):
                 "results": tool_response
             })
             # self.callback("add_memory", {}, "...")
-            #self.add_memory(hist[-2]["content"], hist[-1]["content"], name, args, str(tool_response))
+            #self.add_memory(hist[-2]["content"], hist[-1]["content"], name, args, str(tool_response), user_defined_prompt)

             return name, tool_response
@@ -279,10 +279,10 @@ class Agent(LLM, ToolBase):
             hist.append({"role": "user", "content": content})

         st = timer()
-        task_desc = analyze_task(self.chat_mdl, prompt, user_request, tool_metas)
+        task_desc = analyze_task(self.chat_mdl, prompt, user_request, tool_metas, user_defined_prompt)
         self.callback("analyze_task", {}, task_desc, elapsed_time=timer()-st)
         for _ in range(self._param.max_rounds + 1):
-            response, tk = next_step(self.chat_mdl, hist, tool_metas, task_desc)
+            response, tk = next_step(self.chat_mdl, hist, tool_metas, task_desc, user_defined_prompt)
             # self.callback("next_step", {}, str(response)[:256]+"...")
             token_count += tk
             hist.append({"role": "assistant", "content": response})
@@ -307,7 +307,7 @@ class Agent(LLM, ToolBase):
                     thr.append(executor.submit(use_tool, name, args))

                 st = timer()
-                reflection = reflect(self.chat_mdl, hist, [th.result() for th in thr])
+                reflection = reflect(self.chat_mdl, hist, [th.result() for th in thr], user_defined_prompt)
                 append_user_content(hist, reflection)
                 self.callback("reflection", {}, str(reflection), elapsed_time=timer()-st)
@@ -334,10 +334,10 @@ Respond immediately with your final comprehensive answer.
         for txt, tkcnt in complete():
             yield txt, tkcnt

-    def get_useful_memory(self, goal: str, sub_goal:str, topn=3) -> str:
+    def get_useful_memory(self, goal: str, sub_goal:str, topn=3, user_defined_prompt:dict={}) -> str:
         # self.callback("get_useful_memory", {"topn": 3}, "...")
         mems = self._canvas.get_memory()
-        rank = rank_memories(self.chat_mdl, goal, sub_goal, [summ for (user, assist, summ) in mems])
+        rank = rank_memories(self.chat_mdl, goal, sub_goal, [summ for (user, assist, summ) in mems], user_defined_prompt)
         try:
             rank = json_repair.loads(re.sub(r"```.*", "", rank))[:topn]
             mems = [mems[r] for r in rank]
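`get_useful_memory` expects `rank_memories` to return a JSON list of memory indices, possibly wrapped in a markdown fence. A hedged sketch of that parsing step; the bounds check and the `DOTALL` flag are additions for illustration, slightly more defensive than the one-liner above:

```python
import re
import json_repair  # lenient JSON parser already used by the repo

def parse_rank(rank: str, n_memories: int, topn: int = 3) -> list[int]:
    # Strip any markdown fence the model emitted, repair/parse the JSON
    # list of indices, then keep only in-range integers.
    cleaned = re.sub(r"```.*", "", rank, flags=re.DOTALL)
    indices = json_repair.loads(cleaned)[:topn]
    return [i for i in indices if isinstance(i, int) and 0 <= i < n_memories]

print(parse_rank("[2, 0, 5]", n_memories=4))  # -> [2, 0]
```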
@@ -145,12 +145,23 @@ class LLM(ComponentBase):
             msg.append(deepcopy(p))

         sys_prompt = self.string_format(sys_prompt, args)
+        user_defined_prompt, sys_prompt = self._extract_prompts(sys_prompt)
         for m in msg:
             m["content"] = self.string_format(m["content"], args)
         if self._param.cite and self._canvas.get_reference()["chunks"]:
-            sys_prompt += citation_prompt()
+            sys_prompt += citation_prompt(user_defined_prompt)

-        return sys_prompt, msg
+        return sys_prompt, msg, user_defined_prompt
+
+    def _extract_prompts(self, sys_prompt):
+        pts = {}
+        for tag in ["TASK_ANALYSIS", "PLAN_GENERATION", "REFLECTION", "CONTEXT_SUMMARY", "CONTEXT_RANKING", "CITATION_GUIDELINES"]:
+            r = re.search(rf"<{tag}>(.*?)</{tag}>", sys_prompt, flags=re.DOTALL|re.IGNORECASE)
+            if not r:
+                continue
+            pts[tag.lower()] = r.group(1)
+            sys_prompt = re.sub(rf"<{tag}>(.*?)</{tag}>", "", sys_prompt, flags=re.DOTALL|re.IGNORECASE)
+        return pts, sys_prompt

     def _generate(self, msg:list[dict], **kwargs) -> str:
         if not self.imgs:
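This is the heart of the feature: users embed tagged sections such as `<REFLECTION>...</REFLECTION>` directly in an agent's system prompt, and `_extract_prompts` lifts each one into a dict keyed by the lowercase tag name while stripping it from the prompt itself. A self-contained sketch mirroring that logic:

```python
import re

TAGS = ["TASK_ANALYSIS", "PLAN_GENERATION", "REFLECTION",
        "CONTEXT_SUMMARY", "CONTEXT_RANKING", "CITATION_GUIDELINES"]

def extract_prompts(sys_prompt: str) -> tuple[dict, str]:
    # Mirrors LLM._extract_prompts: pull each tagged override out of the
    # system prompt and return (overrides, cleaned prompt).
    pts = {}
    for tag in TAGS:
        r = re.search(rf"<{tag}>(.*?)</{tag}>", sys_prompt, flags=re.DOTALL | re.IGNORECASE)
        if not r:
            continue
        pts[tag.lower()] = r.group(1)
        sys_prompt = re.sub(rf"<{tag}>(.*?)</{tag}>", "", sys_prompt, flags=re.DOTALL | re.IGNORECASE)
    return pts, sys_prompt

overrides, cleaned = extract_prompts(
    "You are a helpful agent.\n<REFLECTION>Keep reflections under 50 words.</REFLECTION>"
)
print(overrides)  # {'reflection': 'Keep reflections under 50 words.'}
print(cleaned)    # 'You are a helpful agent.\n'
```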
@@ -198,7 +209,7 @@ class LLM(ComponentBase):
             ans = re.sub(r"^.*```json", "", ans, flags=re.DOTALL)
             return re.sub(r"```\n*$", "", ans, flags=re.DOTALL)

-        prompt, msg = self._prepare_prompt_variables()
+        prompt, msg, _ = self._prepare_prompt_variables()
         error = ""

         if self._param.output_structure:
@@ -262,11 +273,11 @@ class LLM(ComponentBase):
             answer += ans
         self.set_output("content", answer)

-    def add_memory(self, user:str, assist:str, func_name: str, params: dict, results: str):
-        summ = tool_call_summary(self.chat_mdl, func_name, params, results)
+    def add_memory(self, user:str, assist:str, func_name: str, params: dict, results: str, user_defined_prompt:dict={}):
+        summ = tool_call_summary(self.chat_mdl, func_name, params, results, user_defined_prompt)
         logging.info(f"[MEMORY]: {summ}")
         self._canvas.add_memory(user, assist, summ)

     def thoughts(self) -> str:
-        _, msg = self._prepare_prompt_variables()
+        _, msg,_ = self._prepare_prompt_variables()
         return "⌛Give me a moment—starting from: \n\n" + re.sub(r"(User's query:|[\\]+)", '', msg[-1]['content'], flags=re.DOTALL) + "\n\nI’ll figure out our best next move."
@@ -470,3 +470,16 @@ def sessions(canvas_id):
     except Exception as e:
         return server_error_response(e)


+@manager.route('/prompts', methods=['GET'])  # noqa: F821
+@login_required
+def prompts():
+    from rag.prompts.prompts import ANALYZE_TASK_SYSTEM, ANALYZE_TASK_USER, NEXT_STEP, REFLECT, SUMMARY4MEMORY, RANK_MEMORY, CITATION_PROMPT_TEMPLATE
+    return get_json_result(data={
+        "task_analysis": ANALYZE_TASK_SYSTEM + ANALYZE_TASK_USER,
+        "plan_generation": NEXT_STEP,
+        "reflection": REFLECT,
+        "context_summary": SUMMARY4MEMORY,
+        "context_ranking": RANK_MEMORY,
+        "citation_guidelines": CITATION_PROMPT_TEMPLATE
+    })
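The new endpoint exposes the built-in templates so a client can show users exactly what each tag overrides. A hedged client sketch; the base URL, port, and auth header are assumptions about a typical deployment, not part of this PR:

```python
import requests

BASE = "http://localhost:9380/v1/canvas"  # hypothetical deployment URL
session = requests.Session()
session.headers["Authorization"] = "Bearer <your-token>"  # placeholder token

resp = session.get(f"{BASE}/prompts")
resp.raise_for_status()
defaults = resp.json()["data"]
print(sorted(defaults))
# ['citation_guidelines', 'context_ranking', 'context_summary',
#  'plan_generation', 'reflection', 'task_analysis']
```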
@@ -400,6 +400,8 @@ def related_questions():
     chat_mdl = LLMBundle(current_user.id, LLMType.CHAT, chat_id)

     gen_conf = search_config.get("llm_setting", {"temperature": 0.9})
+    if "parameter" in gen_conf:
+        del gen_conf["parameter"]
     prompt = load_prompt("related_question")
     ans = chat_mdl.chat(
         prompt,
@@ -687,8 +687,20 @@ def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。;!?"):
         tk_nums[-1] += tnum

     dels = get_delimiters(delimiter)
+    line = ""
     for sec, image in sections:
-        split_sec = re.split(r"(%s)" % dels, sec)
+        if not image:
+            line += sec + "\n"
+            continue
+        split_sec = re.split(r"(%s)" % dels, line + sec)
         for sub_sec in split_sec:
             if re.match(f"^{dels}$", sub_sec):
                 continue
+            add_chunk(sub_sec, image,"")
+        line = ""
+
+    if line:
+        split_sec = re.split(r"(%s)" % dels, line)
+        for sub_sec in split_sec:
+            if re.match(f"^{dels}$", sub_sec):
+                continue
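The docx merge change buffers text-only sections in `line` until a section carrying an image arrives, then splits the accumulated text together with that section so the resulting chunks keep the image association; a final flush handles trailing text. A simplified, self-contained sketch of that buffering pattern, with a single delimiter and a stub `add_chunk` standing in for `get_delimiters` and the real chunker:

```python
import re

dels = "。"  # stand-in for get_delimiters(delimiter)
chunks = []

def add_chunk(text, image, _extra):
    if text.strip():
        chunks.append((text, image))

sections = [("intro text", None), ("figure caption。more", "IMG")]

line = ""
for sec, image in sections:
    if not image:
        line += sec + "\n"   # buffer text-only sections
        continue
    for sub_sec in re.split(r"(%s)" % dels, line + sec):
        if re.match(f"^{dels}$", sub_sec):
            continue         # skip the captured delimiters themselves
        add_chunk(sub_sec, image, "")
    line = ""

if line:  # flush any trailing text-only sections
    for sub_sec in re.split(r"(%s)" % dels, line):
        if not re.match(f"^{dels}$", sub_sec):
            add_chunk(sub_sec, None, "")

print(chunks)  # [('intro text\nfigure caption', 'IMG'), ('more', 'IMG')]
```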
@@ -1,8 +1,48 @@
-Your responsibility is to execute assigned tasks to a high standard. Please:
-1. Carefully analyze the task requirements.
-2. Develop a reasonable execution plan.
-3. Execute step-by-step and document the reasoning process.
-4. Provide clear and accurate results.
-
-If difficulties are encountered, clearly state the problem and explore alternative approaches.
-
+You are an intelligent task analyzer that adapts analysis depth to task complexity.
+
+**Analysis Framework**
+
+**Step 1: Task Transmission Assessment**
+**Note**: This section is not subject to word count limitations when transmission is needed, as it serves critical handoff functions.
+
+**Evaluate if task transmission information is needed:**
+- **Is this an initial step?** If yes, skip this section
+- **Are there upstream agents/steps?** If no, provide minimal transmission
+- **Is there critical state/context to preserve?** If yes, include full transmission
+
+### If Task Transmission is Needed:
+- **Current State Summary**: [1-2 sentences on where we are]
+- **Key Data/Results**: [Critical findings that must carry forward]
+- **Context Dependencies**: [Essential context for next agent/step]
+- **Unresolved Items**: [Issues requiring continuation]
+- **Status for User**: [Clear status update in user terms]
+- **Technical State**: [System state for technical handoffs]
+
+**Step 2: Complexity Classification**
+Classify as LOW / MEDIUM / HIGH:
+- **LOW**: Single-step tasks, direct queries, small talk
+- **MEDIUM**: Multi-step tasks within one domain
+- **HIGH**: Multi-domain coordination or complex reasoning
+
+**Step 3: Adaptive Analysis**
+Scale depth to match complexity. Always stop once success criteria are met.
+
+**For LOW (max 50 words for analysis only):**
+- Detect small talk; if true, output exactly: `Small talk — no further analysis needed`
+- One-sentence objective
+- Direct execution approach (1–2 steps)
+
+**For MEDIUM (80–150 words for analysis only):**
+- Objective; Intent & Scope
+- 3–5 step minimal Plan (may mark parallel steps)
+- **Uncertainty & Probes** (at least one probe with a clear stop condition)
+- Success Criteria + basic Failure detection & fallback
+- **Source Plan** (how evidence will be obtained/verified)
+
+**For HIGH (150–250 words for analysis only):**
+- Comprehensive objective analysis; Intent & Scope
+- 5–8 step Plan with dependencies/parallelism
+- **Uncertainty & Probes** (key unknowns → probe → stop condition)
+- Measurable Success Criteria; Failure detectors & fallbacks
+- **Source Plan** (evidence acquisition & validation)
+- **Reflection Hooks** (escalation/de-escalation triggers)
@@ -1,23 +1,9 @@
-Please analyze the following task:
-
-Task: {{ task }}
-
-Context: {{ context }}
-
-**Agent Prompt**
-{{ agent_prompt }}
-
-**Analysis Requirements:**
-1. Is it just a small talk? (If yes, no further plan or analysis is needed)
-2. What is the core objective of the task?
-3. What is the complexity level of the task?
-4. What types of specialized skills are required?
-5. Does the task need to be decomposed into subtasks? (If yes, propose the subtask structure)
-6. How to know the task or the subtasks are impossible to lead to the success after a few rounds of interaction?
-7. What are the expected success criteria?
-
-**Available Sub-Agents and Their Specializations:**
-
-{{ tools_desc }}
-
-Provide a detailed analysis of the task based on the above requirements.
+**Input Variables**
+- **{{ task }}** — the task/request to analyze
+- **{{ context }}** — background, history, situational context
+- **{{ agent_prompt }}** — special instructions/role hints
+- **{{ tools_desc }}** — available sub-agents and capabilities
+
+**Final Output Rule**
+Return the Task Transmission section (if needed) followed by the concrete analysis and planning steps according to LOW / MEDIUM / HIGH complexity.
+Do not restate the framework, definitions, or rules. Output only the final structured result.
@@ -7,7 +7,6 @@ Your job is:
 # ========== TASK ANALYSIS =============
 {{ task_analisys }}

-
 # ========== TOOLS (JSON-Schema) ==========
 You may invoke only the tools listed below.
 Return a JSON array of objects in which item is with exactly two top-level keys:
@@ -16,8 +15,24 @@ Return a JSON array of objects in which item is with exactly two top-level keys:

 {{ desc }}

-
+# ========== MULTI-STEP EXECUTION ==========
+When tasks require multiple independent steps, you can execute them in parallel by returning multiple tool calls in a single JSON array.
+
+• **Data Collection**: Gathering information from multiple sources simultaneously
+• **Validation**: Cross-checking facts using different tools
+• **Comprehensive Analysis**: Analyzing different aspects of the same problem
+• **Efficiency**: Reducing total execution time when steps don't depend on each other
+
+**Example Scenarios:**
+- Searching multiple databases for the same query
+- Checking weather in multiple cities
+- Validating information through different APIs
+- Performing calculations on different datasets
+- Gathering user preferences from multiple sources
+
 # ========== RESPONSE FORMAT ==========
-✦ **When you need a tool**
+**When you need a tool**
 Return ONLY the Json (no additional keys, no commentary, end with `<|stop|>`), such as following:
 [{
     "name": "<tool_name1>",
@@ -27,7 +42,20 @@ Return ONLY the Json (no additional keys, no commentary, end with `<|stop|>`), such as following:
     "arguments": { /* tool arguments matching its schema */ }
 }...]<|stop|>

-✦ **When you are certain the task is solved OR no further information can be obtained**
+**When you need multiple tools:**
+Return ONLY:
+[{
+    "name": "<tool_name1>",
+    "arguments": { /* tool arguments matching its schema */ }
+},{
+    "name": "<tool_name2>",
+    "arguments": { /* tool arguments matching its schema */ }
+},{
+    "name": "<tool_name3>",
+    "arguments": { /* tool arguments matching its schema */ }
+}...]<|stop|>
+
+**When you are certain the task is solved OR no further information can be obtained**
 Return ONLY:
 [{
     "name": "complete_task",
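The contract above requires the planner to answer with nothing but a JSON array of `{"name", "arguments"}` objects terminated by `<|stop|>`. This is not the repo's parser, just a hedged sketch of how an orchestrator could consume such a response:

```python
import json_repair

def parse_tool_calls(response: str) -> list[dict]:
    # Drop the stop token, then leniently parse the JSON array of
    # {"name": ..., "arguments": {...}} objects the prompt mandates.
    cleaned = response.split("<|stop|>")[0].strip()
    calls = json_repair.loads(cleaned)
    if isinstance(calls, dict):
        calls = [calls]
    return [c for c in calls if isinstance(c, dict) and "name" in c]

resp = ('[{"name": "search", "arguments": {"query": "weather Paris"}},'
        ' {"name": "search", "arguments": {"query": "weather Rome"}}]<|stop|>')
for call in parse_tool_calls(resp):
    print(call["name"], call["arguments"])
```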
@@ -61,3 +89,4 @@ Internal guideline:
 2. **Act**: Emit the JSON object to call the tool.

+Today is {{ today }}. Remember that success in answering questions accurately is paramount - take all necessary steps to ensure your answer is correct.
@@ -157,7 +157,7 @@ ASK_SUMMARY = load_prompt("ask_summary")
 PROMPT_JINJA_ENV = jinja2.Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True)


-def citation_prompt() -> str:
+def citation_prompt(user_defined_prompts: dict={}) -> str:
     template = PROMPT_JINJA_ENV.from_string(CITATION_PROMPT_TEMPLATE)
     return template.render()
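Only the signature of `citation_prompt` changes in the hunk shown; the body that consults the override is outside this context. The following is an assumed sketch of the selection behavior (the `citation_guidelines` key matches the tag extracted by `_extract_prompts`), not the code from this commit:

```python
import jinja2

PROMPT_JINJA_ENV = jinja2.Environment(autoescape=False, trim_blocks=True, lstrip_blocks=True)
DEFAULT_TEMPLATE = "Cite sources as [ID:i] after each supported statement."  # stand-in text

def citation_prompt(user_defined_prompts: dict = {}) -> str:
    # Assumed behavior: prefer the user's citation_guidelines override,
    # otherwise fall back to the built-in template.
    text = user_defined_prompts.get("citation_guidelines") or DEFAULT_TEMPLATE
    return PROMPT_JINJA_ENV.from_string(text).render()

print(citation_prompt({"citation_guidelines": "Use footnote-style citations."}))
```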
@@ -339,7 +339,7 @@ def form_history(history, limit=-6):
     return context


-def analyze_task(chat_mdl, prompt, task_name, tools_description: list[dict]):
+def analyze_task(chat_mdl, prompt, task_name, tools_description: list[dict], user_defined_prompts: dict={}):
     tools_desc = tool_schema(tools_description)
     context = ""
@@ -354,7 +354,7 @@ def analyze_task(chat_mdl, prompt, task_name, tools_description: list[dict]):
     return kwd


-def next_step(chat_mdl, history:list, tools_description: list[dict], task_desc):
+def next_step(chat_mdl, history:list, tools_description: list[dict], task_desc, user_defined_prompts: dict={}):
     if not tools_description:
         return ""
     desc = tool_schema(tools_description)
@@ -372,7 +372,7 @@ def next_step(chat_mdl, history:list, tools_description: list[dict], task_desc):
     return json_str, tk_cnt


-def reflect(chat_mdl, history: list[dict], tool_call_res: list[Tuple]):
+def reflect(chat_mdl, history: list[dict], tool_call_res: list[Tuple], user_defined_prompts: dict={}):
     tool_calls = [{"name": p[0], "result": p[1]} for p in tool_call_res]
     goal = history[1]["content"]
     template = PROMPT_JINJA_ENV.from_string(REFLECT)
@@ -398,7 +398,7 @@ def form_message(system_prompt, user_prompt):
     return [{"role": "system", "content": system_prompt},{"role": "user", "content": user_prompt}]


-def tool_call_summary(chat_mdl, name: str, params: dict, result: str) -> str:
+def tool_call_summary(chat_mdl, name: str, params: dict, result: str, user_defined_prompts: dict={}) -> str:
     template = PROMPT_JINJA_ENV.from_string(SUMMARY4MEMORY)
     system_prompt = template.render(name=name,
                                     params=json.dumps(params, ensure_ascii=False, indent=2),
@@ -409,7 +409,7 @@ def tool_call_summary(chat_mdl, name: str, params: dict, result: str) -> str:
     return re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)


-def rank_memories(chat_mdl, goal:str, sub_goal:str, tool_call_summaries: list[str]):
+def rank_memories(chat_mdl, goal:str, sub_goal:str, tool_call_summaries: list[str], user_defined_prompts: dict={}):
     template = PROMPT_JINJA_ENV.from_string(RANK_MEMORY)
     system_prompt = template.render(goal=goal, sub_goal=sub_goal, results=[{"i": i, "content": s} for i,s in enumerate(tool_call_summaries)])
     user_prompt = " → rank: "
@@ -6,29 +6,70 @@ Tool call: `{{ call.name }}`
 Results: {{ call.result }}
 {% endfor %}

-**Reflection Instructions:**
-
-Analyze the current state of the overall task ({{ goal }}), then provide structured responses to the following:
-
-## 1. Goal Achievement Status
+## Task Complexity Analysis & Reflection Scope
+
+**First, analyze the task complexity using these dimensions:**
+
+### Complexity Assessment Matrix
+- **Scope Breadth**: Single-step (1) | Multi-step (2) | Multi-domain (3)
+- **Data Dependency**: Self-contained (1) | External inputs (2) | Multiple sources (3)
+- **Decision Points**: Linear (1) | Few branches (2) | Complex logic (3)
+- **Risk Level**: Low (1) | Medium (2) | High (3)
+
+**Complexity Score**: Sum all dimensions (4-12 points)
+
+---
+
+## Task Transmission Assessment
+**Note**: This section is not subject to word count limitations when transmission is needed, as it serves critical handoff functions.
+**Evaluate if task transmission information is needed:**
+- **Is this an initial step?** If yes, skip this section
+- **Are there downstream agents/steps?** If no, provide minimal transmission
+- **Is there critical state/context to preserve?** If yes, include full transmission
+
+### If Task Transmission is Needed:
+- **Current State Summary**: [1-2 sentences on where we are]
+- **Key Data/Results**: [Critical findings that must carry forward]
+- **Context Dependencies**: [Essential context for next agent/step]
+- **Unresolved Items**: [Issues requiring continuation]
+- **Status for User**: [Clear status update in user terms]
+- **Technical State**: [System state for technical handoffs]
+
+---
+
+## Situational Reflection (Adjust Length Based on Complexity Score)
+
+### Reflection Guidelines:
+- **Simple Tasks (4-5 points)**: ~50-100 words, focus on completion status and immediate next step
+- **Moderate Tasks (6-8 points)**: ~100-200 words, include core details and main risks
+- **Complex Tasks (9-12 points)**: ~200-300 words, provide full analysis and alternatives
+
+### 1. Goal Achievement Status
 - Does the current outcome align with the original purpose of this task phase?
 - If not, what critical gaps exist?

-## 2. Step Completion Check
+### 2. Step Completion Check
 - Which planned steps were completed? (List verified items)
-- Which steps are pending/incomplete? (Specify exactly what’s missing)
+- Which steps are pending/incomplete? (Specify exactly what's missing)

-## 3. Information Adequacy
+### 3. Information Adequacy
 - Is the collected data sufficient to proceed?
 - What key information is still needed? (e.g., metrics, user input, external data)

-## 4. Critical Observations
+### 4. Critical Observations
 - Unexpected outcomes: [Flag anomalies/errors]
 - Risks/blockers: [Identify immediate obstacles]
 - Accuracy concerns: [Highlight unreliable results]

-## 5. Next-Step Recommendations
+### 5. Next-Step Recommendations
 - Proposed immediate action: [Concrete next step]
 - Alternative strategies if blocked: [Workaround solution]
 - Tools/inputs required for next phase: [Specify resources]
+
+---
+
+**Output Instructions:**
+1. First determine your complexity score
+2. Assess if task transmission section is needed using the evaluation questions
+3. Provide situational reflection with length appropriate to complexity
+4. Use clear headers for easy parsing by downstream systems