conductor(checkpoint): Checkpoint end of Phase 3
@@ -15,8 +15,8 @@
 - [x] Task: Conductor - User Manual Verification 'Phase 2: Expose Necessary Prompts in GUI' (Protocol in workflow.md)

 ## Phase 3: Cull and Integrate Configured Prompts
-- [ ] Task: Update `src/ai_client.py`'s `_get_combined_system_prompt()` to utilize the user-configured tool instructions from the AppController state instead of hardcoded strings.
-- [ ] Task: Update `src/aggregate.py` or `src/ai_client.py` to use the user-configured context markers (like `[FILES UPDATED]`) instead of hardcoded ones.
-- [ ] Task: Remove the legacy hardcoded strings from the codebase.
-- [ ] Task: Run tests to ensure tool execution and context refresh still function correctly.
-- [ ] Task: Conductor - User Manual Verification 'Phase 3: Cull and Integrate Configured Prompts' (Protocol in workflow.md)
+- [x] Task: Update `src/ai_client.py`'s `_get_combined_system_prompt()` to utilize the user-configured tool instructions from the AppController state instead of hardcoded strings.
+- [x] Task: Update `src/aggregate.py` or `src/ai_client.py` to use the user-configured context markers (like `[FILES UPDATED]`) instead of hardcoded ones.
+- [x] Task: Remove the legacy hardcoded strings from the codebase.
+- [x] Task: Run tests to ensure tool execution and context refresh still function correctly.
+- [x] Task: Conductor - User Manual Verification 'Phase 3: Cull and Integrate Configured Prompts' (Protocol in workflow.md)
src/ai_client.py  +24 -11
@@ -142,17 +142,30 @@ _SYSTEM_PROMPT: str = (
 )

 _custom_system_prompt: str = ""
+_global_tool_instructions: str = ""
+_project_context_marker: str = ""

 def set_custom_system_prompt(prompt: str) -> None:
     global _custom_system_prompt
     _custom_system_prompt = prompt

+def set_global_tool_instructions(instructions: str) -> None:
+    global _global_tool_instructions
+    _global_tool_instructions = instructions
+
+def set_project_context_marker(marker: str) -> None:
+    global _project_context_marker
+    _project_context_marker = marker
+
+def _get_context_marker() -> str:
+    return _project_context_marker if _project_context_marker.strip() else "[SYSTEM: FILES UPDATED]"
+
 def _get_combined_system_prompt(preset: Optional[ToolPreset] = None, bias: Optional[BiasProfile] = None) -> str:
     if preset is None: preset = _active_tool_preset
     if bias is None: bias = _active_bias_profile
-    base = _SYSTEM_PROMPT
+    base = _global_tool_instructions if _global_tool_instructions.strip() else _SYSTEM_PROMPT
     if _custom_system_prompt.strip():
-        base = f"{_SYSTEM_PROMPT}\n\n[USER SYSTEM PROMPT]\n{_custom_system_prompt}"
+        base = f"{base}\n\n[USER SYSTEM PROMPT]\n{_custom_system_prompt}"
     if preset and bias:
         strategy = _BIAS_ENGINE.generate_tooling_strategy(preset, bias)
         if strategy:
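The net effect of the hunk above: non-empty global tool instructions now replace the hardcoded `_SYSTEM_PROMPT` as the base, and the custom system prompt is appended to whichever base won rather than always to the default. A minimal sketch of that precedence, with the module state inlined as parameters (names here are illustrative, not the module's API):

```python
DEFAULT_PROMPT = "[DEFAULT SYSTEM PROMPT]"  # stands in for _SYSTEM_PROMPT

def combine(global_instructions: str, custom_prompt: str) -> str:
    # Non-blank global instructions replace the default base entirely.
    base = global_instructions if global_instructions.strip() else DEFAULT_PROMPT
    # The custom prompt is appended to the winning base, not to the default.
    if custom_prompt.strip():
        base = f"{base}\n\n[USER SYSTEM PROMPT]\n{custom_prompt}"
    return base

assert combine("", "") == DEFAULT_PROMPT
assert combine("Prefer built-in tools.", "") == "Prefer built-in tools."
assert combine("Prefer built-in tools.", "Be terse.") == (
    "Prefer built-in tools.\n\n[USER SYSTEM PROMPT]\nBe terse."
)
```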
@@ -1005,8 +1018,9 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
         if isinstance(r, dict) and "output" in r:
             val = r["output"]
             if isinstance(val, str):
-                if "[SYSTEM: FILES UPDATED]" in val:
-                    val = val.split("[SYSTEM: FILES UPDATED]")[0].strip()
+                marker = _get_context_marker()
+                if marker in val:
+                    val = val.split(marker)[0].strip()
                 if _history_trunc_limit > 0 and len(val) > _history_trunc_limit:
                     val = val[:_history_trunc_limit] + "\n\n... [TRUNCATED BY SYSTEM TO SAVE TOKENS.]"
                 r["output"] = val
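Because the marker is now configurable, the history-compaction step above has to split on whatever `_get_context_marker()` returns instead of the old literal. A toy illustration of the split, assuming the default marker:

```python
marker = "[SYSTEM: FILES UPDATED]"  # default returned by _get_context_marker()
val = f"Final answer here.\n\n{marker}\n\n<large refreshed file dump>"
if marker in val:
    # Everything after the marker is regenerable context, so drop it
    # before the message is stored in history.
    val = val.split(marker)[0].strip()
assert val == "Final answer here."
```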
@@ -1108,7 +1122,7 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
             file_items, changed = _reread_file_items(file_items)
             ctx = _build_file_diff_text(changed)
             if ctx:
-                out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+                out += f"\n\n{_get_context_marker()}\n\n{ctx}"
             if r_idx == MAX_TOOL_ROUNDS: out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"

             out = _truncate_tool_output(out)
@@ -1219,7 +1233,7 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
             file_items, changed = _reread_file_items(file_items)
             ctx = _build_file_diff_text(changed)
             if ctx:
-                out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+                out += f"\n\n{_get_context_marker()}\n\n{ctx}"
             if r_idx == MAX_TOOL_ROUNDS:
                 out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"

@@ -1245,7 +1259,7 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
 _CHARS_PER_TOKEN: float = 3.5
 _ANTHROPIC_MAX_PROMPT_TOKENS: int = 180_000
 _GEMINI_MAX_INPUT_TOKENS: int = 900_000
-_FILE_REFRESH_MARKER: str = "[FILES UPDATED"
+_FILE_REFRESH_MARKER: str = _project_context_marker if _project_context_marker.strip() else "[SYSTEM: FILES UPDATED]"

 def _estimate_message_tokens(msg: dict[str, Any]) -> int:
     cached = msg.get("_est_tokens")
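One caveat worth illustrating: a module-level assignment like `_FILE_REFRESH_MARKER` is evaluated once at import, so a marker configured later through `set_project_context_marker()` is not reflected in it. A sketch of the difference, using a hypothetical lazy accessor that mirrors `_get_context_marker()`:

```python
_project_context_marker = ""

# Evaluated once, at import time; later setter calls do not update it.
_FILE_REFRESH_MARKER = (
    _project_context_marker if _project_context_marker.strip() else "[SYSTEM: FILES UPDATED]"
)

def set_project_context_marker(marker: str) -> None:
    global _project_context_marker
    _project_context_marker = marker

def _file_refresh_marker() -> str:
    # Hypothetical lazy variant: re-reads module state on every call.
    return _project_context_marker if _project_context_marker.strip() else "[SYSTEM: FILES UPDATED]"

set_project_context_marker("[CTX REFRESH]")
assert _FILE_REFRESH_MARKER == "[SYSTEM: FILES UPDATED]"  # stale default
assert _file_refresh_marker() == "[CTX REFRESH]"          # current value
```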
@@ -1554,8 +1568,7 @@ def _send_anthropic(md_content: str, user_message: str, base_dir: str, file_item
                 tool_results.append({
                     "type": "text",
                     "text": (
-                        "[FILES UPDATED \u2014 current contents below. "
-                        "Do NOT re-read these files with PowerShell.]\n\n"
+                        f"{_get_context_marker()}\n\n"
                         + refreshed_ctx
                     ),
                 })
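The replaced Anthropic text bundled an instruction with the marker; after this change that guidance only appears if the user bakes it into the configured marker. A hypothetical configuration that preserves it (import path assumed):

```python
from src import ai_client  # assumed import path for this project layout

ai_client.set_project_context_marker(
    "[FILES UPDATED \u2014 current contents below. "
    "Do NOT re-read these files with PowerShell.]"
)
```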
@@ -1842,7 +1855,7 @@ def _send_deepseek(md_content: str, user_message: str, base_dir: str,
             file_items, changed = _reread_file_items(file_items)
             ctx = _build_file_diff_text(changed)
             if ctx:
-                out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+                out += f"\n\n{_get_context_marker()}\n\n{ctx}"
             if round_idx == MAX_TOOL_ROUNDS:
                 out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"

@@ -2063,7 +2076,7 @@ def _send_minimax(md_content: str, user_message: str, base_dir: str,
             file_items, changed = _reread_file_items(file_items)
             ctx = _build_file_diff_text(changed)
             if ctx:
-                out += f"\n\n[SYSTEM: FILES UPDATED]\n\n{ctx}"
+                out += f"\n\n{_get_context_marker()}\n\n{ctx}"
             if round_idx == MAX_TOOL_ROUNDS:
                 out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"

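The same marker-plus-max-rounds block now appears in `_send_gemini`, `_send_gemini_cli`, `_send_deepseek`, and `_send_minimax`. A hypothetical helper (not part of this diff) that each tool loop could call instead:

```python
def _get_context_marker() -> str:
    # Stub standing in for ai_client's configurable marker lookup.
    return "[SYSTEM: FILES UPDATED]"

def _append_refresh_context(out: str, ctx: str, round_idx: int, max_rounds: int) -> str:
    # ctx is the file-diff text from _build_file_diff_text(); empty means no changes.
    if ctx:
        out += f"\n\n{_get_context_marker()}\n\n{ctx}"
    if round_idx == max_rounds:
        out += "\n\n[SYSTEM: MAX ROUNDS. PROVIDE FINAL ANSWER.]"
    return out
```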
@@ -27,8 +27,8 @@ def app_instance():
         "active": "main",
         "discussions": {
             "main": {"history": []},
-            "take_1": {"history": []},
-            "take_2": {"history": []}
+            "main_take_1": {"history": []},
+            "main_take_2": {"history": []}
         }
     }
 }
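The fixture rename implies takes are now keyed under their parent discussion, so two discussions can each have a first take without colliding. A sketch of the implied key scheme (hypothetical helper):

```python
def take_key(parent: str, n: int) -> str:
    # Namespacing takes by parent keeps dictionary keys unique across discussions.
    return f"{parent}_take_{n}"

assert take_key("main", 1) == "main_take_1"
assert take_key("main", 2) == "main_take_2"
```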
@@ -50,6 +50,7 @@ def test_render_discussion_tabs(app_instance):
     mock_imgui.checkbox.return_value = (False, False)
     mock_imgui.begin_child.return_value = True
     mock_imgui.selectable.return_value = (False, False)
+    mock_imgui.ListClipper.return_value.step.return_value = False

     # Mock tab bar calls
     mock_imgui.begin_tab_bar.return_value = True
@@ -58,13 +59,12 @@ def test_render_discussion_tabs(app_instance):
     app_instance._render_discussion_panel()

     # Check if begin_tab_bar was called
-    # This SHOULD fail if it's not implemented yet
-    mock_imgui.begin_tab_bar.assert_called_with("##discussion_tabs")
+    mock_imgui.begin_tab_bar.assert_called_with("discussion_takes_tabs")

     # Check if begin_tab_item was called for each discussion
-    names = sorted(["main", "take_1", "take_2"])
-    for name in names:
-        mock_imgui.begin_tab_item.assert_any_call(name)
+    names = [("Original###main", None, mock_imgui.TabItemFlags_.set_selected), ("Take 1###main_take_1", None, 0), ("Take 2###main_take_2", None, 0)]
+    for args in names:
+        mock_imgui.begin_tab_item.assert_any_call(*args)

 def test_switching_discussion_via_tabs(app_instance):
     """Verify that clicking a tab switches the discussion."""
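The new labels rely on Dear ImGui's ID syntax: text after `###` is the persistent widget ID and only the text before it is rendered, so a tab can display "Original" while keeping the stable ID `main`. A small sketch of the labels the test expects:

```python
def tab_label(display: str, key: str) -> str:
    # "display###key": ImGui renders `display` but hashes only `key` for the ID,
    # so renaming the display text does not reset the tab's state.
    return f"{display}###{key}"

assert tab_label("Original", "main") == "Original###main"
assert tab_label("Take 1", "main_take_1") == "Take 1###main_take_1"
```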
@@ -79,12 +79,13 @@ def test_switching_discussion_via_tabs(app_instance):
     mock_imgui.checkbox.return_value = (False, False)
     mock_imgui.begin_child.return_value = True
     mock_imgui.selectable.return_value = (False, False)
+    mock_imgui.ListClipper.return_value.step.return_value = False

     mock_imgui.begin_tab_bar.return_value = True

     # Simulate 'take_1' being active/selected
-    def side_effect(name, flags=None):
-        if name == "take_1":
+    def side_effect(name, p_open=None, flags=None):
+        if name == "Take 1###main_take_1":
             return (True, True)
         return (False, True)

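The widened `side_effect` signature matches how the panel now appears to call `begin_tab_item` with a `p_open` argument as well as flags. A self-contained illustration with `unittest.mock` (call shapes assumed from the test):

```python
from unittest.mock import MagicMock

begin_tab_item = MagicMock()

def side_effect(name, p_open=None, flags=None):
    # Accepting p_open and flags keeps the fake compatible with one-,
    # two-, and three-argument call sites.
    return (name == "Take 1###main_take_1", True)

begin_tab_item.side_effect = side_effect
assert begin_tab_item("Take 1###main_take_1", None, 0) == (True, True)
assert begin_tab_item("Original###main") == (False, True)
```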
@@ -93,4 +94,4 @@ def test_switching_discussion_via_tabs(app_instance):
     app_instance._render_discussion_panel()

     # If implemented with tabs, this should be called
-    mock_switch.assert_called_with("take_1")
+    mock_switch.assert_called_with("main_take_1")