fix(tier1): disable tools in generate_tracks, add enable_tools param to ai_client.send

Tier 1 planning calls are strategic: the model should never use file tools
during epic initialization. Previously, tool use caused JSON parse failures
when the model tried to verify file references in the epic prompt instead of
returning the plan as plain JSON. A caller-side sketch follows the change
list below.

- ai_client.py: add enable_tools param to send() and _send_gemini()
- orchestrator_pm.py: pass enable_tools=False in generate_tracks()
- tests/visual_sim_mma_v2.py: remove file reference from test epic
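
A minimal sketch of the intended caller pattern after this change (the prompt
string and JSON handling are illustrative; only the enable_tools flag is new
in this commit):

```python
import json

import ai_client

# Tier 1 planning call: strategic, tool-free, expects pure JSON back.
plan_prompt = "Plan tracks for: add a greeting function"  # illustrative only
response = ai_client.send(
    md_content="",             # everything goes in user_message, as in generate_tracks
    user_message=plan_prompt,
    enable_tools=False,        # the model can no longer emit tool calls
)
tracks = json.loads(response)  # should now parse cleanly, no tool-call chatter
```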

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
commit ce5b6d202b
parent c023ae14dc
date   2026-03-01 14:04:44 -05:00
3 changed files with 8 additions and 5 deletions

ai_client.py

@@ -621,14 +621,15 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
                  file_items: list[dict[str, Any]] | None = None,
                  discussion_history: str = "",
                  pre_tool_callback: Optional[Callable[[str], bool]] = None,
-                 qa_callback: Optional[Callable[[str], str]] = None) -> str:
+                 qa_callback: Optional[Callable[[str], str]] = None,
+                 enable_tools: bool = True) -> str:
     global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at
     try:
         _ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
         # Only stable content (files + screenshots) goes in the cached system instruction.
         # Discussion history is sent as conversation messages so the cache isn't invalidated every turn.
         sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
-        td = _gemini_tool_declaration()
+        td = _gemini_tool_declaration() if enable_tools else None
         tools_decl = [td] if td else None
         # DYNAMIC CONTEXT: Check if files/context changed mid-session
         current_md_hash = hashlib.md5(md_content.encode()).hexdigest()
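
For reviewers: with enable_tools=False, td is None, so tools_decl is also None
and no function declarations reach the Gemini request. A minimal sketch of the
assumed downstream wiring (the google-genai calls below are an assumption; the
actual chat construction lives outside this hunk):

```python
from google import genai
from google.genai import types

# ASSUMPTION: illustrative only; sketches how tools_decl is presumed to be
# consumed when the chat session is (re)created further down in _send_gemini.
client = genai.Client()
config = types.GenerateContentConfig(
    system_instruction=sys_instr,  # built in the hunk above
    tools=tools_decl,              # None => no function calling offered
)
chat = client.chats.create(model="gemini-1.5-pro", config=config)
```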
@@ -1628,6 +1629,7 @@ def send(
     stream: bool = False,
     pre_tool_callback: Optional[Callable[[str], bool]] = None,
     qa_callback: Optional[Callable[[str], str]] = None,
+    enable_tools: bool = True,
 ) -> str:
     """
     Send a message to the active provider.
@@ -1646,7 +1648,7 @@ def send(
"""
with _send_lock:
if _provider == "gemini":
return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback, enable_tools=enable_tools)
elif _provider == "gemini_cli":
return _send_gemini_cli(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
elif _provider == "anthropic":
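
Note that only the gemini branch threads enable_tools through; gemini_cli and
anthropic silently ignore it for now. A hedged sketch of a guard that would
make this explicit (hypothetical, not part of this commit):

```python
import logging

# ASSUMPTION: hypothetical guard, not in this diff. It would surface the fact
# that only the gemini provider honors enable_tools=False today.
if not enable_tools and _provider != "gemini":
    logging.warning("enable_tools=False is ignored by provider %r", _provider)
```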

orchestrator_pm.py

@@ -78,7 +78,8 @@ def generate_tracks(user_request: str, project_config: dict, file_items: list[di
     # Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
     response = ai_client.send(
         md_content="",  # We pass everything in user_message for clarity
-        user_message=user_message
+        user_message=user_message,
+        enable_tools=False,
     )
     # 4. Parse JSON Output
     try:
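
Since the motivation is JSON parse failures, a minimal sketch of the kind of
defensive parsing step 4 implies (the helper below is illustrative; the real
parsing code sits outside this hunk):

```python
import json

def _parse_tracks(response: str) -> dict:
    # ASSUMPTION: illustrative helper, not part of this commit. Models often
    # wrap JSON in markdown fences; slice from the first '{' to the last '}'.
    start, end = response.find("{"), response.rfind("}")
    if start == -1 or end == -1:
        raise ValueError("no JSON object found in model response")
    return json.loads(response[start:end + 1])
```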

tests/visual_sim_mma_v2.py

@@ -73,7 +73,7 @@ def test_mma_complete_lifecycle(live_gui) -> None:
     # ------------------------------------------------------------------
     # Keep prompt short and simple so the model returns minimal JSON
     client.set_value('mma_epic_input',
-                     'Add a hello_world() function to utils.py')
+                     'Add a hello_world greeting function to the project')
     time.sleep(0.3)
     client.click('btn_mma_plan_epic')
     time.sleep(0.5)  # frame-sync after click