fix(tier1): disable tools in generate_tracks, add enable_tools param to ai_client.send
Tier 1 planning calls are strategic — the model should never use file tools during epic initialization. This caused JSON parse failures when the model tried to verify file references in the epic prompt.

- ai_client.py: add enable_tools param to send() and _send_gemini()
- orchestrator_pm.py: pass enable_tools=False in generate_tracks()
- tests/visual_sim_mma_v2.py: remove file reference from test epic

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -621,14 +621,15 @@ def _send_gemini(md_content: str, user_message: str, base_dir: str,
|
|||||||
file_items: list[dict[str, Any]] | None = None,
|
file_items: list[dict[str, Any]] | None = None,
|
||||||
discussion_history: str = "",
|
discussion_history: str = "",
|
||||||
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
||||||
qa_callback: Optional[Callable[[str], str]] = None) -> str:
|
qa_callback: Optional[Callable[[str], str]] = None,
|
||||||
|
enable_tools: bool = True) -> str:
|
||||||
global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at
|
global _gemini_chat, _gemini_cache, _gemini_cache_md_hash, _gemini_cache_created_at
|
||||||
try:
|
try:
|
||||||
_ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
|
_ensure_gemini_client(); mcp_client.configure(file_items or [], [base_dir])
|
||||||
# Only stable content (files + screenshots) goes in the cached system instruction.
|
# Only stable content (files + screenshots) goes in the cached system instruction.
|
||||||
# Discussion history is sent as conversation messages so the cache isn't invalidated every turn.
|
# Discussion history is sent as conversation messages so the cache isn't invalidated every turn.
|
||||||
sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
|
sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
|
||||||
td = _gemini_tool_declaration()
|
td = _gemini_tool_declaration() if enable_tools else None
|
||||||
tools_decl = [td] if td else None
|
tools_decl = [td] if td else None
|
||||||
# DYNAMIC CONTEXT: Check if files/context changed mid-session
|
# DYNAMIC CONTEXT: Check if files/context changed mid-session
|
||||||
current_md_hash = hashlib.md5(md_content.encode()).hexdigest()
|
current_md_hash = hashlib.md5(md_content.encode()).hexdigest()
|
||||||
@@ -1628,6 +1629,7 @@ def send(
|
|||||||
stream: bool = False,
|
stream: bool = False,
|
||||||
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
pre_tool_callback: Optional[Callable[[str], bool]] = None,
|
||||||
qa_callback: Optional[Callable[[str], str]] = None,
|
qa_callback: Optional[Callable[[str], str]] = None,
|
||||||
|
enable_tools: bool = True,
|
||||||
) -> str:
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Send a message to the active provider.
|
Send a message to the active provider.
|
||||||
@@ -1646,7 +1648,7 @@ def send(
|
|||||||
"""
|
"""
|
||||||
with _send_lock:
|
with _send_lock:
|
||||||
if _provider == "gemini":
|
if _provider == "gemini":
|
||||||
return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
|
return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback, enable_tools=enable_tools)
|
||||||
elif _provider == "gemini_cli":
|
elif _provider == "gemini_cli":
|
||||||
return _send_gemini_cli(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
|
return _send_gemini_cli(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
|
||||||
elif _provider == "anthropic":
|
elif _provider == "anthropic":
|
||||||
|
|||||||
@@ -78,7 +78,8 @@ def generate_tracks(user_request: str, project_config: dict, file_items: list[di
|
|||||||
# Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
|
# Note: We use gemini-1.5-pro or similar high-reasoning model for Tier 1
|
||||||
response = ai_client.send(
|
response = ai_client.send(
|
||||||
md_content="", # We pass everything in user_message for clarity
|
md_content="", # We pass everything in user_message for clarity
|
||||||
user_message=user_message
|
user_message=user_message,
|
||||||
|
enable_tools=False,
|
||||||
)
|
)
|
||||||
# 4. Parse JSON Output
|
# 4. Parse JSON Output
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -73,7 +73,7 @@ def test_mma_complete_lifecycle(live_gui) -> None:
|
|||||||
# ------------------------------------------------------------------
|
# ------------------------------------------------------------------
|
||||||
# Keep prompt short and simple so the model returns minimal JSON
|
# Keep prompt short and simple so the model returns minimal JSON
|
||||||
client.set_value('mma_epic_input',
|
client.set_value('mma_epic_input',
|
||||||
'Add a hello_world() function to utils.py')
|
'Add a hello_world greeting function to the project')
|
||||||
time.sleep(0.3)
|
time.sleep(0.3)
|
||||||
client.click('btn_mma_plan_epic')
|
client.click('btn_mma_plan_epic')
|
||||||
time.sleep(0.5) # frame-sync after click
|
time.sleep(0.5) # frame-sync after click
|
||||||
|
|||||||
Reference in New Issue
Block a user