chore(mma): Deterministic track IDs, worker spawn hooks, and improved simulation reliability

This commit is contained in:
2026-02-28 22:09:18 -05:00
parent cb0e14e1c0
commit 37df4c8003
8 changed files with 140 additions and 83 deletions

View File

@@ -261,7 +261,7 @@ def set_provider(provider: str, model: str) -> None:
if provider == "gemini_cli":
valid_models = _list_gemini_cli_models()
# If model is invalid or belongs to another provider (like deepseek), force default
if model not in valid_models or model.startswith("deepseek"):
if model != "mock" and (model not in valid_models or model.startswith("deepseek")):
_model = "gemini-3-flash-preview"
else:
_model = model
@@ -815,8 +815,8 @@ def _send_gemini_cli(md_content: str, user_message: str, base_dir: str,
global _gemini_cli_adapter
try:
if _gemini_cli_adapter is None:
_gemini_cli_adapter = GeminiCliAdapter(binary_path="gemini")
adapter = _gemini_cli_adapter
_gemini_cli_adapter = GeminiCliAdapter(binary_path="gemini")
adapter = _gemini_cli_adapter
mcp_client.configure(file_items or [], [base_dir])
# Construct the system instruction, combining the base system prompt and the current context.
sys_instr = f"{_get_combined_system_prompt()}\n\n<context>\n{md_content}\n</context>"
@@ -1621,16 +1621,16 @@ from typing import Any, Callable, Optional, List
# and the _send_xxx functions are also defined at module level.
def send(
        md_content: str,
        user_message: str,
        base_dir: str = ".",
        file_items: list[dict[str, Any]] | None = None,
        discussion_history: str = "",
        stream: bool = False,
        pre_tool_callback: Optional[Callable[[str], bool]] = None,
        qa_callback: Optional[Callable[[str], str]] = None,
) -> str:
    """
    Send a message to the active provider.

    md_content         : aggregated markdown context string (for Gemini: stable
                         content only, per the surrounding module docs).
    user_message       : the user's message for this turn.
    base_dir           : working directory forwarded to the provider adapters.
    file_items         : optional list of file descriptor dicts forwarded to
                         the adapters.
    discussion_history : prior conversation text forwarded to the adapters.
    stream             : streaming flag (honored by the deepseek adapter only).
    pre_tool_callback  : Optional callback (payload: str) -> bool called before
                         tool execution.
    qa_callback        : Optional callback (stderr: str) -> str called for
                         Tier 4 error analysis.

    Returns the provider's response as a string; the mock path returns a JSON
    string. Raises ValueError when _provider is unknown.
    """
    # --- START MOCK LOGIC ---
    # When the configured model is 'mock', short-circuit before acquiring
    # _send_lock and synthesize a deterministic payload keyed off the active
    # custom system prompt, so tests never hit a real provider.
    if _model == 'mock':
        import json  # local import keeps the mock path self-contained
        keyword = "unknown"
        if 'Epic Initialization' in _custom_system_prompt:
            keyword = "Epic Initialization"
            mock_response_content = [
                {"id": "mock-track-1", "type": "Track", "module": "core", "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1", "acceptance_criteria": ["criteria 1"]},
                {"id": "mock-track-2", "type": "Track", "module": "ui", "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2", "acceptance_criteria": ["criteria 2"]}
            ]
        elif 'Sprint Planning' in _custom_system_prompt:
            keyword = "Sprint Planning"
            mock_response_content = [
                {"id": "mock-ticket-1", "type": "Ticket", "goal": "Mock Ticket 1", "target_file": "file1.py", "depends_on": [], "context_requirements": "req 1"},
                {"id": "mock-ticket-2", "type": "Ticket", "goal": "Mock Ticket 2", "target_file": "file2.py", "depends_on": ["mock-ticket-1"], "context_requirements": "req 2"}
            ]
        else:
            mock_response_content = "Mock AI Response"
        print(f"[MOCK AI] Triggered for prompt keyword: {keyword}")
        # The function is typed to return 'str', so serialize the mock payload.
        return json.dumps(mock_response_content)
    # --- END MOCK LOGIC ---
    # Real providers: serialize sends under the module-level lock and dispatch
    # on the configured provider name.
    with _send_lock:
        if _provider == "gemini":
            return _send_gemini(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
        elif _provider == "gemini_cli":
            return _send_gemini_cli(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
        elif _provider == "anthropic":
            return _send_anthropic(md_content, user_message, base_dir, file_items, discussion_history, pre_tool_callback, qa_callback)
        elif _provider == "deepseek":
            # Only the deepseek adapter accepts the stream flag.
            return _send_deepseek(md_content, user_message, base_dir, file_items, discussion_history, stream=stream, pre_tool_callback=pre_tool_callback, qa_callback=qa_callback)
        raise ValueError(f"unknown provider: {_provider}")
def get_history_bleed_stats(md_content: str | None = None) -> dict[str, Any]:
"""
Calculates how close the current conversation history is to the token limit.