Three independent root causes fixed:
- gui_2.py: Route mma_spawn_approval/mma_step_approval events in _process_event_queue
- multi_agent_conductor.py: Pass asyncio loop from ConductorEngine.run() through to thread-pool workers for thread-safe event queue access; add _queue_put helper
- ai_client.py: Preserve GeminiCliAdapter in reset_session() instead of nulling it

Test: visual_sim_mma_v2::test_mma_complete_lifecycle passes in ~8s

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
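A minimal sketch of the multi_agent_conductor.py change, assuming ConductorEngine owns an asyncio.Queue of events and that run() is the coroutine entry point; attribute names besides _queue_put are illustrative, not taken from the source:

import asyncio

class ConductorEngine:
    def __init__(self) -> None:
        self._events: asyncio.Queue = asyncio.Queue()
        self._loop: asyncio.AbstractEventLoop | None = None

    async def run(self) -> None:
        # Capture the running loop so thread-pool workers can reach it.
        self._loop = asyncio.get_running_loop()
        ...

    def _queue_put(self, event: dict) -> None:
        # asyncio.Queue is not thread-safe; marshal the put onto the loop
        # instead of calling put_nowait() directly from a worker thread.
        assert self._loop is not None, "run() must capture the loop first"
        self._loop.call_soon_threadsafe(self._events.put_nowait, event)

call_soon_threadsafe is the standard way to hand work from a plain thread to a running event loop, which is what makes _queue_put safe to call from the pool workers.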
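# mock_gemini_cli.py -- stand-in for the real Gemini CLI binary in tests.
# Reads the prompt from stdin and prints newline-delimited JSON events
# (an assistant "message" followed by a "result" record) that mimic the
# CLI's streaming output, so visual_sim_mma_v2::test_mma_complete_lifecycle
# can drive the full lifecycle without a live model. How the test harness
# points GeminiCliAdapter at this script (e.g. via a PATH shim) is assumed,
# not shown here.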
import sys
import json
import os

def main() -> None:
    sys.stderr.write(f"DEBUG: mock_gemini_cli called with args: {sys.argv}\n")
    sys.stderr.write(f"DEBUG: GEMINI_CLI_HOOK_CONTEXT: {os.environ.get('GEMINI_CLI_HOOK_CONTEXT')}\n")

    # Read the prompt from stdin; read() returns "" at EOF rather than
    # raising EOFError, so no exception handling is needed here.
    prompt = sys.stdin.read()
    sys.stderr.write(f"DEBUG: Received prompt via stdin ({len(prompt)} chars)\n")
    sys.stderr.flush()

    # Skip management subcommands: the real CLI handles these without a
    # prompt, so the mock exits without emitting any events.
    if len(sys.argv) > 1 and sys.argv[1] in ["mcp", "extensions", "skills", "hooks"]:
        return

    # Check for specific simulation contexts. Match on substrings of the
    # full prompt, since the surrounding context length varies with history
    # and project state.
    if 'You are assigned to Ticket' in prompt:
        # Tier 3 worker: fall through to the default mock response below.
        pass
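    # The planning branches below each emit two JSON lines: an assistant
    # "message" whose content is the JSON-encoded plan, then a "result"
    # record with token stats and a session id. flush=True keeps the
    # adapter's incremental stdout reader from stalling on buffered output.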
    elif 'PATH: Epic Initialization' in prompt:
        mock_response = [
            {"id": "mock-track-1", "type": "Track", "module": "core", "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1", "acceptance_criteria": ["criteria 1"], "title": "Mock Goal 1"},
            {"id": "mock-track-2", "type": "Track", "module": "ui", "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2", "acceptance_criteria": ["criteria 2"], "title": "Mock Goal 2"},
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response),
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-epic",
        }), flush=True)
        return

    elif 'PATH: Sprint Planning' in prompt:
        mock_response = [
            {"id": "mock-ticket-1", "type": "Ticket", "goal": "Mock Ticket 1", "target_file": "file1.py", "depends_on": [], "context_requirements": "req 1"},
            {"id": "mock-ticket-2", "type": "Ticket", "goal": "Mock Ticket 2", "target_file": "file2.py", "depends_on": ["mock-ticket-1"], "context_requirements": "req 2"},
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response),
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-sprint",
        }), flush=True)
        return

    # If the prompt already contains tool results, provide the final answer.
    if '"role": "tool"' in prompt or '"tool_call_id"' in prompt:
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": "SUCCESS: Mock Tier 3 worker implemented the change. [MOCK OUTPUT]",
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 80, "output_tokens": 20},
            "session_id": "mock-session-final",
        }), flush=True)
        return

    # Default flow: simply return a message instead of making a tool call
    # that would block the test.
    print(json.dumps({
        "type": "message",
        "role": "assistant",
        "content": "SUCCESS: Mock Tier 3 worker implemented the change. [MOCK OUTPUT]",
    }), flush=True)
    print(json.dumps({
        "type": "result",
        "status": "success",
        "stats": {"total_tokens": 10, "input_tokens": 10, "output_tokens": 0},
        "session_id": "mock-session-default",
    }), flush=True)


if __name__ == "__main__":
    main()
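
# Manual smoke test (assumed invocation; the real harness wires this in as
# a fake CLI binary):
#   echo 'PATH: Epic Initialization' | python mock_gemini_cli.py
# prints a "message" event carrying two mock Tracks, then a "result" event.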