import sys
import json
import subprocess
import os


def _emit(event: dict) -> None:
    """Print one JSON event line to stdout, flushed so the consumer sees it immediately."""
    print(json.dumps(event), flush=True)


def _emit_result(session_id: str, total: int, inp: int, out: int) -> None:
    """Emit the terminating 'result' event that every mock response stream ends with."""
    _emit({
        "type": "result",
        "status": "success",
        "stats": {"total_tokens": total, "input_tokens": inp, "output_tokens": out},
        "session_id": session_id,
    })


def main() -> None:
    """Mock Gemini CLI used by tests.

    Reads the prompt from stdin, then writes a canned stream of JSON events
    (message / tool_use / result) to stdout depending on markers found in the
    prompt. Debug traces go to stderr so they don't pollute the JSON stream.
    """
    sys.stderr.write(f"DEBUG: mock_gemini_cli called with args: {sys.argv}\n")
    sys.stderr.write(f"DEBUG: GEMINI_CLI_HOOK_CONTEXT: {os.environ.get('GEMINI_CLI_HOOK_CONTEXT')}\n")

    # Read prompt from stdin. read() returns "" at EOF (it does not raise
    # EOFError); a closed or detached stdin raises ValueError/OSError instead.
    # In any of those cases fall back to an empty prompt.
    try:
        prompt = sys.stdin.read()
    except (EOFError, OSError, ValueError):
        prompt = ""
    sys.stderr.write(f"DEBUG: Received prompt via stdin ({len(prompt)} chars)\n")
    sys.stderr.flush()

    # Management subcommands are no-ops for the mock.
    if len(sys.argv) > 1 and sys.argv[1] in ["mcp", "extensions", "skills", "hooks"]:
        return

    # Check for specific simulation contexts.
    # Match against the full prompt string since context length can vary
    # depending on history or project state.
    if 'You are assigned to Ticket' in prompt:
        # This is a Tier 3 worker.
        pass  # Let it fall through to the default mock response
    elif 'PATH: Epic Initialization' in prompt:
        mock_response = [
            {"id": "mock-track-1", "type": "Track", "module": "core",
             "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1",
             "acceptance_criteria": ["criteria 1"], "title": "Mock Goal 1"},
            {"id": "mock-track-2", "type": "Track", "module": "ui",
             "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2",
             "acceptance_criteria": ["criteria 2"], "title": "Mock Goal 2"},
        ]
        _emit({"type": "message", "role": "assistant",
               "content": json.dumps(mock_response)})
        _emit_result("mock-session-epic", 100, 50, 50)
        return
    elif 'PATH: Sprint Planning' in prompt or 'generate the implementation tickets' in prompt:
        mock_response = [
            {"id": "mock-ticket-1", "description": "Mock Ticket 1",
             "status": "todo", "assigned_to": "worker", "depends_on": []},
            {"id": "mock-ticket-2", "description": "Mock Ticket 2",
             "status": "todo", "assigned_to": "worker",
             "depends_on": ["mock-ticket-1"]},
        ]
        _emit({"type": "message", "role": "assistant",
               "content": json.dumps(mock_response)})
        _emit_result("mock-session-sprint", 100, 50, 50)
        return

    # If the prompt contains tool results, provide the final answer.
    if '"role": "tool"' in prompt or '"tool_call_id"' in prompt:
        _emit({"type": "message", "role": "assistant",
               "content": "I have processed the tool results and here is the final answer."})
        _emit_result("mock-session-final", 100, 80, 20)
        return

    # Default flow: emit a tool call to test multi-round looping.
    _emit({"type": "message", "role": "assistant",
           "content": "I need to check the directory first."})
    _emit({"type": "tool_use", "name": "list_directory",
           "id": "mock-call-1", "args": {"dir_path": "."}})
    _emit_result("mock-session-default", 10, 10, 0)


if __name__ == "__main__":
    main()