manual_slop/tests/mock_gemini_cli.py
0593b289e5 fix(mock): correct sprint ticket format and add keyword detection
- description/status/assigned_to fields now match parse_json_tickets expectations
- Sprint planning branch also detects 'generate the implementation tickets'

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-01 14:21:21 -05:00

import json
import os
import sys


def main() -> None:
    sys.stderr.write(f"DEBUG: mock_gemini_cli called with args: {sys.argv}\n")
    sys.stderr.write(f"DEBUG: GEMINI_CLI_HOOK_CONTEXT: {os.environ.get('GEMINI_CLI_HOOK_CONTEXT')}\n")

    # Read the prompt from stdin.
    try:
        prompt = sys.stdin.read()
    except EOFError:
        prompt = ""
    sys.stderr.write(f"DEBUG: Received prompt via stdin ({len(prompt)} chars)\n")
    sys.stderr.flush()

    # Skip management commands.
    if len(sys.argv) > 1 and sys.argv[1] in ["mcp", "extensions", "skills", "hooks"]:
        return

    # Check for specific simulation contexts. Match against the full prompt
    # string, since context length can vary with history or project state.
    if 'You are assigned to Ticket' in prompt:
        # This is a Tier 3 worker; fall through to the default mock response.
        pass
    elif 'PATH: Epic Initialization' in prompt:
        mock_response = [
            {"id": "mock-track-1", "type": "Track", "module": "core", "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1", "acceptance_criteria": ["criteria 1"], "title": "Mock Goal 1"},
            {"id": "mock-track-2", "type": "Track", "module": "ui", "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2", "acceptance_criteria": ["criteria 2"], "title": "Mock Goal 2"},
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response),
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-epic",
        }), flush=True)
        return
    elif 'PATH: Sprint Planning' in prompt or 'generate the implementation tickets' in prompt:
        mock_response = [
            {"id": "mock-ticket-1", "description": "Mock Ticket 1", "status": "todo", "assigned_to": "worker", "depends_on": []},
            {"id": "mock-ticket-2", "description": "Mock Ticket 2", "status": "todo", "assigned_to": "worker", "depends_on": ["mock-ticket-1"]},
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response),
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-sprint",
        }), flush=True)
        return

    # If the prompt contains tool results, provide a final answer.
    if '"role": "tool"' in prompt or '"tool_call_id"' in prompt:
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": "SUCCESS: Mock Tier 3 worker implemented the change. [MOCK OUTPUT]",
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 80, "output_tokens": 20},
            "session_id": "mock-session-final",
        }), flush=True)
        return

    # Default flow: return a plain message instead of making a tool call that
    # would block the test.
    print(json.dumps({
        "type": "message",
        "role": "assistant",
        "content": "SUCCESS: Mock Tier 3 worker implemented the change. [MOCK OUTPUT]",
    }), flush=True)
    print(json.dumps({
        "type": "result",
        "status": "success",
        "stats": {"total_tokens": 10, "input_tokens": 10, "output_tokens": 0},
        "session_id": "mock-session-default",
    }), flush=True)


if __name__ == "__main__":
    main()
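
For context, a minimal sketch of how a test might wire this mock in as the `gemini` CLI, assuming the code under test resolves the binary via PATH; the fixture name and shim approach are illustrative assumptions, not part of this repository.

# Hypothetical pytest fixture (names are assumptions, not from this repo):
# expose mock_gemini_cli.py as an executable named "gemini" on PATH so the
# code under test spawns the mock instead of the real CLI.
import os
import stat
import sys
from pathlib import Path

import pytest

MOCK_SCRIPT = Path(__file__).parent / "mock_gemini_cli.py"


@pytest.fixture
def mock_gemini_on_path(tmp_path, monkeypatch):
    shim = tmp_path / "gemini"
    # Delegate to the mock script using the current Python interpreter.
    shim.write_text(f'#!/bin/sh\nexec "{sys.executable}" "{MOCK_SCRIPT}" "$@"\n')
    shim.chmod(shim.stat().st_mode | stat.S_IEXEC)
    # Prepend the shim directory so it shadows any real gemini binary.
    monkeypatch.setenv("PATH", f"{tmp_path}{os.pathsep}{os.environ['PATH']}")
    return shim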