manual_slop/tests/mock_gemini_cli.py

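"""Mock Gemini CLI used by the integration tests.

Reads a prompt from stdin, matches it against a handful of known test
scenarios, and prints newline-delimited JSON events ("message", "tool_use",
"result") that imitate the real CLI's streaming output. Prompts that match
no scenario fall through to a generic default response.
"""
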
import sys
import json
import subprocess
import os


def main() -> None:
    sys.stderr.write(f"DEBUG: mock_gemini_cli called with args: {sys.argv}\n")
    sys.stderr.write(f"DEBUG: GEMINI_CLI_HOOK_CONTEXT: {os.environ.get('GEMINI_CLI_HOOK_CONTEXT')}\n")

    # Read prompt from stdin
    try:
        prompt = sys.stdin.read()
    except EOFError:
        prompt = ""
    sys.stderr.write(f"DEBUG: Received prompt via stdin ({len(prompt)} chars)\n")
    sys.stderr.flush()

    # Skip management commands
    if len(sys.argv) > 1 and sys.argv[1] in ["mcp", "extensions", "skills", "hooks"]:
        return

    # Check for multi-round integration test triggers
    is_resume = '--resume' in " ".join(sys.argv) or '"role": "tool"' in prompt or '"tool_call_id"' in prompt
    is_resume_list = is_resume and 'list_directory' in prompt
    is_resume_read = is_resume and 'read_file' in prompt
    is_resume_powershell = is_resume and 'run_powershell' in prompt

    # Scenario: directory listing. Round 1 emits a list_directory tool call;
    # the resume round returns the final listing.
    if 'List the files in the current directory' in prompt or 'List the files' in prompt or is_resume_list:
        if not is_resume:
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "I will list the files in the current directory."
            }), flush=True)
            print(json.dumps({
                "type": "tool_use",
                "name": "list_directory",
                "id": "mock-list-dir-call",
                "args": {"path": "."}
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 10, "input_tokens": 5, "output_tokens": 5},
                "session_id": "mock-session-list-dir"
            }), flush=True)
            return
        else:
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "Here are the files in the current directory: aggregate.py, ai_client.py, etc."
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 20, "input_tokens": 10, "output_tokens": 10},
                "session_id": "mock-session-list-dir-res"
            }), flush=True)
            return

    # Scenario: read the first 10 lines of a file. Round 1 emits a read_file
    # tool call; the resume round returns the file contents.
    if 'Read the first 10 lines' in prompt or is_resume_read:
        if not is_resume:
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "I will read the first 10 lines of the file."
            }), flush=True)
            # Extract file name if present
            file_path = "aggregate.py"
            if "aggregate.py" in prompt:
                file_path = "aggregate.py"
            print(json.dumps({
                "type": "tool_use",
                "name": "read_file",
                "id": "mock-read-file-call",
                "args": {"path": file_path, "start_line": 1, "end_line": 10}
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 10, "input_tokens": 5, "output_tokens": 5},
                "session_id": "mock-session-read-file"
            }), flush=True)
            return
        else:
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "Here are the lines from the file: [Line 1, Line 2...]"
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 20, "input_tokens": 10, "output_tokens": 10},
                "session_id": "mock-session-read-file-res"
            }), flush=True)
            return

    # Scenario: create hello.ps1. Round 1 emits a run_powershell tool call;
    # the resume round confirms the script ran.
    if 'Create a hello.ps1 script' in prompt or is_resume_powershell:
        if not is_resume:
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "I will create the hello.ps1 script."
            }), flush=True)
            print(json.dumps({
                "type": "tool_use",
                "name": "run_powershell",
                "id": "mock-hello-call",
                "args": {"script": "Write-Output 'Simulation Test'"}
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 10, "input_tokens": 5, "output_tokens": 5},
                "session_id": "mock-session-hello"
            }), flush=True)
            return
        else:
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "Script hello.ps1 created successfully. Output: Simulation Test"
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 20, "input_tokens": 10, "output_tokens": 10},
                "session_id": "mock-session-hello-res"
            }), flush=True)
            return

    # Check for specific simulation contexts.
    # Use the full prompt string since context length can vary depending on history or project state.
    if 'You are assigned to Ticket' in prompt:
        # This is a Tier 3 worker.
        pass  # Let it fall through to the default mock response
    elif 'PATH: Epic Initialization' in prompt:
        mock_response = [
            {"id": "mock-track-1", "type": "Track", "module": "core", "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1", "acceptance_criteria": ["criteria 1"], "title": "Mock Goal 1"},
            {"id": "mock-track-2", "type": "Track", "module": "ui", "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2", "acceptance_criteria": ["criteria 2"], "title": "Mock Goal 2"}
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response)
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-epic"
        }), flush=True)
        return
    elif 'PATH: Sprint Planning' in prompt or 'generate the implementation tickets' in prompt:
        mock_response = [
            {"id": "mock-ticket-1", "description": "Mock Ticket 1", "status": "todo", "assigned_to": "worker", "depends_on": []},
            {"id": "mock-ticket-2", "description": "Mock Ticket 2", "status": "todo", "assigned_to": "worker", "depends_on": ["mock-ticket-1"]}
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response)
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-sprint"
        }), flush=True)
        return

    if is_resume or 'Perform multi-round tool test' in prompt or 'Please read test.txt' in prompt or 'Deny me' in prompt:
        if not is_resume:
            # First round: emit tool call
            print(json.dumps({
                "type": "message",
                "role": "assistant",
                "content": "I need to check the directory first."
            }), flush=True)
            print(json.dumps({
                "type": "tool_use",
                "name": "run_powershell",
                "id": "mock-call-1",
                "args": {"script": "Get-ChildItem"}
            }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 10, "input_tokens": 10, "output_tokens": 0},
                "session_id": "mock-session-default"
            }), flush=True)
            return
        else:
            # Second round
            if "USER REJECTED" in prompt:
                print(json.dumps({
                    "type": "message",
                    "role": "assistant",
                    "content": "Tool execution was denied. I cannot proceed."
                }), flush=True)
            else:
                print(json.dumps({
                    "type": "message",
                    "role": "assistant",
                    "content": "I have processed the tool results and here is the final answer."
                }), flush=True)
            print(json.dumps({
                "type": "result",
                "status": "success",
                "stats": {"total_tokens": 100, "input_tokens": 80, "output_tokens": 20},
                "session_id": "mock-session-final"
            }), flush=True)
            return

    # Default response
    content = "I am a mock CLI and I have processed your request."
    if 'Acknowledged' in prompt:
        content = "Acknowledged."
    elif 'What is the current date' in prompt:
        content = "The current date is March 1, 2026."
    print(json.dumps({
        "type": "message",
        "role": "assistant",
        "content": content
    }), flush=True)
    print(json.dumps({
        "type": "result",
        "status": "success",
        "stats": {"total_tokens": 50, "input_tokens": 25, "output_tokens": 25},
        "session_id": "mock-session-default"
    }), flush=True)


if __name__ == "__main__":
    main()
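
# Usage sketch (an assumption, not taken from this file): the integration
# tests presumably point the agent at this script in place of the real
# `gemini` binary and parse its stdout as JSON lines, roughly like:
#
#     proc = subprocess.run(
#         [sys.executable, "manual_slop/tests/mock_gemini_cli.py"],
#         input="List the files in the current directory",
#         capture_output=True,
#         text=True,
#     )
#     events = [json.loads(line) for line in proc.stdout.splitlines() if line]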