"""Mock Gemini CLI executable used in tests of the CLI adapter."""
import json
import os
import subprocess
import sys


def main() -> None:
    """Mock replacement for the Gemini CLI used in adapter tests.

    Reads the prompt from stdin, then emits newline-delimited JSON events
    on stdout that imitate the real CLI's streaming output. The canned
    scenario is selected from ``sys.argv`` and the start of the prompt:

    * management subcommands (mcp/extensions/skills/hooks) -> no output
    * ``PATH: Epic Initialization`` -> canned Track list
    * ``PATH: Sprint Planning``     -> canned Ticket list
    * prompt containing tool results -> final SUCCESS message
    * otherwise -> simulate a tool call, gated by the hook bridge
    """
    sys.stderr.write(f"DEBUG: mock_gemini_cli called with args: {sys.argv}\n")
    sys.stderr.write(f"DEBUG: GEMINI_CLI_HOOK_CONTEXT: {os.environ.get('GEMINI_CLI_HOOK_CONTEXT')}\n")

    prompt = _read_prompt()
    sys.stderr.write(f"DEBUG: Received prompt via stdin ({len(prompt)} chars)\n")
    sys.stderr.flush()

    # Skip management commands: they produce no streaming output at all.
    if len(sys.argv) > 1 and sys.argv[1] in ["mcp", "extensions", "skills", "hooks"]:
        return

    # Check for specific simulation contexts. Only inspect the head of the
    # prompt so markers embedded in skeletons further down cannot trigger
    # the wrong scenario.
    head = prompt[:500]

    if 'PATH: Epic Initialization' in head:
        mock_response = [
            {"id": "mock-track-1", "type": "Track", "module": "core", "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1", "acceptance_criteria": ["criteria 1"], "title": "Mock Goal 1"},
            {"id": "mock-track-2", "type": "Track", "module": "ui", "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2", "acceptance_criteria": ["criteria 2"], "title": "Mock Goal 2"}
        ]
        _emit_message(json.dumps(mock_response))
        _emit_result({"total_tokens": 100, "input_tokens": 50, "output_tokens": 50}, "mock-session-epic")
        return

    if 'PATH: Sprint Planning' in head:
        mock_response = [
            {"id": "mock-ticket-1", "type": "Ticket", "goal": "Mock Ticket 1", "target_file": "file1.py", "depends_on": [], "context_requirements": "req 1"},
            {"id": "mock-ticket-2", "type": "Ticket", "goal": "Mock Ticket 2", "target_file": "file2.py", "depends_on": ["mock-ticket-1"], "context_requirements": "req 2"}
        ]
        _emit_message(json.dumps(mock_response))
        _emit_result({"total_tokens": 100, "input_tokens": 50, "output_tokens": 50}, "mock-session-sprint")
        return

    # If the prompt contains tool results, provide the final answer.
    if '"role": "tool"' in prompt or '"tool_call_id"' in prompt:
        _emit_message("SUCCESS: Mock Tier 3 worker implemented the change. [MOCK OUTPUT]")
        _emit_result({"total_tokens": 100, "input_tokens": 80, "output_tokens": 20}, "mock-session-final")
        return

    # Default flow: simulate a tool call, but only if the hook bridge allows it.
    decision = _ask_bridge()
    if decision == "allow":
        # Simulate REAL CLI field names for adapter normalization test.
        _emit({
            "type": "tool_use",
            "tool_name": "read_file",
            "tool_id": "call_123",
            "parameters": {"path": "test.txt"}
        })
        _emit_result({"total_tokens": 50, "input_tokens": 40, "output_tokens": 10}, "mock-session-123")
    else:
        _emit_message(f"Tool execution was denied. Decision: {decision}")
        _emit_result({"total_tokens": 10, "input_tokens": 10, "output_tokens": 0}, "mock-session-denied")


def _read_prompt() -> str:
    """Return the whole prompt read from stdin ("" on EOFError)."""
    # NOTE(review): sys.stdin.read() returns "" at EOF rather than raising
    # EOFError; the guard is kept only for parity with the original code.
    try:
        return sys.stdin.read()
    except EOFError:
        return ""


def _emit(event: dict) -> None:
    """Print one JSON event line, flushed so the parent process sees it immediately."""
    print(json.dumps(event), flush=True)


def _emit_message(content: str) -> None:
    """Emit an assistant message event carrying *content*."""
    _emit({
        "type": "message",
        "role": "assistant",
        "content": content
    })


def _emit_result(stats: dict, session_id: str) -> None:
    """Emit the terminal success result event that closes a mock turn."""
    _emit({
        "type": "result",
        "status": "success",
        "stats": stats,
        "session_id": session_id
    })


def _ask_bridge() -> "str | None":
    """Run the CLI tool bridge with a canned read_file call and return its decision.

    Returns the bridge's ``"decision"`` field (may be None if the bridge
    omitted it), or ``"deny"`` if the bridge could not be run or its output
    could not be parsed.
    """
    bridge_path = os.path.abspath("scripts/cli_tool_bridge.py")
    # Using format that bridge understands
    bridge_tool_call = {
        "name": "read_file",
        "input": {"path": "test.txt"}
    }
    sys.stderr.write(f"DEBUG: Calling bridge at {bridge_path}\n")
    sys.stderr.flush()
    try:
        # CRITICAL: Use the current process environment to ensure
        # GEMINI_CLI_HOOK_CONTEXT is passed through to the bridge.
        process = subprocess.Popen(
            [sys.executable, bridge_path],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            env=os.environ
        )
        stdout, stderr = process.communicate(input=json.dumps(bridge_tool_call))
        sys.stderr.write(f"DEBUG: Bridge stdout: {stdout}\n")
        sys.stderr.write(f"DEBUG: Bridge stderr: {stderr}\n")
        return json.loads(stdout.strip()).get("decision")
    except Exception as e:
        sys.stderr.write(f"DEBUG: Bridge failed: {e}\n")
        return "deny"
|
# Script entry point: run the mock CLI when executed directly.
if __name__ == "__main__":
    main()