checkpoint: this is a mess... need to define a stricter DSL or system for how the AI devises sims and hooks up the API for tests.
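One possible shape for that stricter sim DSL, sketched below. Everything here (SimRule, SIM_RULES, respond) is a hypothetical illustration, not code from this commit:

    # Hypothetical sketch only -- a declarative registry in place of the
    # ad-hoc 'PATH: ...' string sniffing in the mock below. Names invented.
    import json
    from dataclasses import dataclass
    from typing import Any

    @dataclass
    class SimRule:
        marker: str          # matched against the head of the incoming prompt
        payload: Any         # what the mock "assistant" answers with
        session_id: str
        window: int = 500    # only scan the first N chars, as the mock does today

    SIM_RULES = [
        SimRule("PATH: Epic Initialization", [{"id": "mock-track-1", "type": "Track"}], "mock-session-epic"),
        SimRule("PATH: Sprint Planning", [{"id": "mock-ticket-1", "type": "Ticket"}], "mock-session-sprint"),
    ]

    def respond(prompt: str) -> bool:
        """Emit the message/result stream-JSON pair for the first matching rule."""
        for rule in SIM_RULES:
            if rule.marker in prompt[:rule.window]:
                print(json.dumps({"type": "message", "role": "assistant",
                                  "content": json.dumps(rule.payload)}), flush=True)
                print(json.dumps({"type": "result", "status": "success",
                                  "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
                                  "session_id": rule.session_id}), flush=True)
                return True
        return False

Each mock would be keyed on an explicit marker plus a bounded scan window, replacing the per-path prompt[:500] string sniffing that the current mock (diffed below) hard-codes.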
@@ -8,21 +8,60 @@ def main() -> None:
    sys.stderr.write(f"DEBUG: GEMINI_CLI_HOOK_CONTEXT: {os.environ.get('GEMINI_CLI_HOOK_CONTEXT')}\n")
    # Read prompt from stdin
    try:
        # On Windows, stdin might be closed or behave weirdly if not handled
        prompt = sys.stdin.read()
    except EOFError:
        prompt = ""
    sys.stderr.write(f"DEBUG: Received prompt via stdin ({len(prompt)} chars)\n")
    sys.stderr.flush()

    # Skip management commands
    if len(sys.argv) > 1 and sys.argv[1] in ["mcp", "extensions", "skills", "hooks"]:
        return
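    # NOTE: each simulated turn below prints newline-delimited stream-JSON:
    # an assistant "message" event followed by a terminal "result" event
    # carrying status, token stats, and a session_id -- presumably the shape
    # the orchestrator expects from the real CLI's streaming output.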
-   # If the prompt contains tool results, provide final answer

    # Check for specific simulation contexts
    # Use startswith or check the beginning of the prompt to avoid matching text inside skeletons
    if 'PATH: Epic Initialization' in prompt[:500]:
        mock_response = [
            {"id": "mock-track-1", "type": "Track", "module": "core", "persona": "Tech Lead", "severity": "Medium", "goal": "Mock Goal 1", "acceptance_criteria": ["criteria 1"], "title": "Mock Goal 1"},
            {"id": "mock-track-2", "type": "Track", "module": "ui", "persona": "Frontend Lead", "severity": "Low", "goal": "Mock Goal 2", "acceptance_criteria": ["criteria 2"], "title": "Mock Goal 2"}
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response)
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-epic"
        }), flush=True)
        return

    if 'PATH: Sprint Planning' in prompt[:500]:
        mock_response = [
            {"id": "mock-ticket-1", "type": "Ticket", "goal": "Mock Ticket 1", "target_file": "file1.py", "depends_on": [], "context_requirements": "req 1"},
            {"id": "mock-ticket-2", "type": "Ticket", "goal": "Mock Ticket 2", "target_file": "file2.py", "depends_on": ["mock-ticket-1"], "context_requirements": "req 2"}
        ]
        print(json.dumps({
            "type": "message",
            "role": "assistant",
            "content": json.dumps(mock_response)
        }), flush=True)
        print(json.dumps({
            "type": "result",
            "status": "success",
            "stats": {"total_tokens": 100, "input_tokens": 50, "output_tokens": 50},
            "session_id": "mock-session-sprint"
        }), flush=True)
        return

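    # NOTE: on follow-up turns the orchestrator presumably serializes tool
    # results back into the prompt, so matching '"role": "tool"' or
    # '"tool_call_id"' below is how the mock tells the second turn apart.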
+   # If the prompt contains tool results, provide final answer
    if '"role": "tool"' in prompt or '"tool_call_id"' in prompt:
        print(json.dumps({
            "type": "message",
            "role": "assistant",
-           "content": "I have processed the tool results. Everything looks good!"
+           "content": "SUCCESS: Mock Tier 3 worker implemented the change. [MOCK OUTPUT]"
        }), flush=True)
        print(json.dumps({
            "type": "result",
@@ -31,7 +70,8 @@ def main() -> None:
            "session_id": "mock-session-final"
        }), flush=True)
        return

-   # Default flow: simulate a tool call
+   # Default flow: simulate a tool call
    bridge_path = os.path.abspath("scripts/cli_tool_bridge.py")
    # Using format that bridge understands
    bridge_tool_call = {
@@ -66,11 +106,6 @@ def main() -> None:
        "tool_id": "call_123",
        "parameters": {"path": "test.txt"}
    }), flush=True)
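    # The harness is expected to execute the requested tool via
    # scripts/cli_tool_bridge.py and feed the results back in a fresh
    # invocation of this script, which then hits the '"role": "tool"'
    # branch above.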
    print(json.dumps({
        "type": "message",
        "role": "assistant",
        "content": "I am reading the file now..."
    }), flush=True)
    print(json.dumps({
        "type": "result",
        "status": "success",

@@ -22,7 +22,7 @@ base_dir = "."
paths = []

[gemini_cli]
-binary_path = "gemini"
+binary_path = "C:\\projects\\manual_slop\\.venv\\Scripts\\python.exe C:\\projects\\manual_slop\\tests\\mock_gemini_cli.py"
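# Overridden for tests: launch the stdin/stdout mock instead of the real
# gemini binary (assumes the adapter splits this value into interpreter +
# script before spawning).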

[deepseek]
reasoning_effort = "medium"
@@ -40,27 +40,3 @@ fetch_url = true
epic = "Develop a new feature"
active_track_id = ""
tracks = []
-
-[mma.active_track]
-id = "track_024370f1b453"
-description = "Mock Goal 1"
-
-[[mma.active_track.tickets]]
-id = "mock-ticket-1"
-description = "Mock Ticket 1"
-status = "todo"
-assigned_to = "unassigned"
-context_requirements = []
-depends_on = []
-step_mode = false
-
-[[mma.active_track.tickets]]
-id = "mock-ticket-2"
-description = "Mock Ticket 2"
-status = "todo"
-assigned_to = "unassigned"
-context_requirements = []
-depends_on = [
-    "mock-ticket-1",
-]
-step_mode = false

@@ -10,7 +10,7 @@ auto_add = true

[discussions.main]
git_commit = ""
-last_updated = "2026-02-28T22:11:24"
+last_updated = "2026-02-28T22:41:40"
history = [
    "@2026-02-28T22:02:40\nSystem:\n[PERFORMANCE ALERT] CPU usage high: 83.5%. Please consider optimizing recent changes or reducing load.",
    "@2026-02-28T22:03:10\nSystem:\n[PERFORMANCE ALERT] CPU usage high: 103.9%. Please consider optimizing recent changes or reducing load.",

@@ -17,11 +17,14 @@ def test_mma_complete_lifecycle(live_gui) -> None:
    client = ApiHookClient()
    assert client.wait_for_server(timeout=10)

-   # 1. Set model to 'mock'.
+   # 1. Set up the mock CLI provider
    try:
        client.set_value('current_model', 'mock')
        client.set_value('current_provider', 'gemini_cli')
        # Point the CLI adapter to our mock script
        mock_cli_path = f'{sys.executable} {os.path.abspath("tests/mock_gemini_cli.py")}'
        client.set_value('gcli_path', mock_cli_path)
    except Exception as e:
-       pytest.fail(f"Failed to set model to 'mock': {e}")
+       pytest.fail(f"Failed to set up mock provider: {e}")

    # 2. Enter epic and click 'Plan Epic'.
    client.set_value('mma_epic_input', 'Develop a new feature')
@@ -136,19 +139,30 @@ def test_mma_complete_lifecycle(live_gui) -> None:

    # 8. Verify 'active_tier' change and output in 'mma_streams'.
    streams_found = False
-   for _ in range(30):
+   for _ in range(60):  # Give it more time for the worker to spawn and respond
        status = client.get_mma_status()
-       streams = status.get('mma_streams', {})
-       if streams and any("Tier 3" in k for k in streams.keys()):
-           print(f"[SIM] Found Tier 3 worker output in streams: {list(streams.keys())}")
-           streams_found = True
-           break
-       # Keep approving if needed

+       # Handle approvals if they pop up during worker execution
        if status and status.get('pending_spawn') is True:
            print('[SIM] Worker spawn required. Clicking btn_approve_spawn...')
            client.click('btn_approve_spawn')
        elif status and status.get('pending_approval') is True:
            print('[SIM] Tool approval required. Clicking btn_approve_tool...')
            client.click('btn_approve_tool')

+       streams = status.get('mma_streams', {})
+       print(f"Polling streams: {list(streams.keys())}")

+       if streams and any("Tier 3" in k for k in streams.keys()):
+           print(f"[SIM] Found Tier 3 worker output in streams: {list(streams.keys())}")
+           # Check for our specific mock content
+           tier3_key = [k for k in streams.keys() if "Tier 3" in k][0]
+           if "SUCCESS: Mock Tier 3 worker" in streams[tier3_key]:
+               print("[SIM] Verified mock worker output content.")
+               streams_found = True
+               break

        time.sleep(1)

-   assert streams_found or 'Tier 1' in status.get('mma_streams', {}), "No output found in 'mma_streams'."
+   assert streams_found, "No Tier 3 mock output found in 'mma_streams'."
    print("MMA complete lifecycle simulation successful.")