refactor(indentation): Apply codebase-wide 1-space ultra-compact refactor. Formatted 21 core modules and tests.

2026-02-28 19:36:38 -05:00
parent 8bfc41ddba
commit 173ea96fb4
21 changed files with 917 additions and 930 deletions

View File

@@ -1,21 +1,20 @@
import sys, json, os, subprocess
prompt = sys.stdin.read()
if '"role": "tool"' in prompt:
print(json.dumps({"type": "message", "role": "assistant", "content": "Tool worked!"}), flush=True)
print(json.dumps({"type": "result", "stats": {"total_tokens": 20}}), flush=True)
print(json.dumps({"type": "message", "role": "assistant", "content": "Tool worked!"}), flush=True)
print(json.dumps({"type": "result", "stats": {"total_tokens": 20}}), flush=True)
else:
 # We must call the bridge to trigger the GUI approval!
 tool_call = {"name": "list_directory", "input": {"dir_path": "."}}
 bridge_cmd = [sys.executable, "C:/projects/manual_slop/scripts/cli_tool_bridge.py"]
 proc = subprocess.Popen(bridge_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
 stdout, _ = proc.communicate(input=json.dumps(tool_call))
 # Even if the bridge says allow, we emit the tool_use to the adapter
 print(json.dumps({"type": "message", "role": "assistant", "content": "I will list the directory."}), flush=True)
 print(json.dumps({
  "type": "tool_use",
  "name": "list_directory",
  "id": "alias_call",
  "args": {"dir_path": "."}
 }), flush=True)
 print(json.dumps({"type": "result", "stats": {"total_tokens": 10}}), flush=True)

View File

@@ -4,104 +4,104 @@ from unittest.mock import MagicMock, patch
import ai_client
class MockUsage:
 def __init__(self) -> None:
  self.prompt_token_count = 10
  self.candidates_token_count = 5
  self.total_token_count = 15
  self.cached_content_token_count = 0
class MockPart:
 def __init__(self, text: Any, function_call: Any) -> None:
  self.text = text
  self.function_call = function_call
class MockContent:
 def __init__(self, parts: Any) -> None:
  self.parts = parts
class MockCandidate:
 def __init__(self, parts: Any) -> None:
  self.content = MockContent(parts)
  self.finish_reason = MagicMock()
  self.finish_reason.name = "STOP"
def test_ai_client_event_emitter_exists() -> None:
 # This should fail initially because 'events' won't exist on ai_client
 assert hasattr(ai_client, 'events')
def test_event_emission() -> None:
 callback = MagicMock()
 ai_client.events.on("test_event", callback)
 ai_client.events.emit("test_event", payload={"data": 123})
 callback.assert_called_once_with(payload={"data": 123})
def test_send_emits_events() -> None:
with patch("ai_client._send_gemini") as mock_send_gemini, \
patch("ai_client._send_anthropic") as mock_send_anthropic:
mock_send_gemini.return_value = "gemini response"
start_callback = MagicMock()
response_callback = MagicMock()
ai_client.events.on("request_start", start_callback)
ai_client.events.on("response_received", response_callback)
ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
ai_client.send("context", "message")
# We mocked _send_gemini so it doesn't emit events inside.
# But wait, ai_client.send itself emits request_start and response_received?
# Actually, ai_client.send delegates to _send_gemini.
# Let's mock _gemini_client instead to let _send_gemini run and emit events.
pass
with patch("ai_client._send_gemini") as mock_send_gemini, \
patch("ai_client._send_anthropic") as mock_send_anthropic:
mock_send_gemini.return_value = "gemini response"
start_callback = MagicMock()
response_callback = MagicMock()
ai_client.events.on("request_start", start_callback)
ai_client.events.on("response_received", response_callback)
ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
ai_client.send("context", "message")
# We mocked _send_gemini so it doesn't emit events inside.
# But wait, ai_client.send itself emits request_start and response_received?
# Actually, ai_client.send delegates to _send_gemini.
# Let's mock _gemini_client instead to let _send_gemini run and emit events.
pass
def test_send_emits_events_proper() -> None:
with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client") as mock_client:
mock_chat = MagicMock()
mock_client.chats.create.return_value = mock_chat
mock_response = MagicMock()
mock_response.candidates = [MockCandidate([MockPart("gemini response", None)])]
mock_response.usage_metadata = MockUsage()
mock_chat.send_message.return_value = mock_response
start_callback = MagicMock()
response_callback = MagicMock()
ai_client.events.on("request_start", start_callback)
ai_client.events.on("response_received", response_callback)
ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
ai_client.send("context", "message")
assert start_callback.called
assert response_callback.called
args, kwargs = start_callback.call_args
assert kwargs['payload']['provider'] == 'gemini'
with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client") as mock_client:
mock_chat = MagicMock()
mock_client.chats.create.return_value = mock_chat
mock_response = MagicMock()
mock_response.candidates = [MockCandidate([MockPart("gemini response", None)])]
mock_response.usage_metadata = MockUsage()
mock_chat.send_message.return_value = mock_response
start_callback = MagicMock()
response_callback = MagicMock()
ai_client.events.on("request_start", start_callback)
ai_client.events.on("response_received", response_callback)
ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
ai_client.send("context", "message")
assert start_callback.called
assert response_callback.called
args, kwargs = start_callback.call_args
assert kwargs['payload']['provider'] == 'gemini'
def test_send_emits_tool_events() -> None:
 import mcp_client
 with patch("ai_client._ensure_gemini_client"), \
      patch("ai_client._gemini_client") as mock_client, \
      patch("mcp_client.dispatch") as mock_dispatch:
  mock_chat = MagicMock()
  mock_client.chats.create.return_value = mock_chat
  # 1. Setup mock response with a tool call
  mock_fc = MagicMock()
  mock_fc.name = "read_file"
  mock_fc.args = {"path": "test.txt"}
  mock_response_with_tool = MagicMock()
  mock_response_with_tool.candidates = [MockCandidate([MockPart("tool call text", mock_fc)])]
  mock_response_with_tool.usage_metadata = MockUsage()
  # 2. Setup second mock response (final answer)
  mock_response_final = MagicMock()
  mock_response_final.candidates = [MockCandidate([MockPart("final answer", None)])]
  mock_response_final.usage_metadata = MockUsage()
  mock_chat.send_message.side_effect = [mock_response_with_tool, mock_response_final]
  mock_dispatch.return_value = "file content"
  ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
  tool_callback = MagicMock()
  ai_client.events.on("tool_execution", tool_callback)
  ai_client.send("context", "message")
  # Should be called twice: once for 'started', once for 'completed'
  assert tool_callback.call_count == 2
  # Check 'started' call
  args, kwargs = tool_callback.call_args_list[0]
  assert kwargs['payload']['status'] == 'started'
  assert kwargs['payload']['tool'] == 'read_file'
  # Check 'completed' call
  args, kwargs = tool_callback.call_args_list[1]
  assert kwargs['payload']['status'] == 'completed'
  assert kwargs['payload']['result'] == 'file content'
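
Taken together, these tests pin down a small emitter contract: on(event, callback) registers a handler and emit(event, **kwargs) forwards keyword arguments verbatim. A minimal sketch consistent with that contract (an assumption; ai_client's actual events object may be implemented differently):

from collections import defaultdict
from typing import Any, Callable

class EventEmitter:
 # Minimal pub/sub matching the on/emit usage in the tests above.
 def __init__(self) -> None:
  self._handlers: dict[str, list[Callable[..., Any]]] = defaultdict(list)
 def on(self, event: str, callback: Callable[..., Any]) -> None:
  self._handlers[event].append(callback)
 def emit(self, event: str, **kwargs: Any) -> None:
  for callback in list(self._handlers[event]):
   callback(**kwargs)  # e.g. payload={"data": 123} arrives unchanged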

View File

@@ -5,105 +5,105 @@ import json
import conductor_tech_lead
class TestConductorTechLead(unittest.TestCase):
 @patch('ai_client.send')
 @patch('ai_client.set_provider')
 @patch('ai_client.reset_session')
 def test_generate_tickets_success(self, mock_reset_session: Any, mock_set_provider: Any, mock_send: Any) -> None:
  mock_tickets = [
   {
    "id": "ticket_1",
    "type": "Ticket",
    "goal": "Test goal",
    "target_file": "test.py",
    "depends_on": [],
    "context_requirements": []
   }
  ]
  mock_send.return_value = "```json\n" + json.dumps(mock_tickets) + "\n```"
  track_brief = "Test track brief"
  module_skeletons = "Test skeletons"
  # Call the function
  tickets = conductor_tech_lead.generate_tickets(track_brief, module_skeletons)
  # Verify set_provider was called
  mock_set_provider.assert_called_with('gemini', 'gemini-2.5-flash-lite')
  mock_reset_session.assert_called_once()
  # Verify send was called
  mock_send.assert_called_once()
  args, kwargs = mock_send.call_args
  self.assertEqual(kwargs['md_content'], "")
  self.assertIn(track_brief, kwargs['user_message'])
  self.assertIn(module_skeletons, kwargs['user_message'])
  # Verify tickets were parsed correctly
  self.assertEqual(tickets, mock_tickets)
 @patch('ai_client.send')
 @patch('ai_client.set_provider')
 @patch('ai_client.reset_session')
 def test_generate_tickets_parse_error(self, mock_reset_session: Any, mock_set_provider: Any, mock_send: Any) -> None:
  # Setup mock invalid response
  mock_send.return_value = "Invalid JSON"
  # Call the function
  tickets = conductor_tech_lead.generate_tickets("brief", "skeletons")
  # Verify it returns an empty list on parse error
  self.assertEqual(tickets, [])
class TestTopologicalSort(unittest.TestCase):
 def test_topological_sort_empty(self) -> None:
  tickets = []
  sorted_tickets = conductor_tech_lead.topological_sort(tickets)
  self.assertEqual(sorted_tickets, [])
 def test_topological_sort_linear(self) -> None:
  tickets = [
   {"id": "t2", "depends_on": ["t1"]},
   {"id": "t1", "depends_on": []},
   {"id": "t3", "depends_on": ["t2"]},
  ]
  sorted_tickets = conductor_tech_lead.topological_sort(tickets)
  ids = [t["id"] for t in sorted_tickets]
  self.assertEqual(ids, ["t1", "t2", "t3"])
 def test_topological_sort_complex(self) -> None:
  #   t1
  #   | \
  #  t2  t3
  #   | /
  #   t4
  tickets = [
   {"id": "t4", "depends_on": ["t2", "t3"]},
   {"id": "t3", "depends_on": ["t1"]},
   {"id": "t2", "depends_on": ["t1"]},
   {"id": "t1", "depends_on": []},
  ]
  sorted_tickets = conductor_tech_lead.topological_sort(tickets)
  ids = [t["id"] for t in sorted_tickets]
  # Possible valid orders: [t1, t2, t3, t4] or [t1, t3, t2, t4]
  self.assertEqual(ids[0], "t1")
  self.assertEqual(ids[-1], "t4")
  self.assertSetEqual(set(ids[1:3]), {"t2", "t3"})
 def test_topological_sort_cycle(self) -> None:
  tickets = [
   {"id": "t1", "depends_on": ["t2"]},
   {"id": "t2", "depends_on": ["t1"]},
  ]
  with self.assertRaises(ValueError) as cm:
   conductor_tech_lead.topological_sort(tickets)
  self.assertIn("Circular dependency detected", str(cm.exception))
 def test_topological_sort_missing_dependency(self) -> None:
  # Tickets only depend on other tickets within the same track, so a
  # dependency id absent from the input set can never be satisfied.
  # The contract: raise ValueError for missing internal dependencies
  # rather than looping forever or silently treating the ticket as ready.
  tickets = [
   {"id": "t1", "depends_on": ["missing"]},
  ]
  with self.assertRaises(ValueError):
   conductor_tech_lead.topological_sort(tickets)
if __name__ == '__main__':
 unittest.main()
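
The contract these tests fix — a dependency-respecting order, a ValueError containing "Circular dependency detected" on cycles, and a ValueError for dependencies missing from the input set — is a natural fit for Kahn's algorithm. A sketch under those assumptions, not necessarily conductor_tech_lead's actual implementation:

from typing import Any

def topological_sort(tickets: list[dict[str, Any]]) -> list[dict[str, Any]]:
 by_id = {t["id"]: t for t in tickets}
 # Reject dependencies that can never be satisfied within this set.
 for t in tickets:
  for dep in t.get("depends_on", []):
   if dep not in by_id:
    raise ValueError(f"Missing internal dependency: {dep!r}")
 indegree = {tid: len(t.get("depends_on", [])) for tid, t in by_id.items()}
 ready = [tid for tid, deg in indegree.items() if deg == 0]
 ordered: list[dict[str, Any]] = []
 while ready:
  tid = ready.pop(0)
  ordered.append(by_id[tid])
  for other in tickets:
   if tid in other.get("depends_on", []):
    indegree[other["id"]] -= 1
    if indegree[other["id"]] == 0:
     ready.append(other["id"])
 if len(ordered) != len(tickets):
  raise ValueError("Circular dependency detected")
 return ordered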

View File

@@ -13,105 +13,105 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapter(unittest.TestCase):
 def setUp(self) -> None:
  self.adapter = GeminiCliAdapter(binary_path="gemini")
 @patch('subprocess.Popen')
 def test_send_starts_subprocess_with_correct_args(self, mock_popen: Any) -> None:
  """
  Verify that send(message) starts the subprocess with
  --output-format stream-json and sends the message via stdin using communicate.
  """
  # Setup mock process with a minimal valid JSONL termination
  process_mock = MagicMock()
  stdout_content = json.dumps({"type": "result", "usage": {}}) + "\n"
  process_mock.communicate.return_value = (stdout_content, "")
  process_mock.poll.return_value = 0
  process_mock.wait.return_value = 0
  mock_popen.return_value = process_mock
  message = "Hello Gemini CLI"
  self.adapter.send(message)
  # Verify subprocess.Popen call
  mock_popen.assert_called_once()
  args, kwargs = mock_popen.call_args
  cmd = args[0]
  # Check mandatory CLI components
  self.assertIn("gemini", cmd)
  self.assertIn("--output-format", cmd)
  self.assertIn("stream-json", cmd)
  # Message should NOT be in cmd now
  self.assertNotIn(message, cmd)
  # Verify message was sent via communicate
  process_mock.communicate.assert_called_once_with(input=message)
  # Check process configuration
  self.assertEqual(kwargs.get('stdout'), subprocess.PIPE)
  self.assertEqual(kwargs.get('stdin'), subprocess.PIPE)
  self.assertEqual(kwargs.get('text'), True)
 @patch('subprocess.Popen')
 def test_send_parses_jsonl_output(self, mock_popen: Any) -> None:
  """
  Verify that it correctly parses multiple JSONL 'message' events
  and returns the combined text.
  """
  jsonl_output = [
   json.dumps({"type": "message", "role": "model", "text": "The quick brown "}),
   json.dumps({"type": "message", "role": "model", "text": "fox jumps."}),
   json.dumps({"type": "result", "usage": {"prompt_tokens": 5, "candidates_tokens": 5}})
  ]
  stdout_content = "\n".join(jsonl_output) + "\n"
  process_mock = MagicMock()
  process_mock.communicate.return_value = (stdout_content, "")
  process_mock.poll.return_value = 0
  process_mock.wait.return_value = 0
  mock_popen.return_value = process_mock
  result = self.adapter.send("test message")
  self.assertEqual(result["text"], "The quick brown fox jumps.")
  self.assertEqual(result["tool_calls"], [])
 @patch('subprocess.Popen')
 def test_send_handles_tool_use_events(self, mock_popen: Any) -> None:
  """
  Verify that it correctly handles 'tool_use' events in the stream
  by continuing to read until the final 'result' event.
  """
  jsonl_output = [
   json.dumps({"type": "message", "role": "assistant", "text": "Calling tool..."}),
   json.dumps({"type": "tool_use", "name": "read_file", "args": {"path": "test.txt"}}),
   json.dumps({"type": "message", "role": "assistant", "text": "\nFile read successfully."}),
   json.dumps({"type": "result", "usage": {}})
  ]
  stdout_content = "\n".join(jsonl_output) + "\n"
  process_mock = MagicMock()
  process_mock.communicate.return_value = (stdout_content, "")
  process_mock.poll.return_value = 0
  process_mock.wait.return_value = 0
  mock_popen.return_value = process_mock
  result = self.adapter.send("read test.txt")
  # Result should contain the combined text from all 'message' events
  self.assertEqual(result["text"], "Calling tool...\nFile read successfully.")
  self.assertEqual(len(result["tool_calls"]), 1)
  self.assertEqual(result["tool_calls"][0]["name"], "read_file")
 @patch('subprocess.Popen')
 def test_send_captures_usage_metadata(self, mock_popen: Any) -> None:
  """
  Verify that usage data is extracted from the 'result' event.
  """
usage_data = {"total_tokens": 42}
jsonl_output = [
json.dumps({"type": "message", "text": "Finalizing"}),
json.dumps({"type": "result", "usage": usage_data})
]
stdout_content = "\n".join(jsonl_output) + "\n"
process_mock = MagicMock()
process_mock.communicate.return_value = (stdout_content, "")
process_mock.poll.return_value = 0
process_mock.wait.return_value = 0
mock_popen.return_value = process_mock
self.adapter.send("usage test")
# Verify the usage was captured in the adapter instance
self.assertEqual(self.adapter.last_usage, usage_data)
usage_data = {"total_tokens": 42}
jsonl_output = [
json.dumps({"type": "message", "text": "Finalizing"}),
json.dumps({"type": "result", "usage": usage_data})
]
stdout_content = "\n".join(jsonl_output) + "\n"
process_mock = MagicMock()
process_mock.communicate.return_value = (stdout_content, "")
process_mock.poll.return_value = 0
process_mock.wait.return_value = 0
mock_popen.return_value = process_mock
self.adapter.send("usage test")
# Verify the usage was captured in the adapter instance
self.assertEqual(self.adapter.last_usage, usage_data)
if __name__ == '__main__':
 unittest.main()
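
Read together, the four tests imply a send() that builds [binary, "--output-format", "stream-json"], pipes the message through communicate(), accumulates 'message' text, collects 'tool_use' events, and stashes the 'result' usage. A sketch of that shape (an assumption; the real adapter module may differ):

import json, subprocess
from typing import Any

class GeminiCliAdapter:
 def __init__(self, binary_path: str = "gemini") -> None:
  self.binary_path = binary_path
  self.last_usage: dict[str, Any] = {}
 def send(self, message: str) -> dict[str, Any]:
  cmd = [self.binary_path, "--output-format", "stream-json"]
  proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
  stdout, _ = proc.communicate(input=message)  # message via stdin, never argv
  text_parts: list[str] = []
  tool_calls: list[dict[str, Any]] = []
  for line in stdout.splitlines():
   if not line.strip():
    continue
   event = json.loads(line)
   kind = event.get("type")
   if kind == "message":
    text_parts.append(event.get("text", ""))
   elif kind == "tool_use":
    tool_calls.append(event)  # keeps name/args for the caller
   elif kind == "result":
    self.last_usage = event.get("usage", {})
  return {"text": "".join(text_parts), "tool_calls": tool_calls}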

View File

@@ -7,67 +7,67 @@ import mma_prompts
class TestOrchestratorPM(unittest.TestCase):
 @patch('summarize.build_summary_markdown')
 @patch('ai_client.send')
 def test_generate_tracks_success(self, mock_send: Any, mock_summarize: Any) -> None:
  # Setup mocks
  mock_summarize.return_value = "REPO_MAP_CONTENT"
  mock_response_data = [
   {
    "id": "track_1",
    "type": "Track",
    "module": "test_module",
    "persona": "Tech Lead",
    "severity": "Medium",
    "goal": "Test goal",
    "acceptance_criteria": ["criteria 1"]
   }
  ]
  mock_send.return_value = json.dumps(mock_response_data)
  user_request = "Implement unit tests"
  project_config = {"files": {"paths": ["src"]}}
  file_items = [{"path": "src/main.py", "content": "print('hello')"}]
  # Execute
  result = orchestrator_pm.generate_tracks(user_request, project_config, file_items)
  # Verify summarize call
  mock_summarize.assert_called_once_with(file_items)
  # Verify ai_client.send call
  expected_system_prompt = mma_prompts.PROMPTS['tier1_epic_init']
  mock_send.assert_called_once()
  args, kwargs = mock_send.call_args
  self.assertEqual(kwargs['md_content'], "")
  # The system prompt is now set globally, so it cannot be checked via
  # mock_send kwargs; we can still verify the user_message contents.
  self.assertIn(user_request, kwargs['user_message'])
  self.assertIn("REPO_MAP_CONTENT", kwargs['user_message'])
  # Verify result
  self.assertEqual(result[0]['id'], mock_response_data[0]['id'])
 @patch('summarize.build_summary_markdown')
 @patch('ai_client.send')
 def test_generate_tracks_markdown_wrapped(self, mock_send: Any, mock_summarize: Any) -> None:
  mock_summarize.return_value = "REPO_MAP"
  mock_response_data = [{"id": "track_1"}]
  expected_result = [{"id": "track_1", "title": "Untitled Track"}]
  # Wrapped in ```json ... ```
  mock_send.return_value = f"Here is the plan:\n```json\n{json.dumps(mock_response_data)}\n```\nHope this helps."
  result = orchestrator_pm.generate_tracks("req", {}, [])
  self.assertEqual(result, expected_result)
  # Wrapped in ``` ... ```
  mock_send.return_value = f"```\n{json.dumps(mock_response_data)}\n```"
  result = orchestrator_pm.generate_tracks("req", {}, [])
  self.assertEqual(result, expected_result)
 @patch('summarize.build_summary_markdown')
 @patch('ai_client.send')
 def test_generate_tracks_malformed_json(self, mock_send: Any, mock_summarize: Any) -> None:
  mock_summarize.return_value = "REPO_MAP"
  mock_send.return_value = "NOT A JSON"
  # Should return an empty list and print the parse error
  with patch('builtins.print') as mock_print:
   result = orchestrator_pm.generate_tracks("req", {}, [])
   self.assertEqual(result, [])
   mock_print.assert_any_call("Error parsing Tier 1 response: Expecting value: line 1 column 1 (char 0)")
if __name__ == '__main__':
 unittest.main()
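
Both parse paths hinge on one detail: the model reply may wrap its JSON in a ```json or bare ``` fence, possibly with prose around it. A fence-stripping helper sketch (assumed; orchestrator_pm may structure this differently):

import json, re
from typing import Any

def extract_json(response: str) -> Any:
 # Prefer the first fenced block; otherwise parse the raw text.
 match = re.search(r"```(?:json)?\s*\n(.*?)\n```", response, re.DOTALL)
 payload = match.group(1) if match else response
 # "NOT A JSON" raises: Expecting value: line 1 column 1 (char 0)
 return json.loads(payload)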

View File

@@ -10,7 +10,7 @@ import session_logger
@pytest.fixture
def temp_logs(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Generator[Path, None, None]:
 # Ensure closed before starting
 session_logger.close_session()
 monkeypatch.setattr(session_logger, "_comms_fh", None)
 # Mock _LOG_DIR in session_logger

View File

@@ -15,15 +15,14 @@ def test_api_ask_client_method(live_gui) -> None:
 def make_blocking_request() -> None:
  try:
   # This call should block until we respond
   results["response"] = client.request_confirmation(
    tool_name="powershell",
    args={"command": "echo hello"}
   )
  except Exception as e:
   results["error"] = str(e)
 # Start the request in a background thread
 t = threading.Thread(target=make_blocking_request)
 t.start()
 # Poll for the 'ask_received' event

View File

@@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient
class TestMMAGUIRobust(unittest.TestCase):
 @classmethod
 def setUpClass(cls) -> None:
  # 1. Launch gui_2.py with --enable-test-hooks
  cls.gui_command = [sys.executable, "gui_2.py", "--enable-test-hooks"]
  print(f"Launching GUI: {' '.join(cls.gui_command)}")
  cls.gui_process = subprocess.Popen(