Compare commits

..

3 Commits

4 changed files with 185 additions and 173 deletions

View File

@@ -8,5 +8,5 @@ active = "main"
[discussions.main]
git_commit = ""
last_updated = "2026-03-05T17:26:48"
last_updated = "2026-03-05T19:00:38"
history = []

View File

@@ -2,95 +2,106 @@ from typing import Any
from unittest.mock import MagicMock, patch
from src import ai_client
class MockUsage:
    """Stand-in for the Gemini usage-metadata object attached to a response.

    Uses fixed token counts so assertions on usage accounting are
    deterministic across test runs.
    """

    def __init__(self) -> None:
        self.prompt_token_count = 10
        self.candidates_token_count = 5
        self.total_token_count = 15
        self.cached_content_token_count = 0
class MockPart:
    """Stand-in for one response content part.

    A part carries either plain text or a function (tool) call; either
    attribute may be None depending on what the fake response models.
    """

    def __init__(self, text: Any, function_call: Any) -> None:
        self.text = text
        self.function_call = function_call
class MockContent:
    """Stand-in for response content: a simple container of parts."""

    def __init__(self, parts: Any) -> None:
        self.parts = parts
class MockCandidate:
    """Stand-in for a Gemini response candidate.

    Wraps the given parts in a MockContent and reports a "STOP" finish
    reason, which is what a normally completed generation returns.
    """

    def __init__(self, parts: Any) -> None:
        self.content = MockContent(parts)
        self.finish_reason = MagicMock()
        self.finish_reason.name = "STOP"
def test_ai_client_event_emitter_exists() -> None:
    """The ai_client module must expose a module-level `events` emitter."""
    # This should fail initially because 'events' won't exist on ai_client
    assert hasattr(ai_client, "events")
def test_event_emission() -> None:
    """Registering a listener and emitting an event delivers the payload once."""
    callback = MagicMock()
    ai_client.events.on("test_event", callback)
    ai_client.events.emit("test_event", payload={"data": 123})
    callback.assert_called_once_with(payload={"data": 123})
def test_send_emits_events_proper() -> None:
    """send() must emit 'request_start' and 'response_received' events.

    The Gemini client is fully mocked; the streaming call yields a single
    fake response chunk, and the 'request_start' payload is checked for
    the provider name.
    """
    ai_client.reset_session()
    with (
        patch("src.ai_client._ensure_gemini_client"),
        patch("src.ai_client._gemini_client") as mock_client,
    ):
        mock_chat = MagicMock()
        mock_client.chats.create.return_value = mock_chat
        mock_response = MagicMock()
        mock_response.candidates = [MockCandidate([MockPart("gemini response", None)])]
        mock_response.usage_metadata = MockUsage()
        mock_response.text = "gemini response"
        mock_response.candidates[0].finish_reason.name = "STOP"
        # Streaming API yields chunks; the non-streaming path returns the
        # response object directly — stub both.
        mock_chat.send_message_stream.return_value = iter([mock_response])
        mock_chat.send_message.return_value = mock_response
        start_callback = MagicMock()
        response_callback = MagicMock()
        ai_client.events.on("request_start", start_callback)
        ai_client.events.on("response_received", response_callback)
        ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
        ai_client.send("context", "message", stream_callback=lambda x: None)
        assert start_callback.called
        assert response_callback.called
        args, kwargs = start_callback.call_args
        assert kwargs["payload"]["provider"] == "gemini"
def test_send_emits_tool_events() -> None:
    """send() with tools enabled must emit 'tool_execution' events.

    The mocked model returns a response containing a function call, the
    MCP dispatcher is stubbed to return a fixed result, and the test only
    requires that at least one tool event was observed.
    """
    ai_client.reset_session()
    with (
        patch("src.ai_client._ensure_gemini_client"),
        patch("src.ai_client._gemini_client") as mock_client,
        patch("src.mcp_client.dispatch") as mock_dispatch,
    ):
        mock_chat = MagicMock()
        mock_client.chats.create.return_value = mock_chat
        # Fake function call requesting a tool invocation.
        mock_fc = MagicMock()
        mock_fc.name = "read_file"
        mock_fc.args = {"path": "test.txt"}
        mock_response_with_tool = MagicMock()
        mock_response_with_tool.candidates = [
            MockCandidate([MockPart("tool call text", mock_fc)])
        ]
        mock_response_with_tool.usage_metadata = MockUsage()
        mock_response_with_tool.text = "tool call text"
        mock_response_final = MagicMock()
        mock_response_final.candidates = [
            MockCandidate([MockPart("final answer", None)])
        ]
        mock_response_final.usage_metadata = MockUsage()
        mock_response_final.text = "final answer"
        # Lambdas build a fresh iterator per call so repeated streaming
        # calls do not exhaust a shared iterator.
        mock_chat.send_message_stream.side_effect = lambda *a, **kw: iter(
            [mock_response_with_tool]
        )
        mock_chat.send_message.side_effect = lambda *a, **kw: mock_response_with_tool
        mock_dispatch.return_value = "file content"
        ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
        tool_callback = MagicMock()

        def debug_tool(*args, **kwargs):
            # Indirection point kept for ad-hoc debugging of event payloads.
            tool_callback(*args, **kwargs)

        ai_client.events.on("tool_execution", debug_tool)
        ai_client.send("context", "message", enable_tools=True)
        assert tool_callback.call_count >= 1

View File

@@ -6,7 +6,7 @@ import os
# Make the project root and src/ importable when tests run from this directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")))

from src.api_hook_client import ApiHookClient
def test_comms_volume_stress_performance(live_gui) -> None:
"""

View File

@@ -3,108 +3,109 @@ from unittest.mock import patch
import time
from src.gui_2 import App
def test_mma_ui_state_initialization(app_instance: App) -> None:
    """Verifies that the new MMA UI state variables are initialized correctly."""
    assert hasattr(app_instance, "ui_epic_input")
    assert hasattr(app_instance, "proposed_tracks")
    assert hasattr(app_instance, "_show_track_proposal_modal")
    assert hasattr(app_instance, "mma_streams")
    # Fresh app: empty input, no proposals, modal hidden, no streams.
    assert app_instance.ui_epic_input == ""
    assert app_instance.proposed_tracks == []
    assert app_instance._show_track_proposal_modal is False
    assert app_instance.mma_streams == {}
def test_process_pending_gui_tasks_show_track_proposal(app_instance: App) -> None:
    """Verifies that the 'show_track_proposal' action correctly updates the UI state."""
    mock_tracks = [{"id": "track_1", "title": "Test Track"}]
    task = {"action": "show_track_proposal", "payload": mock_tracks}
    app_instance._pending_gui_tasks.append(task)
    app_instance._process_pending_gui_tasks()
    # Processing the task stores the proposal and opens the modal.
    assert app_instance.proposed_tracks == mock_tracks
    assert app_instance._show_track_proposal_modal is True
def test_cb_plan_epic_launches_thread(app_instance: App) -> None:
    """Verifies that _cb_plan_epic launches a thread and eventually queues a task.

    All orchestrator, aggregation, and project-manager calls are mocked so
    the background thread completes quickly; the test then polls the
    pending-task queue with a bounded wait.
    """
    app_instance.ui_epic_input = "Develop a new feature"
    app_instance.active_project_path = "test_project.toml"
    mock_tracks = [{"id": "track_1", "title": "Test Track"}]
    with (
        patch(
            "src.orchestrator_pm.get_track_history_summary",
            return_value="History summary",
        ) as mock_get_history,
        patch(
            "src.orchestrator_pm.generate_tracks", return_value=mock_tracks
        ) as mock_gen_tracks,
        patch("src.aggregate.build_file_items", return_value=[]),
    ):
        with (
            patch("src.project_manager.load_project", return_value={}),
            patch("src.project_manager.flat_config", return_value={}),
        ):
            app_instance._cb_plan_epic()
            # Wait for the background thread to finish (quick with mocks).
            max_wait = 5
            start_time = time.time()
            while (
                len(app_instance._pending_gui_tasks) < 3
                and time.time() - start_time < max_wait
            ):
                time.sleep(0.1)
            assert len(app_instance._pending_gui_tasks) >= 3
            # Order-insensitive check: both expected actions must be queued.
            actions = [t["action"] for t in app_instance._pending_gui_tasks]
            assert "handle_ai_response" in actions
            assert "show_track_proposal" in actions
            mock_get_history.assert_called_once()
            mock_gen_tracks.assert_called_once()
def test_process_pending_gui_tasks_mma_spawn_approval(app_instance: App) -> None:
    """Verifies that the 'mma_spawn_approval' action correctly updates the UI state."""
    task = {
        "action": "mma_spawn_approval",
        "ticket_id": "T1",
        "role": "Tier 3 Worker",
        "prompt": "Test Prompt",
        "context_md": "Test Context",
        "dialog_container": [None],
    }
    app_instance._pending_gui_tasks.append(task)
    app_instance._process_pending_gui_tasks()
    # The task is staged for approval and the spawn dialog opens in view mode.
    assert app_instance._pending_mma_spawn == task
    assert app_instance._mma_spawn_prompt == "Test Prompt"
    assert app_instance._mma_spawn_context == "Test Context"
    assert app_instance._mma_spawn_open is True
    assert app_instance._mma_spawn_edit_mode is False
    # The dialog handle is written back into the caller-supplied container.
    assert task["dialog_container"][0] is not None
def test_handle_ai_response_with_stream_id(app_instance: App) -> None:
    """Verifies routing to mma_streams."""
    task = {
        "action": "handle_ai_response",
        "payload": {
            "text": "Tier 1 Strategy Content",
            "stream_id": "Tier 1",
            "status": "Thinking...",
        },
    }
    app_instance._pending_gui_tasks.append(task)
    app_instance._process_pending_gui_tasks()
    # With a stream_id the text goes to the named stream, not ai_response.
    assert app_instance.mma_streams.get("Tier 1") == "Tier 1 Strategy Content"
    assert app_instance.ai_status == "Thinking..."
    assert app_instance.ai_response == ""
def test_handle_ai_response_fallback(app_instance: App) -> None:
    """Verifies fallback to ai_response when stream_id is missing."""
    task = {
        "action": "handle_ai_response",
        "payload": {"text": "Regular AI Response", "status": "done"},
    }
    app_instance._pending_gui_tasks.append(task)
    app_instance._process_pending_gui_tasks()
    # Without a stream_id the text lands in ai_response and no stream is created.
    assert app_instance.ai_response == "Regular AI Response"
    assert app_instance.ai_status == "done"
    assert len(app_instance.mma_streams) == 0