WIP: PAIN4

This commit is contained in:
2026-03-05 15:53:50 -05:00
parent 184fb39e53
commit d04574aa8f
11 changed files with 127 additions and 835 deletions

View File

@@ -298,7 +298,6 @@ class AppController:
'show_confirm_modal': 'show_confirm_modal',
'mma_epic_input': 'ui_epic_input',
'mma_status': 'mma_status',
'ai_status': 'ai_status',
'mma_active_tier': 'active_tier',
'ui_new_track_name': 'ui_new_track_name',
'ui_new_track_desc': 'ui_new_track_desc',
@@ -314,11 +313,26 @@ class AppController:
'mma_streams': 'mma_streams',
'active_track': 'active_track',
'active_tickets': 'active_tickets',
'tracks': 'tracks'
'tracks': 'tracks',
'thinking_indicator': 'thinking_indicator',
'operations_live_indicator': 'operations_live_indicator',
'prior_session_indicator': 'prior_session_indicator'
})
self._init_actions()
@property
def thinking_indicator(self) -> bool:
    """True while the AI client is mid-request (sending or streaming)."""
    return self.ai_status in {"sending...", "streaming..."}
@property
def operations_live_indicator(self) -> bool:
    """True when the operations view reflects the live session (not a replay)."""
    viewing_prior = self.is_viewing_prior_session
    return not viewing_prior
@property
def prior_session_indicator(self) -> bool:
    """True when a previously recorded session is currently being viewed."""
    return self.is_viewing_prior_session
def _init_actions(self) -> None:
# Set up state-related action maps
self._clickable_actions: dict[str, Callable[..., Any]] = {
@@ -357,6 +371,14 @@ class AppController:
"payload": status
})
def _set_mma_status(self, status: str) -> None:
    """Thread-safe update of mma_status via the GUI task queue."""
    task = {"action": "set_mma_status", "payload": status}
    # Only append under the lock; the GUI thread drains this queue later.
    with self._pending_gui_tasks_lock:
        self._pending_gui_tasks.append(task)
def _process_pending_gui_tasks(self) -> None:
if not self._pending_gui_tasks:
return
@@ -375,6 +397,8 @@ class AppController:
self.ai_status = task.get("payload", "")
sys.stderr.write(f"[DEBUG] Updated ai_status via task to: {self.ai_status}\n")
sys.stderr.flush()
elif action == "set_mma_status":
self.mma_status = task.get("payload", "")
elif action == "handle_ai_response":
payload = task.get("payload", {})
text = payload.get("text", "")
@@ -660,11 +684,11 @@ class AppController:
except json.JSONDecodeError:
continue
except Exception as e:
self.ai_status = f"log load error: {e}"
self._set_status(f"log load error: {e}")
return
self.prior_session_entries = entries
self.is_viewing_prior_session = True
self.ai_status = f"viewing prior session: {Path(path).name} ({len(entries)} entries)"
self._set_status(f"viewing prior session: {Path(path).name} ({len(entries)} entries)")
def _load_active_project(self) -> None:
"""Loads the active project configuration, with fallbacks."""
@@ -705,7 +729,7 @@ class AppController:
thread.start()
def _fetch_models(self, provider: str) -> None:
self.ai_status = "fetching models..."
self._set_status("fetching models...")
def do_fetch() -> None:
try:
models = ai_client.list_models(provider)
@@ -713,9 +737,9 @@ class AppController:
if self.current_model not in models and models:
self.current_model = models[0]
ai_client.set_provider(self._current_provider, self.current_model)
self.ai_status = f"models loaded: {len(models)}"
self._set_status(f"models loaded: {len(models)}")
except Exception as e:
self.ai_status = f"model fetch error: {e}"
self._set_status(f"model fetch error: {e}")
self.models_thread = threading.Thread(target=do_fetch, daemon=True)
self.models_thread.start()
@@ -822,6 +846,7 @@ class AppController:
def _handle_request_event(self, event: events.UserRequestEvent) -> None:
"""Processes a UserRequestEvent by calling the AI client."""
ai_client.current_tier = None # Ensure main discussion is untagged
if self.ui_auto_add_history:
with self._pending_history_adds_lock:
self._pending_history_adds.append({
@@ -931,10 +956,10 @@ class AppController:
if self.test_hooks_enabled and not getattr(self, "ui_manual_approve", False):
sys.stderr.write("[DEBUG] Auto-approving script.\n")
sys.stderr.flush()
self.ai_status = "running powershell..."
self._set_status("running powershell...")
output = shell_runner.run_powershell(script, base_dir, qa_callback=qa_callback)
self._append_tool_log(script, output)
self.ai_status = "powershell done, awaiting AI..."
self._set_status("powershell done, awaiting AI...")
return output
sys.stderr.write("[DEBUG] Creating ConfirmDialog.\n")
@@ -972,10 +997,10 @@ class AppController:
if not approved:
self._append_tool_log(final_script, "REJECTED by user")
return None
self.ai_status = "running powershell..."
self._set_status("running powershell...")
output = shell_runner.run_powershell(final_script, base_dir, qa_callback=qa_callback)
self._append_tool_log(final_script, output)
self.ai_status = "powershell done, awaiting AI..."
self._set_status("powershell done, awaiting AI...")
return output
def _append_tool_log(self, script: str, result: str, source_tier: str | None = None) -> None:
@@ -1068,12 +1093,53 @@ class AppController:
state[key] = val
return state
@api.get("/api/gui/mma_status", dependencies=[Depends(get_api_key)])
def get_mma_status() -> dict[str, Any]:
"""Dedicated endpoint for MMA-related status."""
return {
"mma_status": self.mma_status,
"ai_status": self.ai_status,
"mma_streams": self.mma_streams,
"active_tier": self.active_tier,
"active_tickets": self.active_tickets,
"proposed_tracks": self.proposed_tracks
}
@api.post("/api/gui", dependencies=[Depends(get_api_key)])
def post_gui(req: dict) -> dict[str, str]:
"""Pushes a GUI task to the event queue."""
self.event_queue.put("gui_task", req)
return {"status": "queued"}
@api.get("/api/session", dependencies=[Depends(get_api_key)])
def get_api_session() -> dict[str, Any]:
"""Returns current discussion session entries."""
with self._disc_entries_lock:
return {"session": {"entries": self.disc_entries}}
@api.post("/api/session", dependencies=[Depends(get_api_key)])
def post_api_session(req: dict) -> dict[str, str]:
"""Updates session entries."""
entries = req.get("entries", [])
with self._disc_entries_lock:
self.disc_entries = entries
return {"status": "updated"}
@api.get("/api/project", dependencies=[Depends(get_api_key)])
def get_api_project() -> dict[str, Any]:
"""Returns current project data."""
return {"project": self.project}
@api.get("/api/performance", dependencies=[Depends(get_api_key)])
def get_performance() -> dict[str, Any]:
"""Returns performance monitor metrics."""
return {"performance": self.perf_monitor.get_metrics()}
@api.get("/api/gui/diagnostics", dependencies=[Depends(get_api_key)])
def get_diagnostics() -> dict[str, Any]:
"""Alias for performance metrics."""
return self.perf_monitor.get_metrics()
@api.get("/status", dependencies=[Depends(get_api_key)])
def status() -> dict[str, Any]:
"""Returns the current status of the application."""
@@ -1234,7 +1300,7 @@ class AppController:
self._save_active_project()
self._flush_to_config()
save_config(self.config)
self.ai_status = "config saved"
self._set_status("config saved")
def _cb_disc_create(self) -> None:
nm = self.ui_disc_new_name_input.strip()
@@ -1244,7 +1310,7 @@ class AppController:
def _switch_project(self, path: str) -> None:
if not Path(path).exists():
self.ai_status = f"project file not found: {path}"
self._set_status(f"project file not found: {path}")
return
self._flush_to_project()
self._save_active_project()
@@ -1252,11 +1318,10 @@ class AppController:
self.project = project_manager.load_project(path)
self.active_project_path = path
except Exception as e:
self.ai_status = f"failed to load project: {e}"
self._set_status(f"failed to load project: {e}")
return
self._refresh_from_project()
self.ai_status = f"switched to: {Path(path).stem}"
self._set_status(f"switched to: {Path(path).stem}")
def _refresh_from_project(self) -> None:
self.files = list(self.project.get("files", {}).get("paths", []))
self.screenshots = list(self.project.get("screenshots", {}).get("paths", []))
@@ -1270,10 +1335,11 @@ class AppController:
self.ui_output_dir = proj.get("output", {}).get("output_dir", "./md_gen")
self.ui_files_base_dir = proj.get("files", {}).get("base_dir", ".")
self.ui_shots_base_dir = proj.get("screenshots", {}).get("base_dir", ".")
self.ui_project_git_dir = proj.get("project", {}).get("git_dir", "")
self.ui_project_system_prompt = proj.get("project", {}).get("system_prompt", "")
self.ui_project_main_context = proj.get("project", {}).get("main_context", "")
self.ui_gemini_cli_path = proj.get("gemini_cli", {}).get("binary_path", "gemini")
proj_meta = self.project.get("project", {})
self.ui_project_git_dir = proj_meta.get("git_dir", "")
self.ui_project_system_prompt = proj_meta.get("system_prompt", "")
self.ui_project_main_context = proj_meta.get("main_context", "")
self.ui_gemini_cli_path = self.project.get("gemini_cli", {}).get("binary_path", "gemini")
self.ui_auto_add_history = proj.get("discussion", {}).get("auto_add", False)
self.ui_auto_scroll_comms = proj.get("project", {}).get("auto_scroll_comms", True)
self.ui_auto_scroll_tool_calls = proj.get("project", {}).get("auto_scroll_tool_calls", True)
@@ -1337,9 +1403,9 @@ class AppController:
else:
self.disc_entries = []
self._recalculate_session_usage()
self.ai_status = f"Loaded track: {state.metadata.name}"
self._set_status(f"Loaded track: {state.metadata.name}")
except Exception as e:
self.ai_status = f"Load track error: {e}"
self._set_status(f"Load track error: {e}")
print(f"Error loading track {track_id}: {e}")
def _save_active_project(self) -> None:
@@ -1347,7 +1413,7 @@ class AppController:
try:
project_manager.save_project(self.project, self.active_project_path)
except Exception as e:
self.ai_status = f"save error: {e}"
self._set_status(f"save error: {e}")
def _get_discussion_names(self) -> list[str]:
disc_sec = self.project.get("discussion", {})
@@ -1359,7 +1425,7 @@ class AppController:
disc_sec = self.project.get("discussion", {})
discussions = disc_sec.get("discussions", {})
if name not in discussions:
self.ai_status = f"discussion not found: {name}"
self._set_status(f"discussion not found: {name}")
return
self.active_discussion = name
self._track_discussion_active = False
@@ -1367,7 +1433,7 @@ class AppController:
disc_data = discussions[name]
with self._disc_entries_lock:
self.disc_entries = models.parse_history_entries(disc_data.get("history", []), self.disc_roles)
self.ai_status = f"discussion: {name}"
self._set_status(f"discussion: {name}")
def _flush_disc_entries_to_project(self) -> None:
history_strings = [project_manager.entry_to_str(e) for e in self.disc_entries]
@@ -1384,7 +1450,7 @@ class AppController:
disc_sec = self.project.setdefault("discussion", {})
discussions = disc_sec.setdefault("discussions", {})
if name in discussions:
self.ai_status = f"discussion '{name}' already exists"
self._set_status(f"discussion '{name}' already exists")
return
discussions[name] = project_manager.default_discussion()
self._switch_discussion(name)
@@ -1395,7 +1461,7 @@ class AppController:
if old_name not in discussions:
return
if new_name in discussions:
self.ai_status = f"discussion '{new_name}' already exists"
self._set_status(f"discussion '{new_name}' already exists")
return
discussions[new_name] = discussions.pop(old_name)
if self.active_discussion == old_name:
@@ -1406,7 +1472,7 @@ class AppController:
disc_sec = self.project.get("discussion", {})
discussions = disc_sec.get("discussions", {})
if len(discussions) <= 1:
self.ai_status = "cannot delete the last discussion"
self._set_status("cannot delete the last discussion")
return
if name not in discussions:
return
@@ -1489,7 +1555,7 @@ class AppController:
for d_name in discussions:
discussions[d_name]["history"] = []
self.ai_status = "session reset"
self._set_status("session reset")
self.ai_response = ""
self.ui_ai_input = ""
self.ui_manual_approve = False
@@ -1512,11 +1578,11 @@ class AppController:
md, path, *_ = self._do_generate()
self.last_md = md
self.last_md_path = path
self.ai_status = f"md written: {path.name}"
self._set_status(f"md written: {path.name}")
# Refresh token budget metrics with CURRENT md
self._refresh_api_metrics({}, md_content=md)
except Exception as e:
self.ai_status = f"error: {e}"
self._set_status(f"error: {e}")
threading.Thread(target=worker, daemon=True).start()
def _handle_generate_send(self) -> None:
@@ -1531,7 +1597,7 @@ class AppController:
self.last_md_path = path
self.last_file_items = file_items
self.ai_status = "sending..."
self._set_status("sending...")
user_msg = self.ui_ai_input
base_dir = self.ui_files_base_dir
sys.stderr.write(f"[DEBUG] _do_generate success. Prompt: {user_msg[:50]}...\n")
@@ -1552,7 +1618,7 @@ class AppController:
import traceback
sys.stderr.write(f"[DEBUG] _do_generate ERROR: {e}\n{traceback.format_exc()}\n")
sys.stderr.flush()
self.ai_status = f"generate error: {e}"
self._set_status(f"generate error: {e}")
threading.Thread(target=worker, daemon=True).start()
def _recalculate_session_usage(self) -> None:
@@ -1652,7 +1718,7 @@ class AppController:
sys.stderr.write("[DEBUG] _cb_plan_epic _bg_task started\n")
sys.stderr.flush()
try:
self.ai_status = "Planning Epic (Tier 1)..."
self._set_status("Planning Epic (Tier 1)...")
history = orchestrator_pm.get_track_history_summary()
sys.stderr.write(f"[DEBUG] History summary length: {len(history)}\n")
sys.stderr.flush()
@@ -1687,7 +1753,7 @@ class AppController:
"payload": tracks
})
except Exception as e:
self.ai_status = f"Epic plan error: {e}"
self._set_status(f"Epic plan error: {e}")
print(f"ERROR in _cb_plan_epic background task: {e}")
threading.Thread(target=_bg_task, daemon=True).start()
@@ -1744,12 +1810,12 @@ class AppController:
if self.active_track:
# Use the active track object directly to start execution
self.mma_status = "running"
self._set_mma_status("running")
engine = multi_agent_conductor.ConductorEngine(self.active_track, self.event_queue, auto_queue=not self.mma_step_mode)
flat = project_manager.flat_config(self.project, self.active_discussion, track_id=self.active_track.id)
full_md, _, _ = aggregate.run(flat)
threading.Thread(target=engine.run, kwargs={"md_content": full_md}, daemon=True).start()
self.ai_status = f"Track '{self.active_track.description}' started."
self._set_status(f"Track '{self.active_track.description}' started.")
return
idx = 0
@@ -1761,7 +1827,7 @@ class AppController:
track_data = self.proposed_tracks[idx]
title = track_data.get("title") or track_data.get("goal", "Untitled Track")
threading.Thread(target=lambda: self._start_track_logic(track_data), daemon=True).start()
self.ai_status = f"Track '{title}' started."
self._set_status(f"Track '{title}' started.")
def _start_track_logic(self, track_data: dict[str, Any], skeletons_str: str | None = None) -> None:
try:

View File

@@ -1,41 +0,0 @@
import asyncio
from events import AsyncEventQueue
def test_async_event_queue_put_get() -> None:
    """A single event round-trips through the async queue unchanged."""
    async def _scenario() -> None:
        queue = AsyncEventQueue()
        sent_name, sent_payload = "test_event", {"data": "hello"}
        await queue.put(sent_name, sent_payload)
        got_name, got_payload = await queue.get()
        assert got_name == sent_name
        assert got_payload == sent_payload
    asyncio.run(_scenario())
def test_async_event_queue_multiple() -> None:
    """Events come back in the exact order they were enqueued (FIFO)."""
    async def _scenario() -> None:
        queue = AsyncEventQueue()
        await queue.put("event1", 1)
        await queue.put("event2", 2)
        first = await queue.get()
        second = await queue.get()
        assert first == ("event1", 1)
        assert second == ("event2", 2)
    asyncio.run(_scenario())
def test_async_event_queue_none_payload() -> None:
    """Putting an event without a payload yields None on retrieval."""
    async def _scenario() -> None:
        queue = AsyncEventQueue()
        await queue.put("no_payload")
        name, payload = await queue.get()
        assert name == "no_payload"
        assert payload is None
    asyncio.run(_scenario())

View File

@@ -1,321 +0,0 @@
import pytest
from unittest.mock import MagicMock, patch
from src.models import Ticket, Track, WorkerContext
from src import ai_client
# These tests define the expected interface for multi_agent_conductor.py
# which will be implemented in the next phase of TDD.
def test_conductor_engine_initialization() -> None:
    """ConductorEngine keeps a reference to the Track it was built from."""
    from src.multi_agent_conductor import ConductorEngine
    sample_track = Track(id="test_track", description="Test Track")
    engine = ConductorEngine(track=sample_track, auto_queue=True)
    assert engine.track == sample_track
@pytest.mark.asyncio
async def test_conductor_engine_run_executes_tickets_in_order(monkeypatch: pytest.MonkeyPatch, vlogger) -> None:
    """run() drains executable tickets, honoring the T1 -> T2 dependency."""
    first = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
    second = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker2", depends_on=["T1"])
    track = Track(id="track1", description="Track 1", tickets=[first, second])
    from src.multi_agent_conductor import ConductorEngine
    engine = ConductorEngine(track=track, auto_queue=True)
    vlogger.log_state("Ticket Count", 0, 2)
    vlogger.log_state("T1 Status", "todo", "todo")
    vlogger.log_state("T2 Status", "todo", "todo")
    # Keep the real AI client out of the loop.
    monkeypatch.setattr(ai_client, 'send', MagicMock())
    with patch("src.multi_agent_conductor.run_worker_lifecycle") as mock_lifecycle:
        # Completing each ticket lets the engine unlock its dependents.
        def fake_lifecycle(ticket, context, *args, **kwargs):
            ticket.mark_complete()
            return "Success"
        mock_lifecycle.side_effect = fake_lifecycle
        await engine.run()
        vlogger.log_state("T1 Status Final", "todo", first.status)
        vlogger.log_state("T2 Status Final", "todo", second.status)
        assert mock_lifecycle.call_count == 2
        assert first.status == "completed"
        assert second.status == "completed"
        # Dependency order: T1 must have run strictly before T2.
        ordered_ids = [call[0][0].id for call in mock_lifecycle.call_args_list]
        assert ordered_ids == ["T1", "T2"]
        vlogger.finalize("Verify dependency execution order", "PASS", "T1 executed before T2")
@pytest.mark.asyncio
async def test_run_worker_lifecycle_calls_ai_client_send(monkeypatch: pytest.MonkeyPatch) -> None:
    """A successful send() completes the ticket and embeds its description in the prompt."""
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    from src.multi_agent_conductor import run_worker_lifecycle
    reply = "Task complete. I have updated the file."
    mock_send = MagicMock(return_value=reply)
    monkeypatch.setattr(ai_client, 'send', mock_send)
    assert run_worker_lifecycle(ticket, context) == reply
    assert ticket.status == "completed"
    mock_send.assert_called_once()
    # The ticket description must reach the model via the user_message kwarg.
    _, kwargs = mock_send.call_args
    assert ticket.description in kwargs["user_message"]
@pytest.mark.asyncio
async def test_run_worker_lifecycle_context_injection(monkeypatch: pytest.MonkeyPatch) -> None:
    """context_files are read, AST-summarized, and injected into the worker prompt."""
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    context_files = ["primary.py", "secondary.py"]
    from src.multi_agent_conductor import run_worker_lifecycle
    mock_send = MagicMock(return_value="Success")
    monkeypatch.setattr(ai_client, 'send', mock_send)
    sources = {
        "primary.py": "def primary(): pass",
        "secondary.py": "def secondary(): pass",
    }
    def fake_open(file, *args, **kwargs):
        # Minimal context-manager stand-in whose read() yields canned source.
        handle = MagicMock()
        handle.read.return_value = sources.get(file, "")
        handle.__enter__.return_value = handle
        return handle
    with patch("src.multi_agent_conductor.ASTParser") as parser_cls, \
            patch("builtins.open", new_callable=MagicMock) as open_mock:
        open_mock.side_effect = fake_open
        parser = parser_cls.return_value
        parser.get_curated_view.return_value = "CURATED VIEW"
        parser.get_skeleton.return_value = "SKELETON VIEW"
        run_worker_lifecycle(ticket, context, context_files=context_files)
        # First listed file gets the rich curated view; the rest only skeletons.
        parser.get_curated_view.assert_called_once_with("def primary(): pass")
        parser.get_skeleton.assert_called_once_with("def secondary(): pass")
        _, kwargs = mock_send.call_args
        prompt = kwargs["user_message"]
        for fragment in ("CURATED VIEW", "SKELETON VIEW", "primary.py", "secondary.py"):
            assert fragment in prompt
@pytest.mark.asyncio
async def test_run_worker_lifecycle_handles_blocked_response(monkeypatch: pytest.MonkeyPatch) -> None:
    """A BLOCKED reply flips the ticket to blocked and records the reason."""
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    from src.multi_agent_conductor import run_worker_lifecycle
    blocked_reply = "I am BLOCKED because I don't have enough information."
    monkeypatch.setattr(ai_client, 'send', MagicMock(return_value=blocked_reply))
    run_worker_lifecycle(ticket, context)
    assert ticket.status == "blocked"
    assert "BLOCKED" in ticket.blocked_reason
@pytest.mark.asyncio
async def test_run_worker_lifecycle_step_mode_confirmation(monkeypatch: pytest.MonkeyPatch) -> None:
    """With an event queue present, spawn and per-tool confirmations both fire."""
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1", step_mode=True)
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    from src.multi_agent_conductor import run_worker_lifecycle
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)
    # confirm_spawn is consulted first whenever an event_queue is supplied.
    with patch("src.multi_agent_conductor.confirm_spawn") as mock_spawn, \
            patch("src.multi_agent_conductor.confirm_execution") as mock_confirm:
        mock_spawn.return_value = (True, "mock prompt", "mock context")
        mock_confirm.return_value = True
        def fake_send(md_content, user_message, **kwargs):
            # Simulate the model invoking a tool via the pre-tool hook.
            hook = kwargs.get("pre_tool_callback")
            if hook:
                hook('{"tool": "read_file", "args": {"path": "test.txt"}}')
            return "Success"
        mock_send.side_effect = fake_send
        run_worker_lifecycle(ticket, context, event_queue=MagicMock())
        mock_spawn.assert_called_once()
        mock_confirm.assert_called_once()
        assert ticket.status == "completed"
@pytest.mark.asyncio
async def test_run_worker_lifecycle_step_mode_rejection(monkeypatch: pytest.MonkeyPatch) -> None:
    """Even when confirmation is refused, the pre_tool_callback is still wired into send()."""
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1", step_mode=True)
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    from src.multi_agent_conductor import run_worker_lifecycle
    mock_send = MagicMock(return_value="Task failed because tool execution was rejected.")
    monkeypatch.setattr(ai_client, 'send', mock_send)
    with patch("src.multi_agent_conductor.confirm_spawn") as mock_spawn, \
            patch("src.multi_agent_conductor.confirm_execution") as mock_confirm:
        mock_spawn.return_value = (True, "mock prompt", "mock context")
        mock_confirm.return_value = False
        run_worker_lifecycle(ticket, context, event_queue=MagicMock())
        # The rejection path lives in ai_client; here we only verify the hook was passed.
        _, kwargs = mock_send.call_args
        assert kwargs["pre_tool_callback"] is not None
@pytest.mark.asyncio
async def test_conductor_engine_dynamic_parsing_and_execution(monkeypatch: pytest.MonkeyPatch, vlogger) -> None:
    """parse_json_tickets fills the track; run() respects the T1 -> T2 dependency."""
    import json
    from src.multi_agent_conductor import ConductorEngine
    engine = ConductorEngine(
        track=Track(id="dynamic_track", description="Dynamic Track"),
        auto_queue=True,
    )
    spec = [
        {"id": "T1", "description": "Initial task", "status": "todo",
         "assigned_to": "worker1", "depends_on": []},
        {"id": "T2", "description": "Dependent task", "status": "todo",
         "assigned_to": "worker2", "depends_on": ["T1"]},
        {"id": "T3", "description": "Another initial task", "status": "todo",
         "assigned_to": "worker3", "depends_on": []},
    ]
    engine.parse_json_tickets(json.dumps(spec))
    vlogger.log_state("Parsed Ticket Count", 0, len(engine.track.tickets))
    assert len(engine.track.tickets) == 3
    assert [t.id for t in engine.track.tickets] == ["T1", "T2", "T3"]
    # Keep the real AI client out of the loop.
    monkeypatch.setattr(ai_client, 'send', MagicMock())
    with patch("src.multi_agent_conductor.run_worker_lifecycle") as mock_lifecycle:
        def fake_lifecycle(ticket, context, *args, **kwargs):
            ticket.mark_complete()
            return "Success"
        mock_lifecycle.side_effect = fake_lifecycle
        await engine.run()
        assert mock_lifecycle.call_count == 3
        executed = [call[0][0].id for call in mock_lifecycle.call_args_list]
        t1_idx = executed.index("T1")
        t2_idx = executed.index("T2")
        vlogger.log_state("T1 Sequence Index", "N/A", t1_idx)
        vlogger.log_state("T2 Sequence Index", "N/A", t2_idx)
        # T1 strictly before T2 is mandatory; T3 may run anywhere.
        assert t1_idx < t2_idx
        assert "T3" in executed
        vlogger.finalize("Dynamic track parsing and dependency execution", "PASS", "Dependency chain T1 -> T2 honored.")
def test_run_worker_lifecycle_pushes_response_via_queue(monkeypatch: pytest.MonkeyPatch) -> None:
    """The worker's final reply is forwarded through _queue_put with its stream id."""
    import asyncio
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    monkeypatch.setattr(ai_client, 'send', MagicMock(return_value="Task complete."))
    monkeypatch.setattr(ai_client, 'reset_session', MagicMock())
    from src.multi_agent_conductor import run_worker_lifecycle
    with patch("src.multi_agent_conductor.confirm_spawn") as mock_spawn, \
            patch("src.multi_agent_conductor._queue_put") as mock_queue_put:
        mock_spawn.return_value = (True, "prompt", "context")
        run_worker_lifecycle(
            ticket,
            context,
            event_queue=MagicMock(),
            loop=MagicMock(spec=asyncio.AbstractEventLoop),
        )
        mock_queue_put.assert_called_once()
        positional = mock_queue_put.call_args[0]
        assert positional[2] == "response"
        payload = positional[3]
        assert payload["stream_id"] == "Tier 3 (Worker): T1"
        assert payload["text"] == "Task complete."
        assert payload["status"] == "done"
        assert ticket.status == "completed"
def test_run_worker_lifecycle_token_usage_from_comms_log(monkeypatch: pytest.MonkeyPatch) -> None:
    """Token counts read from the comms log land in engine.tier_usage['Tier 3']."""
    ticket = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model", messages=[])
    after_send_log = [
        {"direction": "OUT", "kind": "request", "payload": {"message": "hello"}},
        {"direction": "IN", "kind": "response",
         "payload": {"usage": {"input_tokens": 120, "output_tokens": 45}}},
    ]
    monkeypatch.setattr(ai_client, 'send', MagicMock(return_value="Done."))
    monkeypatch.setattr(ai_client, 'reset_session', MagicMock())
    # First call is the pre-send baseline (empty); second call carries real usage.
    monkeypatch.setattr(ai_client, 'get_comms_log', MagicMock(side_effect=[[], after_send_log]))
    from src.multi_agent_conductor import run_worker_lifecycle, ConductorEngine
    from src.models import Track
    engine = ConductorEngine(track=Track(id="test_track", description="Test"), auto_queue=True)
    with patch("src.multi_agent_conductor.confirm_spawn") as mock_spawn, \
            patch("src.multi_agent_conductor._queue_put"):
        mock_spawn.return_value = (True, "prompt", "ctx")
        run_worker_lifecycle(ticket, context, event_queue=MagicMock(), loop=MagicMock(), engine=engine)
        assert engine.tier_usage["Tier 3"]["input"] == 120
        assert engine.tier_usage["Tier 3"]["output"] == 45

View File

@@ -1,73 +0,0 @@
import pytest
from unittest.mock import MagicMock, patch
from gui_2 import App
from src.events import UserRequestEvent
@pytest.fixture
def mock_gui() -> App:
    """Build an App with all external IO (config, project files, AI hooks) patched away."""
    stub_config = {
        "ai": {"provider": "gemini", "model": "model-1"},
        "projects": {"paths": [], "active": ""},
        "gui": {"show_windows": {}},
    }
    with (
        patch('src.models.load_config', return_value=stub_config),
        patch('gui_2.project_manager.load_project', return_value={}),
        patch('gui_2.project_manager.migrate_from_legacy_config', return_value={}),
        patch('gui_2.project_manager.save_project'),
        patch('gui_2.session_logger.open_session'),
        patch('src.app_controller.AppController._init_ai_and_hooks'),
        patch('src.app_controller.AppController._fetch_models'),
    ):
        gui = App()
    return gui
def test_handle_generate_send_pushes_event(mock_gui: App) -> None:
    """_handle_generate_send wraps the generated prompt in a UserRequestEvent."""
    mock_gui._do_generate = MagicMock(
        return_value=("full_md", "path", [], "stable_md", "disc_text")
    )
    mock_gui.ui_ai_input = "test prompt"
    mock_gui.ui_files_base_dir = "."
    mock_gui.event_queue.put = MagicMock()
    # Patch run_coroutine_threadsafe so no running event loop is required;
    # event_queue.put() returns the coroutine that would be scheduled.
    with patch('asyncio.run_coroutine_threadsafe') as mock_run:
        mock_gui._handle_generate_send()
        assert mock_run.called
        mock_gui.event_queue.put.assert_called_once()
        args, kwargs = mock_gui.event_queue.put.call_args
        assert args[0] == "user_request"
        event = args[1]
        assert isinstance(event, UserRequestEvent)
        assert event.prompt == "test prompt"
        assert event.stable_md == "stable_md"
        assert event.disc_text == "disc_text"
        assert event.base_dir == "."
def test_user_request_event_payload() -> None:
    """to_dict() exposes every constructor field under its own key."""
    event = UserRequestEvent(
        prompt="hello",
        stable_md="md",
        file_items=[],
        disc_text="disc",
        base_dir=".",
    )
    dumped = event.to_dict()
    expected = {
        "prompt": "hello",
        "stable_md": "md",
        "file_items": [],
        "disc_text": "disc",
        "base_dir": ".",
    }
    for key, value in expected.items():
        assert dumped[key] == value
@pytest.mark.asyncio
async def test_async_event_queue() -> None:
    """Round-trip a single event through AsyncEventQueue."""
    from events import AsyncEventQueue
    queue = AsyncEventQueue()
    await queue.put("test_event", {"data": 123})
    received_name, received_payload = await queue.get()
    assert received_name == "test_event"
    assert received_payload["data"] == 123

View File

@@ -1,18 +0,0 @@
import sys
import os
from unittest.mock import patch
from gui_2 import App
# Ensure project root is in path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")))
def test_gui_updates_on_event(app_instance: App) -> None:
    """An API event followed by one GUI frame must refresh the API metrics."""
    app_instance.last_md = "mock_md"
    with patch.object(app_instance.controller, '_refresh_api_metrics') as refresh_spy:
        # Inject the event directly: events.emit is unavailable because the
        # fixture mocks _init_ai_and_hooks.
        app_instance._on_api_event(payload={"text": "test"})
        # Drain the deferred GUI work exactly as a frame tick would.
        app_instance._process_pending_gui_tasks()
        refresh_spy.assert_called_once_with({"text": "test"}, md_content="mock_md")

View File

@@ -1,7 +1,6 @@
import pytest
from unittest.mock import MagicMock, patch
from gui_2 import App
from src.gui_2 import App
from src.models import Track
@pytest.fixture(autouse=True)
@@ -17,7 +16,7 @@ def setup_mock_app(mock_app: App):
def test_add_ticket_logic(mock_app: App):
# Mock imgui calls to simulate clicking "Create" in the form
with patch('gui_2.imgui') as mock_imgui:
with patch('src.gui_2.imgui') as mock_imgui:
# Default return for any checkbox/input
mock_imgui.checkbox.side_effect = lambda label, value: (False, value)
mock_imgui.input_text.side_effect = lambda label, value, **kwargs: (False, value)
@@ -71,7 +70,7 @@ def test_delete_ticket_logic(mock_app: App):
children_map = {"T-001": ["T-002"]}
rendered = set()
with patch('gui_2.imgui') as mock_imgui:
with patch('src.gui_2.imgui') as mock_imgui:
# Configure mock_imgui.button to return True only for "Delete##T-001"
def button_side_effect(label):
return label == "Delete##T-001"
@@ -93,7 +92,7 @@ def test_delete_ticket_logic(mock_app: App):
mock_imgui.ImVec2 = MagicMock
mock_imgui.ImVec4 = MagicMock
with patch('gui_2.C_LBL', MagicMock()), patch.object(mock_app.controller, '_push_mma_state_update') as mock_push:
with patch('src.gui_2.C_LBL', MagicMock()), patch.object(mock_app.controller, '_push_mma_state_update') as mock_push:
# Render T-001
mock_app._render_ticket_dag_node(mock_app.active_tickets[0], tickets_by_id, children_map, rendered)
@@ -107,9 +106,9 @@ def test_delete_ticket_logic(mock_app: App):
def test_track_discussion_toggle(mock_app: App):
with (
patch('gui_2.imgui') as mock_imgui,
patch('gui_2.project_manager.load_track_history', return_value=["@2026-03-01 12:00:00\n[User]\nTrack Hello"]) as mock_load,
patch('gui_2.project_manager.str_to_entry', side_effect=lambda s, roles: {"ts": "12:00", "role": "User", "content": s.split("\n")[-1]}),
patch('src.gui_2.imgui') as mock_imgui,
patch('src.gui_2.project_manager.load_track_history', return_value=["@2026-03-01 12:00:00\n[User]\nTrack Hello"]) as mock_load,
patch('src.gui_2.project_manager.str_to_entry', side_effect=lambda s, roles: {"ts": "12:00", "role": "User", "content": s.split("\n")[-1]}),
patch.object(mock_app.controller, '_flush_disc_entries_to_project') as mock_flush,
patch.object(mock_app.controller, '_switch_discussion') as mock_switch
):

View File

@@ -1,117 +0,0 @@
import pytest
from gui_2 import App
@pytest.mark.asyncio
async def test_mma_stream_event_routing(app_instance: App):
    """Verifies that 'mma_stream' events from AsyncEventQueue reach mma_streams."""
    stream_id = "Tier 3 (Worker): T-001"
    chunks = ["Thinking... ", "I will ", "list files."]
    # Enqueue each chunk the way the background asyncio worker would; we append
    # the GUI task directly to avoid managing the worker thread's lifecycle.
    for text in chunks:
        pending_task = {
            "action": "mma_stream_append",
            "payload": {"stream_id": stream_id, "text": text},
        }
        with app_instance._pending_gui_tasks_lock:
            app_instance._pending_gui_tasks.append(pending_task)
    # One simulated GUI frame drains the whole queue.
    app_instance._process_pending_gui_tasks()
    # All chunks must have been concatenated in arrival order.
    assert app_instance.mma_streams.get(stream_id) == "".join(chunks)
@pytest.mark.asyncio
async def test_mma_stream_multiple_workers(app_instance: App):
    """Verifies that streaming works for multiple concurrent workers."""
    worker_one = "Tier 3 (Worker): T-001"
    worker_two = "Tier 3 (Worker): T-002"
    # Chunks arrive interleaved across the two workers, as they would live.
    interleaved = [
        (worker_one, "T1 start. "),
        (worker_two, "T2 start. "),
        (worker_one, "T1 middle. "),
        (worker_two, "T2 middle. "),
        (worker_one, "T1 end."),
        (worker_two, "T2 end."),
    ]
    for stream_id, text in interleaved:
        with app_instance._pending_gui_tasks_lock:
            app_instance._pending_gui_tasks.append({
                "action": "mma_stream_append",
                "payload": {"stream_id": stream_id, "text": text},
            })
    app_instance._process_pending_gui_tasks()
    # Each worker's stream must reassemble in order, unpolluted by the other.
    assert app_instance.mma_streams[worker_one] == "T1 start. T1 middle. T1 end."
    assert app_instance.mma_streams[worker_two] == "T2 start. T2 middle. T2 end."
def test_handle_ai_response_resets_stream(app_instance: App):
    """Verifies that the final handle_ai_response (status=done) replaces/finalizes the stream."""
    stream_id = "Tier 3 (Worker): T-001"

    def enqueue(action, payload):
        # Mimic the background worker handing a task to the GUI thread.
        with app_instance._pending_gui_tasks_lock:
            app_instance._pending_gui_tasks.append({"action": action, "payload": payload})

    # Phase 1: some streaming progress accumulates.
    enqueue("mma_stream_append", {"stream_id": stream_id, "text": "Partially streamed..."})
    app_instance._process_pending_gui_tasks()
    assert app_instance.mma_streams[stream_id] == "Partially streamed..."

    # Phase 2: the terminal response arrives carrying the full text.
    enqueue("handle_ai_response", {
        "stream_id": stream_id,
        "text": "Final complete response.",
        "status": "done",
    })
    app_instance._process_pending_gui_tasks()

    # handle_ai_response OVERWRITES rather than appends: the final text from
    # the model is authoritative (streamed chunks can diverge from it when
    # tool calls or SDK-specific behaviors are involved).
    assert app_instance.mma_streams[stream_id] == "Final complete response."
def test_handle_ai_response_streaming(app_instance: App):
    """Verifies that 'handle_ai_response' with status='streaming...' appends to mma_streams."""
    stream_id = "Tier 3 (Worker): T-001"
    accumulated = ""
    # Deliver two chunks, processing a GUI frame after each; the stream text
    # must grow by exactly the delivered chunk every time (append semantics).
    for chunk in ("Chunk 1. ", "Chunk 2."):
        with app_instance._pending_gui_tasks_lock:
            app_instance._pending_gui_tasks.append({
                "action": "handle_ai_response",
                "payload": {
                    "stream_id": stream_id,
                    "text": chunk,
                    "status": "streaming...",
                },
            })
        app_instance._process_pending_gui_tasks()
        accumulated += chunk
        assert app_instance.mma_streams[stream_id] == accumulated

View File

@@ -1,109 +0,0 @@
import pytest
from unittest.mock import patch, ANY
import asyncio
import time
from gui_2 import App
from events import UserRequestEvent
from api_hook_client import ApiHookClient
@pytest.mark.timeout(10)
@pytest.mark.asyncio
async def test_user_request_integration_flow(mock_app: App) -> None:
    """
    Verifies that pushing a UserRequestEvent to the event_queue:
    1. Triggers ai_client.send
    2. Results in a 'response' event back to the queue
    3. Eventually updates the UI state (ai_response, ai_status) after processing GUI tasks.
    """
    app = mock_app
    # Mock all ai_client methods called during _handle_request_event
    mock_response = "This is a test AI response"
    with (
        patch('ai_client.send', return_value=mock_response) as mock_send,
        patch('ai_client.set_custom_system_prompt'),
        patch('ai_client.set_model_params'),
        patch('ai_client.set_agent_tools')
    ):
        # 1. Create and push a UserRequestEvent
        event = UserRequestEvent(
            prompt="Hello AI",
            stable_md="Context",
            file_items=[],
            disc_text="History",
            base_dir="."
        )
        # 2. Call the handler directly since start_services is mocked (no event loop thread)
        app.controller._handle_request_event(event)
        # 3. Verify ai_client.send was called
        assert mock_send.called, "ai_client.send was not called"
        # Positional order pins the send() signature:
        # (md_content, user_message, base_dir, file_items, disc_text).
        # Callbacks and stream flag are implementation details -> ANY.
        mock_send.assert_called_once_with(
            "Context", "Hello AI", ".", [], "History",
            pre_tool_callback=ANY,
            qa_callback=ANY,
            stream=ANY,
            stream_callback=ANY
        )
        # 4. Wait for the response to propagate to _pending_gui_tasks and update UI
        # We call _process_pending_gui_tasks manually to simulate a GUI frame update.
        # NOTE(review): the 3 s budget sits inside the 10 s pytest timeout, so a
        # hang fails via the assert below rather than the hard timeout.
        start_time = time.time()
        success = False
        while time.time() - start_time < 3:
            app._process_pending_gui_tasks()
            if app.ai_response == mock_response and app.ai_status == "done":
                success = True
                break
            await asyncio.sleep(0.1)
        assert success, f"UI state was not updated. ai_response: '{app.ai_response}', status: '{app.ai_status}'"
        assert app.ai_response == mock_response
        assert app.ai_status == "done"
@pytest.mark.timeout(10)
@pytest.mark.asyncio
async def test_user_request_error_handling(mock_app: App) -> None:
    """
    Verifies that if ai_client.send raises an exception, the UI is updated with the error state.
    """
    app = mock_app
    with (
        patch('ai_client.send', side_effect=Exception("API Failure")),
        patch('ai_client.set_custom_system_prompt'),
        patch('ai_client.set_model_params'),
        patch('ai_client.set_agent_tools')
    ):
        failing_event = UserRequestEvent(
            prompt="Trigger Error",
            stable_md="",
            file_items=[],
            disc_text="",
            base_dir="."
        )
        app.controller._handle_request_event(failing_event)
        # The error surfaces asynchronously through _pending_gui_tasks; poll
        # with simulated GUI frames until it lands or the deadline expires.
        deadline = time.time() + 5
        reflected = False
        while time.time() < deadline:
            app._process_pending_gui_tasks()
            if app.ai_status == "error" and "ERROR: API Failure" in app.ai_response:
                reflected = True
                break
            await asyncio.sleep(0.1)
        assert reflected, f"Error state was not reflected in UI. status: {app.ai_status}, response: {app.ai_response}"
def test_api_gui_state_live(live_gui) -> None:
    """End-to-end: values set over the API hook must show up in the live GUI state."""
    client = ApiHookClient()
    wanted = {
        'current_provider': 'anthropic',
        'current_model': 'claude-3-haiku-20240307',
    }
    for key, value in wanted.items():
        client.set_value(key, value)

    # The live GUI applies hook writes asynchronously; poll for up to 10 s.
    deadline = time.time() + 10
    synced = False
    while time.time() < deadline:
        state = client.get_gui_state()
        if state and all(state.get(k) == v for k, v in wanted.items()):
            synced = True
            break
        time.sleep(0.5)
    assert synced, f"GUI state did not update. Got: {client.get_gui_state()}"

    # Re-fetch to confirm the values stuck rather than flickering through.
    final_state = client.get_gui_state()
    for key, value in wanted.items():
        assert final_state[key] == value

View File

@@ -44,7 +44,6 @@ def test_full_live_workflow(live_gui) -> None:
success = False
for _ in range(10):
proj = client.get_project()
# check if name matches 'temp_project'
if proj.get('project', {}).get('project', {}).get('name') == 'temp_project':
success = True
break
@@ -61,14 +60,10 @@ def test_full_live_workflow(live_gui) -> None:
# Enable auto-add so the response ends up in history
client.set_value("auto_add_history", True)
client.set_value("current_provider", "gemini_cli")
client.set_value("current_provider", "gemini")
mock_path = f'"{sys.executable}" "{os.path.abspath("tests/mock_gemini_cli.py")}"'
print(f"[TEST] Setting gcli_path to {mock_path}...")
client.set_value("gcli_path", mock_path)
assert wait_for_value(client, "gcli_path", mock_path)
client.set_value("current_model", "gemini-2.0-flash")
# USE gemini-2.5-flash-lite
client.set_value("current_model", "gemini-2.5-flash-lite")
time.sleep(1)
# 3. Discussion Turn
@@ -105,7 +100,9 @@ def test_full_live_workflow(live_gui) -> None:
for i in range(60):
session = client.get_session()
entries = session.get('session', {}).get('entries', [])
if any(e.get('role') == 'AI' for e in entries):
# Check for AI role. The entries are objects with a 'role' key.
found_ai = any(str(e.get('role', '')).upper() == 'AI' for e in entries)
if found_ai:
success = True
print(f" AI response found in history after {i}s")
break
@@ -116,7 +113,7 @@ def test_full_live_workflow(live_gui) -> None:
pytest.fail(f"AI Status went to error during response wait. Response: {state.get('ai_response')}")
time.sleep(1)
assert success, "AI failed to respond or response not added to history"
assert success, f"AI failed to respond or response not added to history. Entries: {client.get_session()}"
# 5. Switch Discussion
print("[TEST] Creating new discussion 'AutoDisc'...")

View File

@@ -1,27 +1,17 @@
from unittest.mock import patch
from gui_2 import App
from src.gui_2 import App
def test_cb_ticket_retry(app_instance: App) -> None:
ticket_id = "test_ticket_1"
app_instance.active_tickets = [{"id": ticket_id, "status": "failed"}]
with patch('asyncio.run_coroutine_threadsafe') as mock_run_safe:
app_instance._cb_ticket_retry(ticket_id)
# Verify status update
assert app_instance.active_tickets[0]['status'] == 'todo'
# Verify event pushed
mock_run_safe.assert_called_once()
# First arg is the coroutine (event_queue.put), second is self._loop
args, _ = mock_run_safe.call_args
assert args[1] == app_instance._loop
# Synchronous implementation does not use asyncio.run_coroutine_threadsafe
app_instance.controller._cb_ticket_retry(ticket_id)
# Verify status update
assert app_instance.active_tickets[0]['status'] == 'todo'
def test_cb_ticket_skip(app_instance: App) -> None:
ticket_id = "test_ticket_1"
ticket_id = "test_ticket_2"
app_instance.active_tickets = [{"id": ticket_id, "status": "todo"}]
with patch('asyncio.run_coroutine_threadsafe') as mock_run_safe:
app_instance._cb_ticket_skip(ticket_id)
# Verify status update
assert app_instance.active_tickets[0]['status'] == 'skipped'
# Verify event pushed
mock_run_safe.assert_called_once()
args, _ = mock_run_safe.call_args
assert args[1] == app_instance._loop
app_instance.controller._cb_ticket_skip(ticket_id)
# Verify status update
assert app_instance.active_tickets[0]['status'] == 'skipped'

View File

@@ -1,81 +0,0 @@
import pytest
from unittest.mock import MagicMock, patch
import multi_agent_conductor
from src.models import Ticket, WorkerContext
import events
import asyncio
import concurrent.futures
class MockDialog:
def __init__(self, approved: bool, final_payload: dict | None = None) -> None:
self.approved = approved
self.final_payload = final_payload
def wait(self) -> dict:
res = {'approved': self.approved, 'abort': False}
if self.final_payload:
res.update(self.final_payload)
return res
@pytest.fixture
def mock_ai_client() -> None:
    """Yield ai_client.send patched to return a canned completion.

    NOTE(review): this is a generator fixture, so the ``-> None`` annotation is
    inaccurate — it yields the MagicMock; consider ``Iterator[MagicMock]``.
    """
    with patch("ai_client.send") as mock_send:
        # Every worker call appears to succeed with this fixed response.
        mock_send.return_value = "Task completed"
        yield mock_send
@pytest.mark.asyncio
async def test_confirm_spawn_pushed_to_queue() -> None:
    """confirm_spawn must publish an 'mma_spawn_approval' event carrying the
    spawn details, then honour the dialog result the GUI injects (approval
    plus an edited prompt/context)."""
    event_queue = events.AsyncEventQueue()
    ticket_id = "T1"
    role = "Tier 3 Worker"
    prompt = "Original Prompt"
    context_md = "Original Context"
    # Start confirm_spawn in a thread since it blocks with time.sleep
    loop = asyncio.get_running_loop()
    def run_confirm():
        return multi_agent_conductor.confirm_spawn(role, prompt, context_md, event_queue, ticket_id, loop=loop)
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future = loop.run_in_executor(executor, run_confirm)
        # Wait for the event to appear in the queue
        event_name, payload = await event_queue.get()
        assert event_name == "mma_spawn_approval"
        assert payload["ticket_id"] == ticket_id
        assert payload["role"] == role
        assert payload["prompt"] == prompt
        assert payload["context_md"] == context_md
        # dialog_container is a one-slot mutable cell: the GUI places the
        # dialog object into it and confirm_spawn polls for it.
        assert "dialog_container" in payload
        # Simulate GUI injecting a dialog
        payload["dialog_container"][0] = MockDialog(True, {"prompt": "Modified Prompt", "context_md": "Modified Context"})
        # confirm_spawn must unblock and return the user-edited values.
        approved, final_prompt, final_context = await future
        assert approved is True
        assert final_prompt == "Modified Prompt"
        assert final_context == "Modified Context"
@patch("multi_agent_conductor.confirm_spawn")
def test_run_worker_lifecycle_approved(mock_confirm: MagicMock, mock_ai_client: MagicMock, app_instance) -> None:
ticket = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
context = WorkerContext(ticket_id="T1", model_name="model", messages=[])
event_queue = app_instance.event_queue
mock_confirm.return_value = (True, "Modified Prompt", "Modified Context")
multi_agent_conductor.run_worker_lifecycle(ticket, context, event_queue=event_queue, loop=app_instance._loop)
mock_confirm.assert_called_once()
# Check that ai_client.send was called with modified values
args, kwargs = mock_ai_client.call_args
assert kwargs["user_message"] == "Modified Prompt"
assert kwargs["md_content"] == "Modified Context"
assert ticket.status == "completed"
@patch("multi_agent_conductor.confirm_spawn")
def test_run_worker_lifecycle_rejected(mock_confirm: MagicMock, mock_ai_client: MagicMock, app_instance) -> None:
ticket = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
context = WorkerContext(ticket_id="T1", model_name="model", messages=[])
event_queue = app_instance.event_queue
mock_confirm.return_value = (False, "Original Prompt", "Original Context")
result = multi_agent_conductor.run_worker_lifecycle(ticket, context, event_queue=event_queue, loop=app_instance._loop)
mock_confirm.assert_called_once()
mock_ai_client.assert_not_called()
assert ticket.status == "blocked"
assert "Spawn rejected by user" in ticket.blocked_reason
assert "BLOCKED" in result