import pytest
from unittest.mock import MagicMock, patch
from models import Ticket, Track, WorkerContext
import ai_client

# These tests define the expected interface for multi_agent_conductor.py
# which will be implemented in the next phase of TDD.
# NOTE(review): imports of multi_agent_conductor are deliberately deferred into
# each test body so collection does not fail before the module exists.


def test_conductor_engine_initialization() -> None:
    """
    Test that ConductorEngine can be initialized with a Track.
    """
    track = Track(id="test_track", description="Test Track")
    from multi_agent_conductor import ConductorEngine
    engine = ConductorEngine(track=track)
    assert engine.track == track


@pytest.mark.asyncio
async def test_conductor_engine_run_linear_executes_tickets_in_order(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that run_linear iterates through executable tickets and calls the
    worker lifecycle.
    """
    ticket1 = Ticket(id="T1", description="Task 1", status="todo",
                     assigned_to="worker1")
    # T2 depends on T1, so the engine must schedule T1 first.
    ticket2 = Ticket(id="T2", description="Task 2", status="todo",
                     assigned_to="worker2", depends_on=["T1"])
    track = Track(id="track1", description="Track 1",
                  tickets=[ticket1, ticket2])
    from multi_agent_conductor import ConductorEngine
    engine = ConductorEngine(track=track)

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)

    # We mock run_worker_lifecycle as it is expected to be in the same module
    with patch("multi_agent_conductor.run_worker_lifecycle") as mock_lifecycle:
        # Mocking lifecycle to mark ticket as complete so dependencies can be
        # resolved
        def side_effect(ticket, context, *args, **kwargs):
            ticket.mark_complete()
            return "Success"
        mock_lifecycle.side_effect = side_effect

        await engine.run_linear()

        # Track.get_executable_tickets() should be called repeatedly until
        # all are done. T1 should run first, then T2.
        assert mock_lifecycle.call_count == 2
        assert ticket1.status == "completed"
        assert ticket2.status == "completed"

        # Verify sequence: T1 before T2
        # (call[0][0] is the first positional argument, i.e. the Ticket).
        calls = mock_lifecycle.call_args_list
        assert calls[0][0][0].id == "T1"
        assert calls[1][0][0].id == "T2"


@pytest.mark.asyncio
async def test_run_worker_lifecycle_calls_ai_client_send(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that run_worker_lifecycle triggers the AI client and updates ticket
    status on success.
    """
    ticket = Ticket(id="T1", description="Task 1", status="todo",
                    assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model",
                            messages=[])
    from multi_agent_conductor import run_worker_lifecycle

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)
    mock_send.return_value = "Task complete. I have updated the file."

    result = run_worker_lifecycle(ticket, context)

    # The raw AI response is expected to be returned verbatim.
    assert result == "Task complete. I have updated the file."
    assert ticket.status == "completed"
    mock_send.assert_called_once()

    # Check if description was passed to send()
    args, kwargs = mock_send.call_args
    # user_message is passed as a keyword argument
    assert ticket.description in kwargs["user_message"]


@pytest.mark.asyncio
async def test_run_worker_lifecycle_context_injection(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that run_worker_lifecycle can take a context_files list and injects
    AST views into the prompt.
    """
    ticket = Ticket(id="T1", description="Task 1", status="todo",
                    assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model",
                            messages=[])
    context_files = ["primary.py", "secondary.py"]
    from multi_agent_conductor import run_worker_lifecycle

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)

    # We mock ASTParser which is expected to be imported in
    # multi_agent_conductor
    with patch("multi_agent_conductor.ASTParser") as mock_ast_parser_class, \
            patch("builtins.open", new_callable=MagicMock) as mock_open:
        # Setup open mock to return different content for different files
        file_contents = {
            "primary.py": "def primary(): pass",
            "secondary.py": "def secondary(): pass"
        }

        def mock_open_side_effect(file, *args, **kwargs):
            content = file_contents.get(file, "")
            mock_file = MagicMock()
            mock_file.read.return_value = content
            # __enter__ returns the mock itself so `with open(...) as f` works.
            mock_file.__enter__.return_value = mock_file
            return mock_file
        mock_open.side_effect = mock_open_side_effect

        # Setup ASTParser mock
        mock_ast_parser = mock_ast_parser_class.return_value
        mock_ast_parser.get_curated_view.return_value = "CURATED VIEW"
        mock_ast_parser.get_skeleton.return_value = "SKELETON VIEW"

        mock_send.return_value = "Success"

        run_worker_lifecycle(ticket, context, context_files=context_files)

        # Verify ASTParser calls:
        # First file (primary) should get curated view, others (secondary)
        # get skeleton
        mock_ast_parser.get_curated_view.assert_called_once_with(
            "def primary(): pass")
        mock_ast_parser.get_skeleton.assert_called_once_with(
            "def secondary(): pass")

        # Verify user_message contains the views
        _, kwargs = mock_send.call_args
        user_message = kwargs["user_message"]
        assert "CURATED VIEW" in user_message
        assert "SKELETON VIEW" in user_message
        assert "primary.py" in user_message
        assert "secondary.py" in user_message


@pytest.mark.asyncio
async def test_run_worker_lifecycle_handles_blocked_response(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that run_worker_lifecycle marks the ticket as blocked if the AI
    indicates it cannot proceed.
    """
    ticket = Ticket(id="T1", description="Task 1", status="todo",
                    assigned_to="worker1")
    context = WorkerContext(ticket_id="T1", model_name="test-model",
                            messages=[])
    from multi_agent_conductor import run_worker_lifecycle

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)

    # Simulate a response indicating a block
    mock_send.return_value = "I am BLOCKED because I don't have enough information."

    run_worker_lifecycle(ticket, context)

    # The lifecycle is expected to detect the BLOCKED marker and record why.
    assert ticket.status == "blocked"
    assert "BLOCKED" in ticket.blocked_reason


@pytest.mark.asyncio
async def test_run_worker_lifecycle_step_mode_confirmation(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that run_worker_lifecycle passes confirm_execution to ai_client.send
    when step_mode is True. Verify that if confirm_execution is called
    (simulated by mocking ai_client.send to call its callback), the flow
    works as expected.
    """
    ticket = Ticket(id="T1", description="Task 1", status="todo",
                    assigned_to="worker1", step_mode=True)
    context = WorkerContext(ticket_id="T1", model_name="test-model",
                            messages=[])
    from multi_agent_conductor import run_worker_lifecycle

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)

    with patch("multi_agent_conductor.confirm_execution") as mock_confirm:
        # We simulate ai_client.send by making it call the pre_tool_callback
        # it received
        def mock_send_side_effect(md_content, user_message, **kwargs):
            callback = kwargs.get("pre_tool_callback")
            if callback:
                # Simulate calling it with some payload
                callback('{"tool": "read_file", "args": {"path": "test.txt"}}')
            return "Success"
        mock_send.side_effect = mock_send_side_effect
        mock_confirm.return_value = True
        mock_event_queue = MagicMock()

        run_worker_lifecycle(ticket, context, event_queue=mock_event_queue)

        # Verify confirm_execution was called
        mock_confirm.assert_called_once()
        assert ticket.status == "completed"


@pytest.mark.asyncio
async def test_run_worker_lifecycle_step_mode_rejection(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Verify that if confirm_execution returns False, the logic (in ai_client,
    which we simulate here) would prevent execution. In run_worker_lifecycle,
    we just check if it's passed.
    """
    ticket = Ticket(id="T1", description="Task 1", status="todo",
                    assigned_to="worker1", step_mode=True)
    context = WorkerContext(ticket_id="T1", model_name="test-model",
                            messages=[])
    from multi_agent_conductor import run_worker_lifecycle

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)

    with patch("multi_agent_conductor.confirm_execution") as mock_confirm:
        mock_confirm.return_value = False
        mock_send.return_value = "Task failed because tool execution was rejected."

        run_worker_lifecycle(ticket, context)

        # Verify it was passed to send
        args, kwargs = mock_send.call_args
        assert kwargs["pre_tool_callback"] is not None
        # Since we've already tested ai_client's implementation of
        # pre_tool_callback (mentally or via other tests), here we just
        # verify the wiring.


@pytest.mark.asyncio
async def test_conductor_engine_dynamic_parsing_and_execution(
        monkeypatch: pytest.MonkeyPatch) -> None:
    """
    Test that parse_json_tickets correctly populates the track and run_linear
    executes them in dependency order.
    """
    import json
    from multi_agent_conductor import ConductorEngine

    track = Track(id="dynamic_track", description="Dynamic Track")
    engine = ConductorEngine(track=track)

    # Three tickets: T1 and T3 are independent; T2 depends on T1.
    tickets_json = json.dumps([
        {
            "id": "T1",
            "description": "Initial task",
            "status": "todo",
            "assigned_to": "worker1",
            "depends_on": []
        },
        {
            "id": "T2",
            "description": "Dependent task",
            "status": "todo",
            "assigned_to": "worker2",
            "depends_on": ["T1"]
        },
        {
            "id": "T3",
            "description": "Another initial task",
            "status": "todo",
            "assigned_to": "worker3",
            "depends_on": []
        }
    ])

    engine.parse_json_tickets(tickets_json)

    # Parsing should preserve the JSON ordering of tickets.
    assert len(engine.track.tickets) == 3
    assert engine.track.tickets[0].id == "T1"
    assert engine.track.tickets[1].id == "T2"
    assert engine.track.tickets[2].id == "T3"

    # Mock ai_client.send using monkeypatch
    mock_send = MagicMock()
    monkeypatch.setattr(ai_client, 'send', mock_send)

    # Mock run_worker_lifecycle to mark tickets as complete
    with patch("multi_agent_conductor.run_worker_lifecycle") as mock_lifecycle:
        def side_effect(ticket, context, *args, **kwargs):
            ticket.mark_complete()
            return "Success"
        mock_lifecycle.side_effect = side_effect

        await engine.run_linear()

        assert mock_lifecycle.call_count == 3

        # Verify dependency order: T1 must be called before T2
        calls = [call[0][0].id for call in mock_lifecycle.call_args_list]
        t1_idx = calls.index("T1")
        t2_idx = calls.index("T2")
        assert t1_idx < t2_idx
        # T3 can be anywhere relative to T1 and T2, but T1 < T2 is mandatory
        assert "T3" in calls