diff --git a/conductor/tests/test_mma_exec.py b/conductor/tests/test_mma_exec.py
index f382db2..f6e393a 100644
--- a/conductor/tests/test_mma_exec.py
+++ b/conductor/tests/test_mma_exec.py
@@ -1,5 +1,6 @@
 import pytest
 import os
+from pathlib import Path
 from unittest.mock import patch, MagicMock
 from scripts.mma_exec import create_parser, get_role_documents, execute_agent, get_model_for_role, get_dependencies

@@ -80,7 +81,7 @@ def test_execute_agent() -> None:
     assert kwargs.get("text") is True
     assert result == mock_stdout

-def test_get_dependencies(tmp_path):
+def test_get_dependencies(tmp_path: Path) -> None:
     content = (
         "import os\n"
         "import sys\n"
@@ -94,7 +95,7 @@ def test_get_dependencies(tmp_path):

 import re

-def test_execute_agent_logging(tmp_path):
+def test_execute_agent_logging(tmp_path: Path) -> None:
     log_file = tmp_path / "mma_delegation.log"
     # mma_exec now uses logs/agents/ for individual logs and logs/mma_delegation.log for master
     # We will patch LOG_FILE to point to our temp location
@@ -113,7 +114,7 @@ def test_execute_agent_logging(tmp_path):
     assert test_prompt in log_content
     # Master log should now have the summary prompt
     assert re.search(r"\d{4}-\d{2}-\d{2}", log_content)
-def test_execute_agent_tier3_injection(tmp_path):
+def test_execute_agent_tier3_injection(tmp_path: Path) -> None:
     main_content = "import dependency\n\ndef run():\n    dependency.do_work()\n"
     main_file = tmp_path / "main.py"
     main_file.write_text(main_content)
diff --git a/tests/test_api_hook_extensions.py b/tests/test_api_hook_extensions.py
index 7f109ba..5b794b1 100644
--- a/tests/test_api_hook_extensions.py
+++ b/tests/test_api_hook_extensions.py
@@ -1,6 +1,7 @@
 import pytest
 import sys
 import os
+from typing import Any

 # Ensure project root is in path for imports
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
@@ -13,20 +14,20 @@ def test_api_client_has_extensions() -> None:
     assert hasattr(client, 'select_tab')
     assert hasattr(client, 'select_list_item')

-def test_select_tab_integration(live_gui):
+def test_select_tab_integration(live_gui: Any) -> None:
     client = ApiHookClient()
     # We'll need to make sure the tags exist in gui_legacy.py
     # For now, this is a placeholder for the integration test
     response = client.select_tab("operations_tabs", "tab_tool")
     assert response == {'status': 'queued'}

-def test_select_list_item_integration(live_gui):
+def test_select_list_item_integration(live_gui: Any) -> None:
     client = ApiHookClient()
     # Assuming 'Default' discussion exists or we can just test that it queues
     response = client.select_list_item("disc_listbox", "Default")
     assert response == {'status': 'queued'}

-def test_get_indicator_state_integration(live_gui):
+def test_get_indicator_state_integration(live_gui: Any) -> None:
     client = ApiHookClient()
     # thinking_indicator is usually hidden unless AI is running
     response = client.get_indicator_state("thinking_indicator")
diff --git a/tests/test_conductor_api_hook_integration.py b/tests/test_conductor_api_hook_integration.py
index 1fb10fa..19ba95e 100644
--- a/tests/test_conductor_api_hook_integration.py
+++ b/tests/test_conductor_api_hook_integration.py
@@ -6,13 +6,14 @@ import time
 import json
 import requests
 import sys
+from typing import Any

 # Ensure project root is in path
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

 from api_hook_client import ApiHookClient

-def simulate_conductor_phase_completion(client: ApiHookClient):
+def simulate_conductor_phase_completion(client: ApiHookClient) -> dict[str, Any]:
     """
     Simulates the Conductor agent's logic for phase completion using ApiHookClient.
     """
@@ -33,7 +34,7 @@ def simulate_conductor_phase_completion(client: ApiHookClient):
         results["verification_message"] = f"Automated verification failed: {e}"
     return results

-def test_conductor_integrates_api_hook_client_for_verification(live_gui):
+def test_conductor_integrates_api_hook_client_for_verification(live_gui: Any) -> None:
     """
     Verify that Conductor's simulated phase completion logic properly
     integrates and uses the ApiHookClient for verification against the live GUI.
@@ -43,7 +44,7 @@ def test_conductor_integrates_api_hook_client_for_verification(live_gui):
     assert results["verification_successful"] is True
     assert "successfully" in results["verification_message"]

-def test_conductor_handles_api_hook_failure(live_gui):
+def test_conductor_handles_api_hook_failure(live_gui: Any) -> None:
     """
     Verify Conductor handles a simulated API hook verification failure.
     We patch the client's get_status to simulate failure even with live GUI.
diff --git a/tests/test_gemini_cli_edge_cases.py b/tests/test_gemini_cli_edge_cases.py
index 3acf1bb..46d4d05 100644
--- a/tests/test_gemini_cli_edge_cases.py
+++ b/tests/test_gemini_cli_edge_cases.py
@@ -4,9 +4,10 @@ import os
 import sys
 import requests
 import json
+from typing import Any
 from api_hook_client import ApiHookClient

-def test_gemini_cli_context_bleed_prevention(live_gui):
+def test_gemini_cli_context_bleed_prevention(live_gui: Any) -> None:
     """
     Test that the GeminiCliAdapter correctly filters out echoed 'user' messages
     and only shows assistant content in the GUI history.
@@ -39,7 +40,7 @@ print(json.dumps({"type": "result", "stats": {"total_tokens": 10}}), flush=True)
     assert "echoing you" not in ai_entries[0].get("content")
     os.remove(bleed_mock)

-def test_gemini_cli_parameter_resilience(live_gui):
+def test_gemini_cli_parameter_resilience(live_gui: Any) -> None:
     """
     Test that mcp_client correctly handles 'file_path' and 'dir_path' aliases
     sent by the AI instead of 'path'.
@@ -106,7 +107,7 @@ else:
     assert found, "Tool result indicating success not found in history"
     os.remove(alias_mock)

-def test_gemini_cli_loop_termination(live_gui):
+def test_gemini_cli_loop_termination(live_gui: Any) -> None:
     """
     Test that multi-round tool calling correctly terminates and preserves
     payload (session context) between rounds.
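
Note on the annotations above: the patch types the `live_gui` fixture parameter as `Any`, which is reasonable when the fixture yields an opaque object. For pytest's built-in fixtures the bundled stubs are narrower, so `Any` can often be avoided. A minimal sketch, not part of this patch (the test name and env variable are illustrative; `pytest.MonkeyPatch` has been public API since pytest 6.2):

    # Sketch: precise annotations for pytest built-ins instead of Any.
    from pathlib import Path

    import pytest

    def test_env_roundtrip(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
        # tmp_path is a real pathlib.Path, so the "/" join operator is typed.
        cfg = tmp_path / "config.json"
        cfg.write_text("{}")
        # MonkeyPatch.setenv takes (str, str), so a type checker validates the call.
        monkeypatch.setenv("APP_CONFIG", str(cfg))
        assert cfg.read_text() == "{}"

The same pattern would apply to the `tmp_path: Path` hunks above; `live_gui` stays `Any` unless its fixture is given a concrete return type.
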
diff --git a/tests/test_gemini_cli_parity_regression.py b/tests/test_gemini_cli_parity_regression.py
index 5c75b23..d40fd22 100644
--- a/tests/test_gemini_cli_parity_regression.py
+++ b/tests/test_gemini_cli_parity_regression.py
@@ -2,6 +2,7 @@ import pytest
 from unittest.mock import patch, MagicMock
 import sys
 import os
+from typing import Any

 # Add project root to sys.path
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
@@ -19,7 +20,7 @@ def setup_ai_client() -> None:

 @patch('ai_client.GeminiCliAdapter')
 @patch('ai_client._get_combined_system_prompt')
-def test_send_invokes_adapter_send(mock_prompt, mock_adapter_class):
+def test_send_invokes_adapter_send(mock_prompt: Any, mock_adapter_class: Any) -> None:
     mock_prompt.return_value = "Mocked Prompt"
     mock_instance = mock_adapter_class.return_value
     mock_instance.send.return_value = {"text": "Done", "tool_calls": []}
@@ -34,7 +35,7 @@
     assert kwargs['system_instruction'] == "Mocked Prompt\n\n\ncontext\n"

 @patch('ai_client.GeminiCliAdapter')
-def test_get_history_bleed_stats(mock_adapter_class):
+def test_get_history_bleed_stats(mock_adapter_class: Any) -> None:
     mock_instance = mock_adapter_class.return_value
     mock_instance.send.return_value = {"text": "txt", "tool_calls": []}
     mock_instance.last_usage = {"input_tokens": 1500}
diff --git a/tests/test_gui_diagnostics.py b/tests/test_gui_diagnostics.py
index 7a8f29b..bdb19f4 100644
--- a/tests/test_gui_diagnostics.py
+++ b/tests/test_gui_diagnostics.py
@@ -2,6 +2,7 @@ import pytest
 from unittest.mock import patch, MagicMock
 import importlib.util
 import sys
+from typing import Any
 import dearpygui.dearpygui as dpg

 # Load gui.py as a module for testing
@@ -29,14 +30,13 @@ def app_instance() -> None:
     yield app
     dpg.destroy_context()

-def test_diagnostics_panel_initialization(app_instance):
+def test_diagnostics_panel_initialization(app_instance: Any) -> None:
     assert "Diagnostics" in app_instance.window_info
     assert app_instance.window_info["Diagnostics"] == "win_diagnostics"
     assert "frame_time" in app_instance.perf_history
     assert len(app_instance.perf_history["frame_time"]) == 100

-def test_diagnostics_panel_updates(app_instance):
-# Mock dependencies
+def test_diagnostics_panel_updates(app_instance: Any) -> None:
     mock_metrics = {
         'last_frame_time_ms': 10.0,
         'fps': 100.0,
diff --git a/tests/test_gui_updates.py b/tests/test_gui_updates.py
index 841fc83..cf52ed2 100644
--- a/tests/test_gui_updates.py
+++ b/tests/test_gui_updates.py
@@ -3,6 +3,7 @@ from unittest.mock import patch, MagicMock
 import importlib.util
 import sys
 import os
+from typing import Any
 import dearpygui.dearpygui as dpg

 # Ensure project root is in path for imports
@@ -40,7 +41,7 @@ def app_instance() -> None:
     yield app
     dpg.destroy_context()

-def test_telemetry_panel_updates_correctly(app_instance):
+def test_telemetry_panel_updates_correctly(app_instance: Any) -> None:
     """
     Tests that the _update_performance_diagnostics method correctly updates
     DPG widgets based on the stats from ai_client.
@@ -71,7 +72,7 @@ def test_telemetry_panel_updates_correctly(app_instance):
     # Assert Gemini-specific widget was hidden
     mock_configure_item.assert_any_call("gemini_cache_label", show=False)

-def test_cache_data_display_updates_correctly(app_instance):
+def test_cache_data_display_updates_correctly(app_instance: Any) -> None:
     """
     Tests that the _update_performance_diagnostics method correctly updates
     the GUI with Gemini cache statistics when the provider is set to Gemini.
diff --git a/tests/test_logging_e2e.py b/tests/test_logging_e2e.py
index f5dede9..8a2121d 100644
--- a/tests/test_logging_e2e.py
+++ b/tests/test_logging_e2e.py
@@ -1,6 +1,7 @@
 import os
 import shutil
 import pytest
+from typing import Any
 from pathlib import Path
 from datetime import datetime, timedelta
 from unittest.mock import patch
@@ -10,7 +11,7 @@ from log_registry import LogRegistry
 from log_pruner import LogPruner

 @pytest.fixture
-def e2e_setup(tmp_path, monkeypatch):
+def e2e_setup(tmp_path: Path, monkeypatch: Any) -> Any:
     # Ensure closed before starting
     session_logger.close_session()
     monkeypatch.setattr(session_logger, "_comms_fh", None)
@@ -29,7 +30,7 @@ def e2e_setup(tmp_path, monkeypatch):
     session_logger._LOG_DIR = original_log_dir
     session_logger._SCRIPTS_DIR = original_scripts_dir

-def test_logging_e2e(e2e_setup):
+def test_logging_e2e(e2e_setup: Any) -> None:
     tmp_path = e2e_setup
     logs_dir = tmp_path / "logs"
     # Step 1: Initialize (open_session)
diff --git a/tests/test_mma_dashboard_refresh.py b/tests/test_mma_dashboard_refresh.py
index cae99dd..d7c4bdd 100644
--- a/tests/test_mma_dashboard_refresh.py
+++ b/tests/test_mma_dashboard_refresh.py
@@ -1,9 +1,10 @@
 import pytest
 from unittest.mock import patch, MagicMock
+from typing import Any
 from gui_2 import App

 @pytest.fixture
-def app_instance():
+def app_instance() -> Any:
     # We patch the dependencies of App.__init__ to avoid side effects
     with (
         patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
@@ -23,7 +24,7 @@
     # Return the app and the mock_pm for use in tests
     yield app, mock_pm

-def test_mma_dashboard_refresh(app_instance):
+def test_mma_dashboard_refresh(app_instance: Any) -> None:
     app, mock_pm = app_instance
     # 1. Define mock tracks
     mock_tracks = [
@@ -43,7 +44,7 @@
     # Verify get_all_tracks was called with the correct base_dir
     mock_pm.get_all_tracks.assert_called_with(app.ui_files_base_dir)

-def test_mma_dashboard_initialization_refresh(app_instance):
+def test_mma_dashboard_initialization_refresh(app_instance: Any) -> None:
     """
     Checks that _refresh_from_project is called during initialization
     if _load_active_project is NOT mocked to skip it (but here it IS mocked in fixture).
diff --git a/tests/test_orchestration_logic.py b/tests/test_orchestration_logic.py
index 0b04fc2..f65014c 100644
--- a/tests/test_orchestration_logic.py
+++ b/tests/test_orchestration_logic.py
@@ -1,17 +1,18 @@
 import pytest
 from unittest.mock import MagicMock, patch
 import json
+from typing import Any
 import orchestrator_pm
 import conductor_tech_lead
 import multi_agent_conductor
 from models import Track, Ticket

 @pytest.fixture
-def mock_ai_client() -> None:
+def mock_ai_client() -> Any:
     with patch("ai_client.send") as mock_send:
         yield mock_send

-def test_generate_tracks(mock_ai_client):
+def test_generate_tracks(mock_ai_client: Any) -> None:
     # Tier 1 (PM) response mock
     mock_ai_client.return_value = json.dumps([
         {"id": "track_1", "title": "Infrastructure Setup", "description": "Setup basic project structure"},
@@ -26,8 +27,7 @@
     assert tracks[1]["id"] == "track_2"
     mock_ai_client.assert_called_once()

-def test_generate_tickets(mock_ai_client):
-# Tier 2 (Tech Lead) response mock
+def test_generate_tickets(mock_ai_client: Any) -> None:
     mock_ai_client.return_value = json.dumps([
         {"id": "T-001", "description": "Define interfaces", "depends_on": []},
         {"id": "T-002", "description": "Implement interfaces", "depends_on": ["T-001"]}
@@ -102,7 +102,7 @@ def test_conductor_engine_parse_json_tickets() -> None:
     assert track.tickets[1].id == "T2"
     assert track.tickets[1].depends_on == ["T1"]

-def test_run_worker_lifecycle_blocked(mock_ai_client):
+def test_run_worker_lifecycle_blocked(mock_ai_client: Any) -> None:
     ticket = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
     context = multi_agent_conductor.WorkerContext(ticket_id="T1", model_name="model", messages=[])
     mock_ai_client.return_value = "BLOCKED because of missing info"
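
A closing note on the yield-fixtures retyped above (`mock_ai_client() -> Any`, `app_instance() -> Any`): `Any` fixes the incorrect `-> None` on generators, and a still narrower option, where the yielded type is known, is `Iterator[...]`, which lets dependent tests declare the concrete type instead of `Any`. A minimal sketch under that assumption (the fixture name and patch target are illustrative, not from this repo):

    # Sketch: typing a yield-fixture as Iterator[MagicMock] rather than Any.
    import os
    from typing import Iterator
    from unittest.mock import MagicMock, patch

    import pytest

    @pytest.fixture
    def mock_cwd() -> Iterator[MagicMock]:
        # patch() as a context manager yields the MagicMock replacing os.getcwd;
        # the patch is undone after the test, when the generator resumes.
        with patch("os.getcwd", return_value="/tmp/fake") as mocked:
            yield mocked

    def test_reads_mocked_cwd(mock_cwd: MagicMock) -> None:
        # The test sees the concrete MagicMock type, so its helpers type-check.
        assert os.getcwd() == "/tmp/fake"
        mock_cwd.assert_called_once()
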