refactor(tests): Add strict type hints to fifth batch of test files

2026-02-28 19:24:02 -05:00
parent ee2d6f4234
commit cc806d2cc6
9 changed files with 20 additions and 21 deletions

View File

@@ -2,7 +2,7 @@ import subprocess
 import sys
 import os
-def run_diag(role, prompt):
+def run_diag(role: str, prompt: str) -> str:
     print(f"--- Running Diag for {role} ---")
     cmd = [sys.executable, "scripts/mma_exec.py", "--role", role, prompt]
     try:

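The helper now returns `str`, which commits every path through the `try`/`except` around the subprocess call to return a string (or raise); strict checkers report a branch that falls through as implicitly returning `None`. A minimal body consistent with the new signature — the script's real error handling is not visible in this hunk and may differ:

```python
# Illustrative body only; the actual error handling is not shown in the hunk above.
import subprocess
import sys


def run_diag(role: str, prompt: str) -> str:
    print(f"--- Running Diag for {role} ---")
    cmd = [sys.executable, "scripts/mma_exec.py", "--role", role, prompt]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return result.stdout
    except subprocess.CalledProcessError as exc:
        # Returning the captured stderr keeps every code path returning str.
        return exc.stderr
```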
View File

@@ -2,7 +2,7 @@ import subprocess
 import pytest
 import os
-def run_ps_script(role, prompt):
+def run_ps_script(role: str, prompt: str) -> subprocess.CompletedProcess:
     """Helper to run the run_subagent.ps1 script."""
     # Using -File is safer and handles arguments better
     cmd = [

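`run_ps_script` is annotated as returning a bare `subprocess.CompletedProcess`. The class is generic, so if the helper runs with `text=True` the annotation could be narrowed to `CompletedProcess[str]`. A sketch under that assumption; only `cmd = [` is visible in the hunk, so the PowerShell invocation and parameters below are placeholders:

```python
import subprocess


def run_ps_script(role: str, prompt: str) -> subprocess.CompletedProcess[str]:
    """Helper to run the run_subagent.ps1 script."""
    # Using -File is safer and handles arguments better
    cmd = [
        "powershell", "-File", "run_subagent.ps1",  # placeholder invocation
        "-Role", role, "-Prompt", prompt,           # placeholder parameters
    ]
    # text=True decodes stdout/stderr, which is what makes CompletedProcess[str] accurate
    return subprocess.run(cmd, capture_output=True, text=True)
```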
View File

@@ -5,6 +5,7 @@ import requests
 import os
 import signal
 import sys
+from typing import Generator
 import os
 # Ensure project root is in path
@@ -14,14 +15,14 @@ from api_hook_client import ApiHookClient
 import ai_client
 @pytest.fixture(autouse=True)
-def reset_ai_client() -> None:
+def reset_ai_client() -> Generator[None, None, None]:
     """Reset ai_client global state between every test to prevent state pollution."""
     ai_client.reset_session()
     # Default to a safe model
     ai_client.set_provider("gemini", "gemini-2.5-flash-lite")
     yield
-def kill_process_tree(pid):
+def kill_process_tree(pid: int | None) -> None:
     """Robustly kills a process and all its children."""
     if pid is None:
         return
@@ -41,7 +42,7 @@ def kill_process_tree(pid):
         print(f"[Fixture] Error killing process tree {pid}: {e}")
 @pytest.fixture(scope="session")
-def live_gui() -> None:
+def live_gui() -> Generator[tuple[subprocess.Popen, str], None, None]:
     """
     Session-scoped fixture that starts gui_2.py with --enable-test-hooks.
     """

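The conftest hunks switch the yield-style fixtures from `-> None` to `Generator[YieldType, None, None]`, and `kill_process_tree` gains a PEP 604 union (`int | None` requires Python 3.10+ unless `from __future__ import annotations` is used; `Optional[int]` is the equivalent spelling). A stand-alone sketch of the Generator convention, with placeholder fixtures rather than the project's real bodies:

```python
from typing import Generator

import pytest


@pytest.fixture(autouse=True)
def reset_state() -> Generator[None, None, None]:
    # set-up runs before each test ...
    yield  # nothing useful is yielded, hence the first type argument is None
    # ... teardown runs after each test


@pytest.fixture(scope="session")
def live_server() -> Generator[tuple[int, str], None, None]:
    pid, url = 1234, "http://127.0.0.1:8000"  # placeholder values
    yield pid, url  # the yielded value's type is the first Generator argument
    # teardown: stop the server here
```

`collections.abc.Iterator[None]` is an equally valid, shorter spelling when the send and return type parameters are unused.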
View File

@@ -8,14 +8,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 from ai_client import set_agent_tools, _build_anthropic_tools
-def test_set_agent_tools():
-    # Correct usage: pass a dict
+def test_set_agent_tools() -> None:
     agent_tools = {"read_file": True, "list_directory": False}
     set_agent_tools(agent_tools)
-def test_build_anthropic_tools_conversion():
-    # _build_anthropic_tools takes no arguments and uses the global _agent_tools
-    # We set a tool to True and check if it appears in the output
+def test_build_anthropic_tools_conversion() -> None:
     set_agent_tools({"read_file": True})
     anthropic_tools = _build_anthropic_tools()
     tool_names = [t["name"] for t in anthropic_tools]

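Giving the test functions an explicit `-> None` is what makes their bodies visible to mypy: by default mypy does not check the bodies of unannotated functions, so the return annotation opts each test in (enabling `check_untyped_defs` is the project-wide alternative). A small illustration, assuming `set_agent_tools` is itself annotated to take a dict, as the removed comment suggests:

```python
from ai_client import set_agent_tools


def test_set_agent_tools_untyped():
    # No annotations: mypy's default settings skip this body,
    # so a wrong argument type here would go unreported.
    set_agent_tools("read_file")  # hypothetical mistake


def test_set_agent_tools() -> None:
    # Annotated: the body is type-checked like any other function.
    set_agent_tools({"read_file": True, "list_directory": False})
```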
View File

@@ -1,11 +1,12 @@
 import pytest
 from unittest.mock import MagicMock, patch
+from typing import Generator
 from gui_2 import App
 import ai_client
 from events import EventEmitter
 @pytest.fixture
-def app_instance() -> None:
+def app_instance() -> Generator[type[App], None, None]:
     """
     Fixture to create an instance of the gui_2.App class for testing.
     It mocks functions that would render a window or block execution.
@@ -25,7 +26,7 @@ def app_instance() -> None:
     ):
         yield App
-def test_app_subscribes_to_events(app_instance):
+def test_app_subscribes_to_events(app_instance: type[App]) -> None:
     """
     This test checks that the App's __init__ method subscribes the necessary
     event handlers to the ai_client.events emitter.

View File

@@ -1,11 +1,12 @@
 import pytest
 from unittest.mock import patch, MagicMock
+from typing import Generator
 from gui_2 import App
 import ai_client
 from events import EventEmitter
 @pytest.fixture
-def app_instance() -> None:
+def app_instance() -> Generator[App, None, None]:
     if not hasattr(ai_client, 'events') or ai_client.events is None:
         ai_client.events = EventEmitter()
     with (
@@ -21,7 +22,7 @@ def app_instance() -> None:
     ):
         yield App()
-def test_mcp_tool_call_is_dispatched(app_instance):
+def test_mcp_tool_call_is_dispatched(app_instance: App) -> None:
     """
     This test verifies that when the AI returns a tool call for an MCP function,
     the ai_client correctly dispatches it to mcp_client.

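The two `app_instance` fixtures above receive different annotations because they yield different things: the first yields the class object itself (`yield App`), hence `Generator[type[App], None, None]` and `app_instance: type[App]`, while the second yields a constructed instance (`yield App()`), hence `Generator[App, None, None]` and `app_instance: App`. A self-contained sketch of the distinction using a placeholder class instead of `gui_2.App`:

```python
from typing import Generator

import pytest


class Widget:
    pass


@pytest.fixture
def widget_class() -> Generator[type[Widget], None, None]:
    yield Widget  # the class object itself -> type[Widget]


@pytest.fixture
def widget_instance() -> Generator[Widget, None, None]:
    yield Widget()  # a constructed instance -> Widget


def test_uses_class(widget_class: type[Widget]) -> None:
    assert widget_class is Widget


def test_uses_instance(widget_instance: Widget) -> None:
    assert isinstance(widget_instance, Widget)
```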
View File

@@ -11,7 +11,7 @@ from api_hook_client import ApiHookClient
 # Session-wide storage for comparing metrics across parameterized fixture runs
 _shared_metrics = {}
-def test_performance_benchmarking(live_gui):
+def test_performance_benchmarking(live_gui: tuple) -> None:
     """
     Collects performance metrics for the current GUI script (parameterized as gui.py and gui_2.py).
     """

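`live_gui: tuple` leaves the element types as `Any` (and mypy's `disallow-any-generics`, part of `--strict`, would ask for type parameters). Since the session fixture is now annotated to yield `tuple[subprocess.Popen, str]`, the parameter could be narrowed to match so that unpacking inside the test is checked as well. A possible narrowing — a suggestion, not part of this commit:

```python
import subprocess


def test_performance_benchmarking(live_gui: tuple[subprocess.Popen, str]) -> None:
    proc, base_url = live_gui  # unpacking is now checked: Popen and str
    assert isinstance(proc, subprocess.Popen)
    assert isinstance(base_url, str)
```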
View File

@@ -5,7 +5,7 @@ from gui_2 import App
 from events import UserRequestEvent
 @pytest.fixture
-def mock_gui():
+def mock_gui() -> App:
     with (
         patch('gui_2.load_config', return_value={
             "ai": {"provider": "gemini", "model": "model-1"},
@@ -22,8 +22,7 @@ def mock_gui():
         gui = App()
         return gui
-def test_handle_generate_send_pushes_event(mock_gui):
-    # Mock _do_generate to return sample data
+def test_handle_generate_send_pushes_event(mock_gui: App) -> None:
     mock_gui._do_generate = MagicMock(return_value=(
         "full_md", "path", [], "stable_md", "disc_text"
     ))

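`mock_gui` returns rather than yields, so it is annotated directly as `-> App` with no `Generator` wrapper. One behavioural difference worth keeping in mind when choosing between the two shapes: returning from inside a `with patch(...)` block ends the patches as soon as the fixture exits, whereas yielding keeps them active for the whole test. A small sketch with stand-in patch targets:

```python
from typing import Generator
from unittest.mock import patch

import pytest


@pytest.fixture
def patched_during_build() -> object:
    with patch("builtins.print"):  # active only while the object is constructed
        obj = object()
        return obj  # leaving the fixture also leaves the with-block


@pytest.fixture
def patched_during_test() -> Generator[object, None, None]:
    with patch("builtins.print"):  # stays active until the test finishes
        yield object()
```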
View File

@@ -1,13 +1,14 @@
 import pytest
 from unittest.mock import MagicMock, patch
+from typing import Generator
 import dearpygui.dearpygui as dpg
 import gui_legacy
 from gui_legacy import App
 import ai_client
 @pytest.fixture
-def app_instance() -> None:
+def app_instance() -> Generator[App, None, None]:
     """
     Fixture to create an instance of the App class for testing.
     It creates a real DPG context but mocks functions that would
@@ -33,8 +34,7 @@ def app_instance() -> None:
         yield app
     dpg.destroy_context()
-def test_gui_updates_on_event(app_instance):
-    # Patch dependencies for the test
+def test_gui_updates_on_event(app_instance: App) -> None:
     with patch('dearpygui.dearpygui.set_value') as mock_set_value, \
         patch('dearpygui.dearpygui.does_item_exist', return_value=True), \
         patch('dearpygui.dearpygui.configure_item'), \