refactor(types): add missing `-> None` return annotations across entire codebase

Applied 236 `-> None` return type annotations to functions that return no value,
across 100+ files (core modules, tests, scripts, simulations).
Added Phase 4 to python_style_refactor track for remaining 597 items
(untyped params, vars, and functions with return values).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-28 11:16:56 -05:00
parent 07f4e36016
commit 60396f03f8
98 changed files with 311 additions and 240 deletions

View File

@@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient
import ai_client
@pytest.fixture(autouse=True)
def reset_ai_client():
def reset_ai_client() -> None:
"""Reset ai_client global state between every test to prevent state pollution."""
ai_client.reset_session()
# Default to a safe model
@@ -41,7 +41,7 @@ def kill_process_tree(pid):
print(f"[Fixture] Error killing process tree {pid}: {e}")
@pytest.fixture(scope="session")
def live_gui():
def live_gui() -> None:
"""
Session-scoped fixture that starts gui_2.py with --enable-test-hooks.
"""

View File

@@ -2,7 +2,7 @@ import pytest
from unittest.mock import MagicMock, patch
import ai_client
def test_ai_client_send_gemini_cli():
def test_ai_client_send_gemini_cli() -> None:
"""
Verifies that 'ai_client.send' correctly interacts with 'GeminiCliAdapter'
when the 'gemini_cli' provider is specified.

View File

@@ -2,7 +2,7 @@ import pytest
from unittest.mock import patch, MagicMock
import ai_client
def test_list_models_gemini_cli():
def test_list_models_gemini_cli() -> None:
"""
Verifies that 'ai_client.list_models' correctly returns a list of models
for the 'gemini_cli' provider.

View File

@@ -2,7 +2,7 @@ import pytest
import textwrap
from scripts.ai_style_formatter import format_code
def test_basic_indentation():
def test_basic_indentation() -> None:
source = textwrap.dedent("""\
def hello():
print("world")
@@ -17,7 +17,7 @@ def test_basic_indentation():
)
assert format_code(source) == expected
def test_top_level_blank_lines():
def test_top_level_blank_lines() -> None:
source = textwrap.dedent("""\
def a():
pass
@@ -35,7 +35,7 @@ def test_top_level_blank_lines():
)
assert format_code(source) == expected
def test_inner_blank_lines():
def test_inner_blank_lines() -> None:
source = textwrap.dedent("""\
def a():
print("start")
@@ -49,7 +49,7 @@ def test_inner_blank_lines():
)
assert format_code(source) == expected
def test_multiline_string_safety():
def test_multiline_string_safety() -> None:
source = textwrap.dedent("""\
def a():
'''
@@ -72,7 +72,7 @@ def test_multiline_string_safety():
assert " This is a multiline" in result
assert result.startswith("def a():\n '''")
def test_continuation_indentation():
def test_continuation_indentation() -> None:
source = textwrap.dedent("""\
def long_func(
a,
@@ -95,7 +95,7 @@ def test_continuation_indentation():
)
assert format_code(source) == expected
def test_multiple_top_level_definitions():
def test_multiple_top_level_definitions() -> None:
source = textwrap.dedent("""\
class MyClass:
def __init__(self):

View File

@@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import ai_client
class MockUsage:
def __init__(self):
def __init__(self) -> None:
self.prompt_token_count = 10
self.candidates_token_count = 5
self.total_token_count = 15
@@ -28,13 +28,13 @@ def test_ai_client_event_emitter_exists():
# This should fail initially because 'events' won't exist on ai_client
assert hasattr(ai_client, 'events')
def test_event_emission():
def test_event_emission() -> None:
callback = MagicMock()
ai_client.events.on("test_event", callback)
ai_client.events.emit("test_event", payload={"data": 123})
callback.assert_called_once_with(payload={"data": 123})
def test_send_emits_events():
def test_send_emits_events() -> None:
with patch("ai_client._send_gemini") as mock_send_gemini, \
patch("ai_client._send_anthropic") as mock_send_anthropic:
mock_send_gemini.return_value = "gemini response"
@@ -50,7 +50,7 @@ def test_send_emits_events():
# Let's mock _gemini_client instead to let _send_gemini run and emit events.
pass
def test_send_emits_events_proper():
def test_send_emits_events_proper() -> None:
with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client") as mock_client:
mock_chat = MagicMock()
@@ -70,7 +70,7 @@ def test_send_emits_events_proper():
args, kwargs = start_callback.call_args
assert kwargs['payload']['provider'] == 'gemini'
def test_send_emits_tool_events():
def test_send_emits_tool_events() -> None:
import mcp_client
with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client") as mock_client, \

View File

@@ -56,7 +56,7 @@ def test_get_performance_success(live_gui):
response = client.get_performance()
assert "performance" in response
def test_unsupported_method_error():
def test_unsupported_method_error() -> None:
"""
Test that calling an unsupported HTTP method raises a ValueError.
"""
@@ -64,7 +64,7 @@ def test_unsupported_method_error():
with pytest.raises(ValueError, match="Unsupported HTTP method"):
client._make_request('PUT', '/some_endpoint', data={'key': 'value'})
def test_get_text_value():
def test_get_text_value() -> None:
"""
Test retrieval of string representation using get_text_value.
"""
@@ -74,7 +74,7 @@ def test_get_text_value():
with patch.object(client, 'get_value', return_value=None):
assert client.get_text_value("dummy_tag") is None
def test_get_node_status():
def test_get_node_status() -> None:
"""
Test retrieval of DAG node status using get_node_status.
"""

View File

@@ -7,7 +7,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
def test_api_client_has_extensions():
def test_api_client_has_extensions() -> None:
client = ApiHookClient()
# These should fail initially as they are not implemented
assert hasattr(client, 'select_tab')
@@ -33,7 +33,7 @@ def test_get_indicator_state_integration(live_gui):
assert 'shown' in response
assert response['tag'] == "thinking_indicator"
def test_app_processes_new_actions():
def test_app_processes_new_actions() -> None:
import gui_legacy
from unittest.mock import MagicMock, patch
import dearpygui.dearpygui as dpg

View File

@@ -2,12 +2,12 @@ import pytest
import tree_sitter
from file_cache import ASTParser
def test_ast_parser_initialization():
def test_ast_parser_initialization() -> None:
"""Verify that ASTParser can be initialized with a language string."""
parser = ASTParser("python")
assert parser.language_name == "python"
def test_ast_parser_parse():
def test_ast_parser_parse() -> None:
"""Verify that the parse method returns a tree_sitter.Tree."""
parser = ASTParser("python")
code = """def example_func():
@@ -17,7 +17,7 @@ def test_ast_parser_parse():
# Basic check that it parsed something
assert tree.root_node.type == "module"
def test_ast_parser_get_skeleton_python():
def test_ast_parser_get_skeleton_python() -> None:
"""Verify that get_skeleton replaces function bodies with '...' while preserving docstrings."""
parser = ASTParser("python")
code = '''
@@ -51,14 +51,14 @@ class MyClass:
assert "return result" not in skeleton
assert 'print("doing something")' not in skeleton
def test_ast_parser_invalid_language():
def test_ast_parser_invalid_language() -> None:
"""Verify handling of unsupported or invalid languages."""
# This might raise an error or return a default, depending on implementation
# For now, we expect it to either fail gracefully or raise an exception we can catch
with pytest.raises(Exception):
ASTParser("not-a-language")
def test_ast_parser_get_curated_view():
def test_ast_parser_get_curated_view() -> None:
"""Verify that get_curated_view preserves function bodies with @core_logic or # [HOT]."""
parser = ASTParser("python")
code = '''

View File

@@ -1,7 +1,7 @@
import pytest
from file_cache import ASTParser
def test_ast_parser_get_curated_view():
def test_ast_parser_get_curated_view() -> None:
parser = ASTParser("python")
code = '''
@core_logic

View File

@@ -2,7 +2,7 @@ import asyncio
import pytest
from events import AsyncEventQueue
def test_async_event_queue_put_get():
def test_async_event_queue_put_get() -> None:
"""Verify that an event can be asynchronously put and retrieved from the queue."""
async def run_test():
@@ -15,7 +15,7 @@ def test_async_event_queue_put_get():
assert ret_payload == payload
asyncio.run(run_test())
def test_async_event_queue_multiple():
def test_async_event_queue_multiple() -> None:
"""Verify that multiple events can be asynchronously put and retrieved in order."""
async def run_test():
@@ -30,7 +30,7 @@ def test_async_event_queue_multiple():
assert val2 == 2
asyncio.run(run_test())
def test_async_event_queue_none_payload():
def test_async_event_queue_none_payload() -> None:
"""Verify that an event with None payload works correctly."""
async def run_test():

View File

@@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from scripts.cli_tool_bridge import main
class TestCliToolBridge(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop'
self.tool_call = {
'tool_name': 'read_file',

View File

@@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from scripts.cli_tool_bridge import main
class TestCliToolBridgeMapping(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop'
@patch('sys.stdin', new_callable=io.StringIO)

View File

@@ -55,7 +55,7 @@ def test_conductor_handles_api_hook_failure(live_gui):
assert results["verification_successful"] is False
assert "failed" in results["verification_message"]
def test_conductor_handles_api_hook_connection_error():
def test_conductor_handles_api_hook_connection_error() -> None:
"""
Verify Conductor handles a simulated API hook connection error (server down).
"""

View File

@@ -6,7 +6,7 @@ import ai_client
# These tests define the expected interface for multi_agent_conductor.py
# which will be implemented in the next phase of TDD.
def test_conductor_engine_initialization():
def test_conductor_engine_initialization() -> None:
"""
Test that ConductorEngine can be initialized with a Track.
"""

View File

@@ -48,12 +48,12 @@ class TestConductorTechLead(unittest.TestCase):
self.assertEqual(tickets, [])
class TestTopologicalSort(unittest.TestCase):
def test_topological_sort_empty(self):
def test_topological_sort_empty(self) -> None:
tickets = []
sorted_tickets = conductor_tech_lead.topological_sort(tickets)
self.assertEqual(sorted_tickets, [])
def test_topological_sort_linear(self):
def test_topological_sort_linear(self) -> None:
tickets = [
{"id": "t2", "depends_on": ["t1"]},
{"id": "t1", "depends_on": []},
@@ -82,7 +82,7 @@ class TestTopologicalSort(unittest.TestCase):
self.assertEqual(ids[-1], "t4")
self.assertSetEqual(set(ids[1:3]), {"t2", "t3"})
def test_topological_sort_cycle(self):
def test_topological_sort_cycle(self) -> None:
tickets = [
{"id": "t1", "depends_on": ["t2"]},
{"id": "t2", "depends_on": ["t1"]},

View File

@@ -2,7 +2,7 @@ import pytest
from models import Ticket
from dag_engine import TrackDAG
def test_get_ready_tasks_linear():
def test_get_ready_tasks_linear() -> None:
t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T2"])
@@ -11,7 +11,7 @@ def test_get_ready_tasks_linear():
assert len(ready) == 1
assert ready[0].id == "T2"
def test_get_ready_tasks_branching():
def test_get_ready_tasks_branching() -> None:
t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"])
@@ -21,19 +21,19 @@ def test_get_ready_tasks_branching():
ready_ids = {t.id for t in ready}
assert ready_ids == {"T2", "T3"}
def test_has_cycle_no_cycle():
def test_has_cycle_no_cycle() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2])
assert not dag.has_cycle()
def test_has_cycle_direct_cycle():
def test_has_cycle_direct_cycle() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"])
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2])
assert dag.has_cycle()
def test_has_cycle_indirect_cycle():
def test_has_cycle_indirect_cycle() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"])
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T3"])
t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"])
@@ -49,7 +49,7 @@ def test_has_cycle_complex_no_cycle():
dag = TrackDAG([t1, t2, t3, t4])
assert not dag.has_cycle()
def test_get_ready_tasks_multiple_deps():
def test_get_ready_tasks_multiple_deps() -> None:
t1 = Ticket(id="T1", description="T1", status="completed", assigned_to="worker")
t2 = Ticket(id="T2", description="T2", status="completed", assigned_to="worker")
t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T1", "T2"])
@@ -58,7 +58,7 @@ def test_get_ready_tasks_multiple_deps():
t2.status = "todo"
assert [t.id for t in dag.get_ready_tasks()] == ["T2"]
def test_topological_sort():
def test_topological_sort() -> None:
t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"])
t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T2"])
@@ -66,7 +66,7 @@ def test_topological_sort():
sort = dag.topological_sort()
assert sort == ["T1", "T2", "T3"]
def test_topological_sort_cycle():
def test_topological_sort_cycle() -> None:
t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker", depends_on=["T2"])
t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2])

View File

@@ -24,7 +24,7 @@ def test_credentials_error_mentions_deepseek(monkeypatch):
assert "[deepseek]" in err_msg
assert "api_key" in err_msg
def test_default_project_includes_reasoning_role():
def test_default_project_includes_reasoning_role() -> None:
"""
Verify that 'Reasoning' is included in the default discussion roles
to support DeepSeek-R1 reasoning traces.
@@ -33,14 +33,14 @@ def test_default_project_includes_reasoning_role():
roles = proj["discussion"]["roles"]
assert "Reasoning" in roles
def test_gui_providers_list():
def test_gui_providers_list() -> None:
"""
Check if 'deepseek' is in the GUI's provider list.
"""
import gui_2
assert "deepseek" in gui_2.PROVIDERS
def test_deepseek_model_listing():
def test_deepseek_model_listing() -> None:
"""
Verify that list_models for deepseek returns expected models.
"""

View File

@@ -2,7 +2,7 @@ import pytest
from unittest.mock import patch, MagicMock
import ai_client
def test_deepseek_model_selection():
def test_deepseek_model_selection() -> None:
"""
Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state.
"""
@@ -10,7 +10,7 @@ def test_deepseek_model_selection():
assert ai_client._provider == "deepseek"
assert ai_client._model == "deepseek-chat"
def test_deepseek_completion_logic():
def test_deepseek_completion_logic() -> None:
"""
Verifies that ai_client.send() correctly calls the DeepSeek API and returns content.
"""
@@ -30,7 +30,7 @@ def test_deepseek_completion_logic():
assert result == "DeepSeek Response"
assert mock_post.called
def test_deepseek_reasoning_logic():
def test_deepseek_reasoning_logic() -> None:
"""
Verifies that reasoning_content is captured and wrapped in <thinking> tags.
"""
@@ -54,7 +54,7 @@ def test_deepseek_reasoning_logic():
assert "<thinking>\nChain of thought\n</thinking>" in result
assert "Final Answer" in result
def test_deepseek_tool_calling():
def test_deepseek_tool_calling() -> None:
"""
Verifies that DeepSeek provider correctly identifies and executes tool calls.
"""
@@ -103,7 +103,7 @@ def test_deepseek_tool_calling():
assert mock_dispatch.call_args[0][0] == "read_file"
assert mock_dispatch.call_args[0][1] == {"path": "test.txt"}
def test_deepseek_streaming():
def test_deepseek_streaming() -> None:
"""
Verifies that DeepSeek provider correctly aggregates streaming chunks.
"""

View File

@@ -39,13 +39,13 @@ def test_execution_engine_basic_flow():
ready = engine.tick()
assert len(ready) == 0
def test_execution_engine_update_nonexistent_task():
def test_execution_engine_update_nonexistent_task() -> None:
dag = TrackDAG([])
engine = ExecutionEngine(dag)
# Should not raise error, or handle gracefully
engine.update_task_status("NONEXISTENT", "completed")
def test_execution_engine_status_persistence():
def test_execution_engine_status_persistence() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
dag = TrackDAG([t1])
engine = ExecutionEngine(dag)
@@ -54,7 +54,7 @@ def test_execution_engine_status_persistence():
ready = engine.tick()
assert len(ready) == 0 # Only 'todo' tasks should be returned by tick() if they are ready
def test_execution_engine_auto_queue():
def test_execution_engine_auto_queue() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2])
@@ -76,7 +76,7 @@ def test_execution_engine_auto_queue():
assert ready[0].id == "T2"
assert t2.status == "in_progress"
def test_execution_engine_step_mode():
def test_execution_engine_step_mode() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", step_mode=True)
dag = TrackDAG([t1])
engine = ExecutionEngine(dag, auto_queue=True)
@@ -92,7 +92,7 @@ def test_execution_engine_step_mode():
ready = engine.tick()
assert len(ready) == 0
def test_execution_engine_approve_task():
def test_execution_engine_approve_task() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
dag = TrackDAG([t1])
engine = ExecutionEngine(dag, auto_queue=False)

View File

@@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapter(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
self.adapter = GeminiCliAdapter(binary_path="gemini")
@patch('subprocess.Popen')

View File

@@ -15,7 +15,7 @@ from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapterParity(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
"""Set up a fresh adapter instance and reset session state for each test."""
# Patch session_logger to prevent file operations during tests
self.session_logger_patcher = patch('gemini_cli_adapter.session_logger')
@@ -25,7 +25,7 @@ class TestGeminiCliAdapterParity(unittest.TestCase):
self.adapter.last_usage = None
self.adapter.last_latency = 0.0
def tearDown(self):
def tearDown(self) -> None:
self.session_logger_patcher.stop()
@patch('subprocess.Popen')

View File

@@ -9,7 +9,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client
@pytest.fixture(autouse=True)
def setup_ai_client():
def setup_ai_client() -> None:
ai_client.reset_session()
ai_client.set_provider("gemini_cli", "gemini-2.5-flash")
ai_client.confirm_and_run_callback = lambda script, base_dir: "Mocked execution"

View File

@@ -9,7 +9,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Import the necessary functions from ai_client, including the reset helper
from ai_client import get_gemini_cache_stats, reset_session
def test_get_gemini_cache_stats_with_mock_client():
def test_get_gemini_cache_stats_with_mock_client() -> None:
"""
Test that get_gemini_cache_stats correctly processes cache lists
from a mocked client instance.

View File

@@ -5,7 +5,7 @@ import ai_client
from events import EventEmitter
@pytest.fixture
def app_instance():
def app_instance() -> None:
"""
Fixture to create an instance of the gui_2.App class for testing.
It mocks functions that would render a window or block execution.

View File

@@ -3,7 +3,7 @@ from unittest.mock import patch
from gui_2 import App
@pytest.fixture
def app_instance():
def app_instance() -> None:
with (
patch('gui_2.load_config', return_value={'gui': {'show_windows': {}}}),
patch('gui_2.save_config'),

View File

@@ -5,7 +5,7 @@ import ai_client
from events import EventEmitter
@pytest.fixture
def app_instance():
def app_instance() -> None:
if not hasattr(ai_client, 'events') or ai_client.events is None:
ai_client.events = EventEmitter()
with (

View File

@@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient
TEST_CALLBACK_FILE = Path("temp_callback_output.txt")
@pytest.fixture(scope="function", autouse=True)
def cleanup_callback_file():
def cleanup_callback_file() -> None:
"""Ensures the test callback file is cleaned up before and after each test."""
if TEST_CALLBACK_FILE.exists():
TEST_CALLBACK_FILE.unlink()

View File

@@ -55,7 +55,7 @@ def test_performance_benchmarking(live_gui):
assert avg_fps >= 30, f"{gui_script} FPS {avg_fps:.2f} is below 30 FPS threshold"
assert avg_ft <= 33.3, f"{gui_script} Frame time {avg_ft:.2f}ms is above 33.3ms threshold"
def test_performance_parity():
def test_performance_parity() -> None:
"""
Compare the metrics collected in the parameterized test_performance_benchmarking.
"""

View File

@@ -50,7 +50,7 @@ def test_handle_generate_send_pushes_event(mock_gui):
assert event.disc_text == "disc_text"
assert event.base_dir == "."
def test_user_request_event_payload():
def test_user_request_event_payload() -> None:
payload = UserRequestEvent(
prompt="hello",
stable_md="md",
@@ -66,7 +66,7 @@ def test_user_request_event_payload():
assert d["base_dir"] == "."
@pytest.mark.asyncio
async def test_async_event_queue():
async def test_async_event_queue() -> None:
from events import AsyncEventQueue
q = AsyncEventQueue()
await q.put("test_event", {"data": 123})

View File

@@ -12,7 +12,7 @@ spec.loader.exec_module(gui_legacy)
from gui_legacy import App
@pytest.fixture
def app_instance():
def app_instance() -> None:
dpg.create_context()
with patch('dearpygui.dearpygui.create_viewport'), \
patch('dearpygui.dearpygui.setup_dearpygui'), \

View File

@@ -7,7 +7,7 @@ from gui_legacy import App
import ai_client
@pytest.fixture
def app_instance():
def app_instance() -> None:
"""
Fixture to create an instance of the App class for testing.
It creates a real DPG context but mocks functions that would

View File

@@ -16,7 +16,7 @@ spec.loader.exec_module(gui_legacy)
from gui_legacy import App
@pytest.fixture
def app_instance():
def app_instance() -> None:
"""
Fixture to create an instance of the App class for testing.
It creates a real DPG context but mocks functions that would

View File

@@ -24,7 +24,7 @@ class TestHeadlessAPI(unittest.TestCase):
self.api = self.app_instance.create_api()
self.client = TestClient(self.api)
def test_health_endpoint(self):
def test_health_endpoint(self) -> None:
response = self.client.get("/health")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"status": "ok"})
@@ -42,7 +42,7 @@ class TestHeadlessAPI(unittest.TestCase):
response = self.client.get("/status", headers=headers)
self.assertEqual(response.status_code, 200)
def test_generate_endpoint(self):
def test_generate_endpoint(self) -> None:
payload = {
"prompt": "Hello AI"
}
@@ -100,7 +100,7 @@ class TestHeadlessAPI(unittest.TestCase):
if dummy_log.exists():
dummy_log.unlink()
def test_get_context_endpoint(self):
def test_get_context_endpoint(self) -> None:
response = self.client.get("/api/v1/context", headers=self.headers)
self.assertEqual(response.status_code, 200)
data = response.json()
@@ -152,14 +152,14 @@ class TestHeadlessStartup(unittest.TestCase):
app.run()
mock_immapp_run.assert_called_once()
def test_fastapi_installed():
def test_fastapi_installed() -> None:
"""Verify that fastapi is installed."""
try:
importlib.import_module("fastapi")
except ImportError:
pytest.fail("fastapi is not installed")
def test_uvicorn_installed():
def test_uvicorn_installed() -> None:
"""Verify that uvicorn is installed."""
try:
importlib.import_module("uvicorn")

View File

@@ -6,7 +6,7 @@ import ai_client
import json
@pytest.mark.asyncio
async def test_headless_verification_full_run():
async def test_headless_verification_full_run() -> None:
"""
1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets.
2. Simulate a full execution run using engine.run_linear().

View File

@@ -164,7 +164,7 @@ def test_history_persistence_across_turns(tmp_path):
assert len(proj_final["discussion"]["discussions"]["main"]["history"]) == 2
# --- Tests for AI Client History Management ---
def test_get_history_bleed_stats_basic():
def test_get_history_bleed_stats_basic() -> None:
"""
Tests basic retrieval of history bleed statistics from the AI client.
"""

View File

@@ -11,12 +11,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient
import gui_legacy
def test_hooks_enabled_via_cli():
def test_hooks_enabled_via_cli() -> None:
with patch.object(sys, 'argv', ['gui_legacy.py', '--enable-test-hooks']):
app = gui_legacy.App()
assert app.test_hooks_enabled is True
def test_hooks_disabled_by_default():
def test_hooks_disabled_by_default() -> None:
with patch.object(sys, 'argv', ['gui_legacy.py']):
if 'SLOP_TEST_HOOKS' in os.environ:
del os.environ['SLOP_TEST_HOOKS']

View File

@@ -13,7 +13,7 @@ sys.modules["gui_legacy"] = gui_legacy
spec.loader.exec_module(gui_legacy)
from gui_legacy import App
def test_new_hubs_defined_in_window_info():
def test_new_hubs_defined_in_window_info() -> None:
"""
Verifies that the new consolidated Hub windows are defined in the App's window_info.
This ensures they will be available in the 'Windows' menu.

View File

@@ -7,7 +7,7 @@ from events import UserRequestEvent
import ai_client
@pytest.fixture
def mock_app():
def mock_app() -> None:
with (
patch('gui_2.load_config', return_value={
"ai": {"provider": "gemini", "model": "model-1", "temperature": 0.0, "max_tokens": 100, "history_trunc_limit": 1000},

View File

@@ -8,7 +8,7 @@ from log_registry import LogRegistry
class TestLogRegistry(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
"""Set up a temporary directory and registry file for each test."""
self.temp_dir = tempfile.TemporaryDirectory()
self.registry_path = os.path.join(self.temp_dir.name, "registry.toml")
@@ -19,11 +19,11 @@ class TestLogRegistry(unittest.TestCase):
# Instantiate LogRegistry. This will load from the empty file.
self.registry = LogRegistry(self.registry_path)
def tearDown(self):
def tearDown(self) -> None:
"""Clean up the temporary directory and its contents after each test."""
self.temp_dir.cleanup()
def test_instantiation(self):
def test_instantiation(self) -> None:
"""Test LogRegistry instantiation with a file path."""
self.assertIsInstance(self.registry, LogRegistry)
self.assertEqual(self.registry.registry_path, self.registry_path)
@@ -31,7 +31,7 @@ class TestLogRegistry(unittest.TestCase):
self.assertTrue(os.path.exists(self.registry_path))
# We will verify content in other tests that explicitly save and reload.
def test_register_session(self):
def test_register_session(self) -> None:
"""Test registering a new session."""
session_id = "session-123"
path = "/path/to/session/123"
@@ -53,7 +53,7 @@ class TestLogRegistry(unittest.TestCase):
reloaded_start_time = datetime.fromisoformat(reloaded_session_data['start_time'])
self.assertAlmostEqual(reloaded_start_time, start_time, delta=timedelta(seconds=1))
def test_update_session_metadata(self):
def test_update_session_metadata(self) -> None:
"""Test updating session metadata."""
session_id = "session-456"
path = "/path/to/session/456"
@@ -84,7 +84,7 @@ class TestLogRegistry(unittest.TestCase):
self.assertTrue(reloaded_session_data.get('metadata', {}).get('whitelisted', False))
self.assertTrue(reloaded_session_data.get('whitelisted', False)) # Check main flag too
def test_is_session_whitelisted(self):
def test_is_session_whitelisted(self) -> None:
"""Test checking if a session is whitelisted."""
session_id_whitelisted = "session-789-whitelisted"
path_w = "/path/to/session/789"
@@ -102,7 +102,7 @@ class TestLogRegistry(unittest.TestCase):
# Test for a non-existent session, should be treated as not whitelisted
self.assertFalse(self.registry.is_session_whitelisted("non-existent-session"))
def test_get_old_non_whitelisted_sessions(self):
def test_get_old_non_whitelisted_sessions(self) -> None:
"""Test retrieving old, non-whitelisted sessions."""
now = datetime.utcnow()
# Define a cutoff time that is 7 days ago

View File

@@ -1,7 +1,7 @@
import pytest
from models import Ticket, Track, WorkerContext
def test_ticket_instantiation():
def test_ticket_instantiation() -> None:
"""
Verifies that a Ticket can be instantiated with its required fields:
id, description, status, assigned_to.
@@ -22,7 +22,7 @@ def test_ticket_instantiation():
assert ticket.assigned_to == assigned_to
assert ticket.depends_on == []
def test_ticket_with_dependencies():
def test_ticket_with_dependencies() -> None:
"""
Verifies that a Ticket can store dependencies.
"""
@@ -35,7 +35,7 @@ def test_ticket_with_dependencies():
)
assert ticket.depends_on == ["T1"]
def test_track_instantiation():
def test_track_instantiation() -> None:
"""
Verifies that a Track can be instantiated with its required fields:
id, description, and a list of Tickets.
@@ -56,14 +56,14 @@ def test_track_instantiation():
assert track.tickets[0].id == "T1"
assert track.tickets[1].id == "T2"
def test_track_can_handle_empty_tickets():
def test_track_can_handle_empty_tickets() -> None:
"""
Verifies that a Track can be instantiated with an empty list of tickets.
"""
track = Track(id="TRACK-2", description="Empty Track", tickets=[])
assert track.tickets == []
def test_worker_context_instantiation():
def test_worker_context_instantiation() -> None:
"""
Verifies that a WorkerContext can be instantiated with ticket_id,
model_name, and messages.
@@ -83,7 +83,7 @@ def test_worker_context_instantiation():
assert context.model_name == model_name
assert context.messages == messages
def test_ticket_mark_blocked():
def test_ticket_mark_blocked() -> None:
"""
Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'.
Note: The reason field might need to be added to the Ticket class.
@@ -92,7 +92,7 @@ def test_ticket_mark_blocked():
ticket.mark_blocked("Waiting for API key")
assert ticket.status == "blocked"
def test_ticket_mark_complete():
def test_ticket_mark_complete() -> None:
"""
Verifies that ticket.mark_complete() sets the status to 'completed'.
"""
@@ -100,7 +100,7 @@ def test_ticket_mark_complete():
ticket.mark_complete()
assert ticket.status == "completed"
def test_track_get_executable_tickets():
def test_track_get_executable_tickets() -> None:
"""
Verifies that track.get_executable_tickets() returns only 'todo' tickets
whose dependencies are all 'completed'.
@@ -124,7 +124,7 @@ def test_track_get_executable_tickets():
assert "T6" in executable_ids
assert len(executable_ids) == 2
def test_track_get_executable_tickets_complex():
def test_track_get_executable_tickets_complex() -> None:
"""
Verifies executable tickets with complex dependency chains.
Chain: T1 (comp) -> T2 (todo) -> T3 (todo)

View File

@@ -6,7 +6,7 @@ import time
from gui_2 import App
@pytest.fixture
def app_instance():
def app_instance() -> None:
with (
patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
patch('gui_2.save_config'),

View File

@@ -1,7 +1,7 @@
import pytest
from mma_prompts import PROMPTS
def test_tier1_epic_init_constraints():
def test_tier1_epic_init_constraints() -> None:
prompt = PROMPTS["tier1_epic_init"]
assert "Godot ECS Flat List format" in prompt
assert "JSON array" in prompt
@@ -9,19 +9,19 @@ def test_tier1_epic_init_constraints():
assert "severity" in prompt
assert "IGNORE all source code" in prompt
def test_tier1_track_delegation_constraints():
def test_tier1_track_delegation_constraints() -> None:
prompt = PROMPTS["tier1_track_delegation"]
assert "Track Brief" in prompt
assert "AST Skeleton View" in prompt
assert "IGNORE unrelated module docs" in prompt
def test_tier1_macro_merge_constraints():
def test_tier1_macro_merge_constraints() -> None:
prompt = PROMPTS["tier1_macro_merge"]
assert "Macro-Merge" in prompt
assert "Macro-Diff" in prompt
assert "IGNORE Tier 3 trial-and-error" in prompt
def test_tier2_sprint_planning_constraints():
def test_tier2_sprint_planning_constraints() -> None:
prompt = PROMPTS["tier2_sprint_planning"]
assert "Tickets" in prompt
assert "Godot ECS Flat List format" in prompt
@@ -30,20 +30,20 @@ def test_tier2_sprint_planning_constraints():
assert "Skeleton View" in prompt
assert "Curated Implementation View" in prompt
def test_tier2_code_review_constraints():
def test_tier2_code_review_constraints() -> None:
prompt = PROMPTS["tier2_code_review"]
assert "Code Review" in prompt
assert "IGNORE the Contributor's internal trial-and-error" in prompt
assert "Tier 4 (QA) logs" in prompt
def test_tier2_track_finalization_constraints():
def test_tier2_track_finalization_constraints() -> None:
prompt = PROMPTS["tier2_track_finalization"]
assert "Track Finalization" in prompt
assert "Executive Summary" in prompt
assert "Macro-Diff" in prompt
assert "Dependency Delta" in prompt
def test_tier2_contract_first_constraints():
def test_tier2_contract_first_constraints() -> None:
prompt = PROMPTS["tier2_contract_first"]
assert "Stub Ticket" in prompt
assert "Consumer Ticket" in prompt

View File

@@ -4,7 +4,7 @@ import asyncio
from gui_2 import App
@pytest.fixture
def app_instance():
def app_instance() -> None:
with (
patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
patch('gui_2.save_config'),

View File

@@ -7,7 +7,7 @@ import multi_agent_conductor
from models import Track, Ticket
@pytest.fixture
def mock_ai_client():
def mock_ai_client() -> None:
with patch("ai_client.send") as mock_send:
yield mock_send
@@ -40,7 +40,7 @@ def test_generate_tickets(mock_ai_client):
assert tickets[1]["id"] == "T-002"
assert tickets[1]["depends_on"] == ["T-001"]
def test_topological_sort():
def test_topological_sort() -> None:
tickets = [
{"id": "T-002", "description": "Dep on 001", "depends_on": ["T-001"]},
{"id": "T-001", "description": "Base", "depends_on": []},
@@ -51,7 +51,7 @@ def test_topological_sort():
assert sorted_tickets[1]["id"] == "T-002"
assert sorted_tickets[2]["id"] == "T-003"
def test_topological_sort_circular():
def test_topological_sort_circular() -> None:
tickets = [
{"id": "T-001", "depends_on": ["T-002"]},
{"id": "T-002", "depends_on": ["T-001"]}
@@ -59,7 +59,7 @@ def test_topological_sort_circular():
with pytest.raises(ValueError, match="Circular dependency detected"):
conductor_tech_lead.topological_sort(tickets)
def test_track_executable_tickets():
def test_track_executable_tickets() -> None:
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"])
track = Track(id="track_1", description="desc", tickets=[t1, t2])
@@ -73,7 +73,7 @@ def test_track_executable_tickets():
assert executable[0].id == "T2"
@pytest.mark.asyncio
async def test_conductor_engine_run_linear():
async def test_conductor_engine_run_linear() -> None:
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"])
track = Track(id="track_1", description="desc", tickets=[t1, t2])
@@ -89,7 +89,7 @@ async def test_conductor_engine_run_linear():
assert t2.status == "completed"
assert mock_worker.call_count == 2
def test_conductor_engine_parse_json_tickets():
def test_conductor_engine_parse_json_tickets() -> None:
track = Track(id="track_1", description="desc")
engine = multi_agent_conductor.ConductorEngine(track)
json_data = json.dumps([

View File

@@ -7,7 +7,7 @@ from pathlib import Path
import orchestrator_pm
class TestOrchestratorPMHistory(unittest.TestCase):
def setUp(self):
def setUp(self) -> None:
self.test_dir = Path("test_conductor")
self.test_dir.mkdir(exist_ok=True)
self.archive_dir = self.test_dir / "archive"
@@ -15,7 +15,7 @@ class TestOrchestratorPMHistory(unittest.TestCase):
self.archive_dir.mkdir(exist_ok=True)
self.tracks_dir.mkdir(exist_ok=True)
def tearDown(self):
def tearDown(self) -> None:
if self.test_dir.exists():
shutil.rmtree(self.test_dir)

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from performance_monitor import PerformanceMonitor
def test_perf_monitor_basic_timing():
def test_perf_monitor_basic_timing() -> None:
pm = PerformanceMonitor()
pm.start_frame()
time.sleep(0.02) # 20ms
@@ -17,7 +17,7 @@ def test_perf_monitor_basic_timing():
assert metrics['last_frame_time_ms'] >= 20.0
pm.stop()
def test_perf_monitor_component_timing():
def test_perf_monitor_component_timing() -> None:
pm = PerformanceMonitor()
pm.start_component("test_comp")
time.sleep(0.01)

View File

@@ -4,7 +4,7 @@ import ai_client
from gui_2 import App
@pytest.fixture
def app_instance():
def app_instance() -> None:
with (
patch('gui_2.load_config', return_value={'ai': {'provider': 'gemini', 'model': 'gemini-2.5-flash-lite'}, 'projects': {}}),
patch('gui_2.save_config'),

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_base import BaseSimulation
def test_base_simulation_init():
def test_base_simulation_init() -> None:
with patch('simulation.sim_base.ApiHookClient') as mock_client_class:
mock_client = MagicMock()
mock_client_class.return_value = mock_client
@@ -16,7 +16,7 @@ def test_base_simulation_init():
assert sim.client == mock_client
assert sim.sim is not None
def test_base_simulation_setup():
def test_base_simulation_setup() -> None:
mock_client = MagicMock()
mock_client.wait_for_server.return_value = True
with patch('simulation.sim_base.WorkflowSimulator') as mock_sim_class:

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_context import ContextSimulation
def test_context_simulation_run():
def test_context_simulation_run() -> None:
mock_client = MagicMock()
mock_client.wait_for_server.return_value = True
# Mock project config

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_tools import ToolsSimulation
def test_tools_simulation_run():
def test_tools_simulation_run() -> None:
mock_client = MagicMock()
mock_client.wait_for_server.return_value = True
# Mock session entries with tool output

View File

@@ -19,7 +19,7 @@ class MockDialog:
return res
@pytest.fixture
def mock_ai_client():
def mock_ai_client() -> None:
with patch("ai_client.send") as mock_send:
mock_send.return_value = "Task completed"
yield mock_send

View File

@@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import subprocess
from shell_runner import run_powershell
def test_run_powershell_qa_callback_on_failure():
def test_run_powershell_qa_callback_on_failure() -> None:
"""
Test that qa_callback is called when a powershell command fails (non-zero exit code).
The result of the callback should be appended to the output.
@@ -27,7 +27,7 @@ def test_run_powershell_qa_callback_on_failure():
assert "STDERR:\nsomething went wrong" in output
assert "EXIT CODE: 1" in output
def test_run_powershell_qa_callback_on_stderr_only():
def test_run_powershell_qa_callback_on_stderr_only() -> None:
"""
Test that qa_callback is called when a command has stderr even if exit code is 0.
"""
@@ -45,7 +45,7 @@ def test_run_powershell_qa_callback_on_stderr_only():
assert "QA ANALYSIS: Ignorable warning." in output
assert "STDOUT:\nSuccess" in output
def test_run_powershell_no_qa_callback_on_success():
def test_run_powershell_no_qa_callback_on_success() -> None:
"""
Test that qa_callback is NOT called when the command succeeds without stderr.
"""
@@ -64,7 +64,7 @@ def test_run_powershell_no_qa_callback_on_success():
assert "EXIT CODE: 0" in output
assert "QA ANALYSIS" not in output
def test_run_powershell_optional_qa_callback():
def test_run_powershell_optional_qa_callback() -> None:
"""
Test that run_powershell still works without providing a qa_callback.
"""
@@ -81,7 +81,7 @@ def test_run_powershell_optional_qa_callback():
assert "STDERR:\nerror" in output
assert "EXIT CODE: 1" in output
def test_end_to_end_tier4_integration():
def test_end_to_end_tier4_integration() -> None:
"""
Verifies that shell_runner.run_powershell correctly uses ai_client.run_tier4_analysis.
"""
@@ -101,7 +101,7 @@ def test_end_to_end_tier4_integration():
mock_analysis.assert_called_once_with(stderr_content)
assert f"QA ANALYSIS:\n{expected_analysis}" in output
def test_ai_client_passes_qa_callback():
def test_ai_client_passes_qa_callback() -> None:
"""
Verifies that ai_client.send passes the qa_callback down to the provider function.
"""
@@ -123,7 +123,7 @@ def test_ai_client_passes_qa_callback():
# qa_callback is the 7th positional argument in _send_gemini
assert args[6] == qa_callback
def test_gemini_provider_passes_qa_callback_to_run_script():
def test_gemini_provider_passes_qa_callback_to_run_script() -> None:
"""
Verifies that _send_gemini passes the qa_callback to _run_script.
"""

View File

@@ -14,7 +14,7 @@ def test_build_tier1_context_exists():
# other.py should be summarized, not full content in a code block
assert "Other content" not in result or "Summarized" in result # Assuming summary format
def test_build_tier2_context_exists():
def test_build_tier2_context_exists() -> None:
file_items = [
{"path": Path("other.py"), "entry": "other.py", "content": "Other content", "error": False}
]
@@ -44,7 +44,7 @@ def test_build_tier3_context_ast_skeleton(monkeypatch):
mock_parser_class.assert_called_once_with("python")
mock_parser_instance.get_skeleton.assert_called_once_with("def other():\n pass")
def test_build_tier3_context_exists():
def test_build_tier3_context_exists() -> None:
file_items = [
{"path": Path("focus.py"), "entry": "focus.py", "content": "def focus():\n pass", "error": False},
{"path": Path("other.py"), "entry": "other.py", "content": "def other():\n pass", "error": False}
@@ -91,7 +91,7 @@ def test_build_files_section_with_dicts(tmp_path):
assert "content1" in result
assert "file1.txt" in result
def test_tiered_context_by_tier_field():
def test_tiered_context_by_tier_field() -> None:
file_items = [
{"path": Path("tier1_file.txt"), "entry": "tier1_file.txt", "content": "Full Tier 1 Content\nLine 2", "tier": 1},
{"path": Path("tier3_file.txt"), "entry": "tier3_file.txt", "content": "Full Tier 3 Content\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\nLine 9\nLine 10", "tier": 3},

View File

@@ -7,7 +7,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client
def test_token_usage_tracking():
def test_token_usage_tracking() -> None:
ai_client.reset_session()
# Mock an API response with token usage
usage = {"prompt_tokens": 100, "candidates_tokens": 50, "total_tokens": 150}

View File

@@ -6,7 +6,7 @@ from models import Metadata, TrackState, Ticket
# --- Pytest Tests ---
def test_track_state_instantiation():
def test_track_state_instantiation() -> None:
"""Test creating a TrackState object."""
now = datetime.now(timezone.utc)
metadata = Metadata(
@@ -37,7 +37,7 @@ def test_track_state_instantiation():
assert track_state.tasks[0].description == "Design UI"
assert track_state.tasks[0].assigned_to == "dev1"
def test_track_state_to_dict():
def test_track_state_to_dict() -> None:
"""Test the to_dict() method for serialization."""
now = datetime.now(timezone.utc)
metadata = Metadata(
@@ -72,7 +72,7 @@ def test_track_state_to_dict():
assert track_dict["tasks"][0]["description"] == "Add feature X"
assert track_dict["tasks"][0]["assigned_to"] == "dev3"
def test_track_state_from_dict():
def test_track_state_from_dict() -> None:
"""Test the from_dict() class method for deserialization."""
now = datetime.now(timezone.utc)
track_dict_data = {
@@ -106,7 +106,7 @@ def test_track_state_from_dict():
assert track_state.tasks[0].assigned_to == "ops1"
# Test case for empty lists and missing keys for robustness
def test_track_state_from_dict_empty_and_missing():
def test_track_state_from_dict_empty_and_missing() -> None:
"""Test from_dict with empty lists and missing optional keys."""
track_dict_data = {
"metadata": {
@@ -128,7 +128,7 @@ def test_track_state_from_dict_empty_and_missing():
assert len(track_state.tasks) == 0
# Test case for to_dict with None values or missing optional data
def test_track_state_to_dict_with_none():
def test_track_state_to_dict_with_none() -> None:
"""Test to_dict with None values in optional fields."""
now = datetime.now(timezone.utc)
metadata = Metadata(

View File

@@ -1,7 +1,7 @@
import tree_sitter_python as tspython
from tree_sitter import Language, Parser
def test_tree_sitter_python_setup():
def test_tree_sitter_python_setup() -> None:
"""
Verifies that tree-sitter and tree-sitter-python are correctly installed
and can parse a simple Python function string.

View File

@@ -7,11 +7,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.user_agent import UserSimAgent
def test_user_agent_instantiation():
def test_user_agent_instantiation() -> None:
agent = UserSimAgent(hook_client=None)
assert agent is not None
def test_perform_action_with_delay():
def test_perform_action_with_delay() -> None:
agent = UserSimAgent(hook_client=None)
called = False

View File

@@ -8,12 +8,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.workflow_sim import WorkflowSimulator
def test_simulator_instantiation():
def test_simulator_instantiation() -> None:
client = MagicMock()
sim = WorkflowSimulator(client)
assert sim is not None
def test_setup_new_project():
def test_setup_new_project() -> None:
client = MagicMock()
sim = WorkflowSimulator(client)
# Mock responses for wait_for_server
@@ -24,7 +24,7 @@ def test_setup_new_project():
client.set_value.assert_any_call("project_git_dir", "/tmp/test_git")
client.click.assert_any_call("btn_project_save")
def test_discussion_switching():
def test_discussion_switching() -> None:
client = MagicMock()
sim = WorkflowSimulator(client)
sim.create_discussion("NewDisc")
@@ -33,7 +33,7 @@ def test_discussion_switching():
sim.switch_discussion("NewDisc")
client.select_list_item.assert_called_with("disc_listbox", "NewDisc")
def test_history_truncation():
def test_history_truncation() -> None:
client = MagicMock()
sim = WorkflowSimulator(client)
sim.truncate_history(3)

View File

@@ -33,12 +33,12 @@ class TestMMAGUIRobust(unittest.TestCase):
print("GUI started.")
@classmethod
def tearDownClass(cls):
def tearDownClass(cls) -> None:
if cls.gui_process:
cls.gui_process.terminate()
cls.gui_process.wait(timeout=5)
def test_mma_state_ingestion(self):
def test_mma_state_ingestion(self) -> None:
"""Verify that mma_state_update event correctly updates GUI state."""
track_data = {
"id": "robust_test_track",
@@ -69,7 +69,7 @@ class TestMMAGUIRobust(unittest.TestCase):
self.assertEqual(status["active_tickets"][2]["status"], "complete")
print("MMA state ingestion verified successfully.")
def test_mma_step_approval_trigger(self):
def test_mma_step_approval_trigger(self) -> None:
"""Verify that mma_step_approval event sets the pending approval flag."""
payload = {
"ticket_id": "T2",

View File

@@ -9,7 +9,7 @@ if PROJECT_ROOT not in sys.path:
from api_hook_client import ApiHookClient
def diag_run():
def diag_run() -> None:
print("Launching GUI for manual inspection + automated hooks...")
# Use a log file for GUI output
with open("gui_diag.log", "w") as log_file:

View File

@@ -23,7 +23,7 @@ except ImportError as e:
print(f"Import error: {e}")
sys.exit(1)
def run_visual_mma_verification():
def run_visual_mma_verification() -> None:
print("Starting visual MMA verification test...")
# Change current directory to project root
original_dir = os.getcwd()