refactor(types): auto -> None sweep across entire codebase

Applied 236 return type annotations to functions with no return values
across 100+ files (core modules, tests, scripts, simulations).
Added Phase 4 to python_style_refactor track for remaining 597 items
(untyped params, vars, and functions with return values).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-28 11:16:56 -05:00
parent 07f4e36016
commit 60396f03f8
98 changed files with 311 additions and 240 deletions

View File

@@ -9,11 +9,18 @@
### 1. AI-Optimized Python Style Refactor ### 1. AI-Optimized Python Style Refactor
**Track**: `conductor/tracks/python_style_refactor_20260227/` **Track**: `conductor/tracks/python_style_refactor_20260227/`
**Status**: COMPLETE **Status**: IN_PROGRESS — Phase 4
**Completed**: **Completed**:
- Phase 1: Research and Pilot Tooling [checkpoint: c75b926] - Phase 1: Research and Pilot Tooling [checkpoint: c75b926]
- Phase 2: Core Refactor - Indentation and Newlines [checkpoint: db65162] - Phase 2: Core Refactor - Indentation and Newlines [checkpoint: db65162]
- Phase 3: AI-Optimized Metadata and Final Cleanup [checkpoint: 3216e87] - Phase 3: AI-Optimized Metadata and Final Cleanup [checkpoint: 3216e87]
**Remaining in Phase 4** (Codebase-Wide Type Hint Sweep):
- [ ] Core modules (18 files, ~200 items)
- [ ] Variable-only files (ai_client, mcp_client, mma_prompts)
- [ ] Scripts (~15 files)
- [ ] Simulation modules (~10 files)
- [ ] Test files (~80 files, ~400 items)
- [ ] Verification
### 2. Robust Live Simulation Verification ### 2. Robust Live Simulation Verification
**Track**: `conductor/tracks/robust_live_simulation_verification/` **Track**: `conductor/tracks/robust_live_simulation_verification/`

View File

@@ -15,7 +15,7 @@ class HookServerInstance(ThreadingHTTPServer):
class HookHandler(BaseHTTPRequestHandler): class HookHandler(BaseHTTPRequestHandler):
"""Handles incoming HTTP requests for the API hooks.""" """Handles incoming HTTP requests for the API hooks."""
def do_GET(self): def do_GET(self) -> None:
app = self.server.app app = self.server.app
session_logger.log_api_hook("GET", self.path, "") session_logger.log_api_hook("GET", self.path, "")
if self.path == '/status': if self.path == '/status':
@@ -175,7 +175,7 @@ class HookHandler(BaseHTTPRequestHandler):
self.send_response(404) self.send_response(404)
self.end_headers() self.end_headers()
def do_POST(self): def do_POST(self) -> None:
app = self.server.app app = self.server.app
content_length = int(self.headers.get('Content-Length', 0)) content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length) body = self.rfile.read(content_length)
@@ -283,7 +283,7 @@ class HookServer:
self.server = None self.server = None
self.thread = None self.thread = None
def start(self): def start(self) -> None:
if self.thread and self.thread.is_alive(): if self.thread and self.thread.is_alive():
return return
is_gemini_cli = getattr(self.app, 'current_provider', '') == 'gemini_cli' is_gemini_cli = getattr(self.app, 'current_provider', '') == 'gemini_cli'
@@ -309,7 +309,7 @@ class HookServer:
self.thread.start() self.thread.start()
logging.info(f"Hook server started on port {self.port}") logging.info(f"Hook server started on port {self.port}")
def stop(self): def stop(self) -> None:
if self.server: if self.server:
self.server.shutdown() self.server.shutdown()
self.server.server_close() self.server.server_close()

View File

@@ -18,7 +18,7 @@ def run_ps_script(role, prompt):
print(f"\n[Sub-Agent {role} Error]:\n{result.stderr}") print(f"\n[Sub-Agent {role} Error]:\n{result.stderr}")
return result return result
def test_subagent_script_qa_live(): def test_subagent_script_qa_live() -> None:
"""Verify that the QA role works and returns a compressed fix.""" """Verify that the QA role works and returns a compressed fix."""
prompt = "Traceback (most recent call last): File 'test.py', line 1, in <module> 1/0 ZeroDivisionError: division by zero" prompt = "Traceback (most recent call last): File 'test.py', line 1, in <module> 1/0 ZeroDivisionError: division by zero"
result = run_ps_script("QA", prompt) result = run_ps_script("QA", prompt)
@@ -28,7 +28,7 @@ def test_subagent_script_qa_live():
# It should be short (QA agents compress) # It should be short (QA agents compress)
assert len(result.stdout.split()) < 40 assert len(result.stdout.split()) < 40
def test_subagent_script_worker_live(): def test_subagent_script_worker_live() -> None:
"""Verify that the Worker role works and returns code.""" """Verify that the Worker role works and returns code."""
prompt = "Write a python function that returns 'hello world'" prompt = "Write a python function that returns 'hello world'"
result = run_ps_script("Worker", prompt) result = run_ps_script("Worker", prompt)
@@ -36,14 +36,14 @@ def test_subagent_script_worker_live():
assert "def" in result.stdout.lower() assert "def" in result.stdout.lower()
assert "hello" in result.stdout.lower() assert "hello" in result.stdout.lower()
def test_subagent_script_utility_live(): def test_subagent_script_utility_live() -> None:
"""Verify that the Utility role works.""" """Verify that the Utility role works."""
prompt = "Tell me 'True' if 1+1=2, otherwise 'False'" prompt = "Tell me 'True' if 1+1=2, otherwise 'False'"
result = run_ps_script("Utility", prompt) result = run_ps_script("Utility", prompt)
assert result.returncode == 0 assert result.returncode == 0
assert "true" in result.stdout.lower() assert "true" in result.stdout.lower()
def test_subagent_isolation_live(): def test_subagent_isolation_live() -> None:
"""Verify that the sub-agent is stateless and does not see the parent's conversation context.""" """Verify that the sub-agent is stateless and does not see the parent's conversation context."""
# This prompt asks the sub-agent about a 'secret' mentioned only here, not in its prompt. # This prompt asks the sub-agent about a 'secret' mentioned only here, not in its prompt.
prompt = "What is the secret code I just told you? If I didn't tell you, say 'UNKNOWN'." prompt = "What is the secret code I just told you? If I didn't tell you, say 'UNKNOWN'."

View File

@@ -3,7 +3,7 @@ import os
from unittest.mock import patch, MagicMock from unittest.mock import patch, MagicMock
from scripts.mma_exec import create_parser, get_role_documents, execute_agent, get_model_for_role, get_dependencies from scripts.mma_exec import create_parser, get_role_documents, execute_agent, get_model_for_role, get_dependencies
def test_parser_role_choices(): def test_parser_role_choices() -> None:
"""Test that the parser accepts valid roles and the prompt argument.""" """Test that the parser accepts valid roles and the prompt argument."""
parser = create_parser() parser = create_parser()
valid_roles = ['tier1', 'tier2', 'tier3', 'tier4'] valid_roles = ['tier1', 'tier2', 'tier3', 'tier4']
@@ -13,13 +13,13 @@ def test_parser_role_choices():
assert args.role == role assert args.role == role
assert args.prompt == test_prompt assert args.prompt == test_prompt
def test_parser_invalid_role(): def test_parser_invalid_role() -> None:
"""Test that the parser rejects roles outside the specified choices.""" """Test that the parser rejects roles outside the specified choices."""
parser = create_parser() parser = create_parser()
with pytest.raises(SystemExit): with pytest.raises(SystemExit):
parser.parse_args(['--role', 'tier5', 'Some prompt']) parser.parse_args(['--role', 'tier5', 'Some prompt'])
def test_parser_prompt_optional(): def test_parser_prompt_optional() -> None:
"""Test that the prompt argument is optional if role is provided (or handled in main).""" """Test that the prompt argument is optional if role is provided (or handled in main)."""
parser = create_parser() parser = create_parser()
# Prompt is now optional (nargs='?') # Prompt is now optional (nargs='?')
@@ -27,28 +27,28 @@ def test_parser_prompt_optional():
assert args.role == 'tier3' assert args.role == 'tier3'
assert args.prompt is None assert args.prompt is None
def test_parser_help(): def test_parser_help() -> None:
"""Test that the help flag works without raising errors (exits with 0).""" """Test that the help flag works without raising errors (exits with 0)."""
parser = create_parser() parser = create_parser()
with pytest.raises(SystemExit) as excinfo: with pytest.raises(SystemExit) as excinfo:
parser.parse_args(['--help']) parser.parse_args(['--help'])
assert excinfo.value.code == 0 assert excinfo.value.code == 0
def test_get_role_documents(): def test_get_role_documents() -> None:
"""Test that get_role_documents returns the correct documentation paths for each tier.""" """Test that get_role_documents returns the correct documentation paths for each tier."""
assert get_role_documents('tier1') == ['conductor/product.md', 'conductor/product-guidelines.md'] assert get_role_documents('tier1') == ['conductor/product.md', 'conductor/product-guidelines.md']
assert get_role_documents('tier2') == ['conductor/tech-stack.md', 'conductor/workflow.md'] assert get_role_documents('tier2') == ['conductor/tech-stack.md', 'conductor/workflow.md']
assert get_role_documents('tier3') == ['conductor/workflow.md'] assert get_role_documents('tier3') == ['conductor/workflow.md']
assert get_role_documents('tier4') == [] assert get_role_documents('tier4') == []
def test_get_model_for_role(): def test_get_model_for_role() -> None:
"""Test that get_model_for_role returns the correct model for each role.""" """Test that get_model_for_role returns the correct model for each role."""
assert get_model_for_role('tier1-orchestrator') == 'gemini-3.1-pro-preview' assert get_model_for_role('tier1-orchestrator') == 'gemini-3.1-pro-preview'
assert get_model_for_role('tier2-tech-lead') == 'gemini-2.5-flash-lite' assert get_model_for_role('tier2-tech-lead') == 'gemini-2.5-flash-lite'
assert get_model_for_role('tier3-worker') == 'gemini-2.5-flash-lite' assert get_model_for_role('tier3-worker') == 'gemini-2.5-flash-lite'
assert get_model_for_role('tier4-qa') == 'gemini-2.5-flash-lite' assert get_model_for_role('tier4-qa') == 'gemini-2.5-flash-lite'
def test_execute_agent(): def test_execute_agent() -> None:
""" """
Test that execute_agent calls subprocess.run with powershell and the correct gemini CLI arguments Test that execute_agent calls subprocess.run with powershell and the correct gemini CLI arguments
including the model specified for the role. including the model specified for the role.

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from scripts.mma_exec import generate_skeleton from scripts.mma_exec import generate_skeleton
def test_generate_skeleton(): def test_generate_skeleton() -> None:
sample_code = ''' sample_code = '''
class Calculator: class Calculator:
"""Performs basic math operations.""" """Performs basic math operations."""

View File

@@ -21,6 +21,14 @@
- [x] Task: Conductor - Update `conductor/code_styleguides/python.md` with the new AI-optimized standard. [602cea6] - [x] Task: Conductor - Update `conductor/code_styleguides/python.md` with the new AI-optimized standard. [602cea6]
- [x] Task: Conductor - User Manual Verification 'Phase 3: Metadata and Final Documentation' (Protocol in workflow.md) - [x] Task: Conductor - User Manual Verification 'Phase 3: Metadata and Final Documentation' (Protocol in workflow.md)
## Phase 4: Codebase-Wide Type Hint Sweep
- [ ] Task: Conductor - Type hint pass on core modules (`api_hook_client.py`, `api_hooks.py`, `log_registry.py`, `performance_monitor.py`, `theme.py`, `theme_2.py`, `gemini_cli_adapter.py`, `multi_agent_conductor.py`, `dag_engine.py`, `events.py`, `file_cache.py`, `models.py`, `log_pruner.py`, `gemini.py`, `orchestrator_pm.py`, `conductor_tech_lead.py`, `outline_tool.py`, `summarize.py`)
- [ ] Task: Conductor - Type hint pass on remaining variable-only files (`ai_client.py` vars, `mcp_client.py` vars, `mma_prompts.py` vars)
- [ ] Task: Conductor - Type hint pass on scripts (`scripts/*.py`)
- [ ] Task: Conductor - Type hint pass on simulation modules (`simulation/*.py`)
- [ ] Task: Conductor - Type hint pass on test files (`tests/*.py`, `conductor/tests/*.py`)
- [ ] Task: Conductor - User Manual Verification 'Phase 4: Codebase-Wide Type Hint Sweep' (Protocol in workflow.md)
--- ---
**Protocol Note:** Each task will follow the Standard Task Workflow (Red/Green phases with Tier 3 Worker delegation). Phase completion will trigger the mandatory Verification and Checkpointing protocol. **Protocol Note:** Each task will follow the Standard Task Workflow (Red/Green phases with Tier 3 Worker delegation). Phase completion will trigger the mandatory Verification and Checkpointing protocol.

View File

@@ -7,7 +7,7 @@ class TrackDAG:
Provides methods for dependency resolution, cycle detection, and topological sorting. Provides methods for dependency resolution, cycle detection, and topological sorting.
""" """
def __init__(self, tickets: List[Ticket]): def __init__(self, tickets: List[Ticket]) -> None:
""" """
Initializes the TrackDAG with a list of Ticket objects. Initializes the TrackDAG with a list of Ticket objects.
Args: Args:
@@ -99,7 +99,7 @@ class ExecutionEngine:
Handles automatic queueing and manual task approval. Handles automatic queueing and manual task approval.
""" """
def __init__(self, dag: TrackDAG, auto_queue: bool = False): def __init__(self, dag: TrackDAG, auto_queue: bool = False) -> None:
""" """
Initializes the ExecutionEngine. Initializes the ExecutionEngine.
Args: Args:
@@ -123,7 +123,7 @@ class ExecutionEngine:
ticket.status = "in_progress" ticket.status = "in_progress"
return ready return ready
def approve_task(self, task_id: str): def approve_task(self, task_id: str) -> None:
""" """
Manually transitions a task from 'todo' to 'in_progress' if its dependencies are met. Manually transitions a task from 'todo' to 'in_progress' if its dependencies are met.
Args: Args:
@@ -141,7 +141,7 @@ class ExecutionEngine:
if all_done: if all_done:
ticket.status = "in_progress" ticket.status = "in_progress"
def update_task_status(self, task_id: str, status: str): def update_task_status(self, task_id: str, status: str) -> None:
""" """
Force-updates the status of a specific task. Force-updates the status of a specific task.
Args: Args:

View File

@@ -2,7 +2,7 @@ import tree_sitter
import tree_sitter_python import tree_sitter_python
class ASTParser: class ASTParser:
def __init__(self, language: str): def __init__(self, language: str) -> None:
self.language = tree_sitter.Language(tree_sitter_python.language()) self.language = tree_sitter.Language(tree_sitter_python.language())
self.parser = tree_sitter.Parser(self.language) self.parser = tree_sitter.Parser(self.language)

View File

@@ -9,11 +9,11 @@ class EventEmitter:
Simple event emitter for decoupled communication between modules. Simple event emitter for decoupled communication between modules.
""" """
def __init__(self): def __init__(self) -> None:
"""Initializes the EventEmitter with an empty listener map.""" """Initializes the EventEmitter with an empty listener map."""
self._listeners: Dict[str, List[Callable]] = {} self._listeners: Dict[str, List[Callable]] = {}
def on(self, event_name: str, callback: Callable): def on(self, event_name: str, callback: Callable) -> None:
""" """
Registers a callback for a specific event. Registers a callback for a specific event.
@@ -25,7 +25,7 @@ class EventEmitter:
self._listeners[event_name] = [] self._listeners[event_name] = []
self._listeners[event_name].append(callback) self._listeners[event_name].append(callback)
def emit(self, event_name: str, *args: Any, **kwargs: Any): def emit(self, event_name: str, *args: Any, **kwargs: Any) -> None:
""" """
Emits an event, calling all registered callbacks. Emits an event, calling all registered callbacks.
@@ -43,11 +43,11 @@ class AsyncEventQueue:
Asynchronous event queue for decoupled communication using asyncio.Queue. Asynchronous event queue for decoupled communication using asyncio.Queue.
""" """
def __init__(self): def __init__(self) -> None:
"""Initializes the AsyncEventQueue with an internal asyncio.Queue.""" """Initializes the AsyncEventQueue with an internal asyncio.Queue."""
self._queue: asyncio.Queue = asyncio.Queue() self._queue: asyncio.Queue = asyncio.Queue()
async def put(self, event_name: str, payload: Any = None): async def put(self, event_name: str, payload: Any = None) -> None:
""" """
Puts an event into the queue. Puts an event into the queue.
@@ -71,7 +71,7 @@ class UserRequestEvent:
Payload for a user request event. Payload for a user request event.
""" """
def __init__(self, prompt: str, stable_md: str, file_items: List[Any], disc_text: str, base_dir: str): def __init__(self, prompt: str, stable_md: str, file_items: List[Any], disc_text: str, base_dir: str) -> None:
self.prompt = prompt self.prompt = prompt
self.stable_md = stable_md self.stable_md = stable_md
self.file_items = file_items self.file_items = file_items

View File

@@ -1,4 +1,4 @@
# file_cache.py # file_cache.py
""" """
Stub — the Anthropic Files API path has been removed. Stub — the Anthropic Files API path has been removed.
All context is now sent as inline chunked text via _send_anthropic_chunked. All context is now sent as inline chunked text via _send_anthropic_chunked.
@@ -16,7 +16,7 @@ class ASTParser:
Currently supports Python. Currently supports Python.
""" """
def __init__(self, language: str): def __init__(self, language: str) -> None:
if language != "python": if language != "python":
raise ValueError(f"Language '{language}' not supported yet.") raise ValueError(f"Language '{language}' not supported yet.")
self.language_name = language self.language_name = language
@@ -141,7 +141,7 @@ class ASTParser:
code_bytes[start:end] = bytes(replacement, "utf8") code_bytes[start:end] = bytes(replacement, "utf8")
return code_bytes.decode("utf8") return code_bytes.decode("utf8")
def reset_client(): def reset_client() -> None:
pass pass
def content_block_type(path: Path) -> str: def content_block_type(path: Path) -> str:
@@ -150,7 +150,7 @@ def content_block_type(path: Path) -> str:
def get_file_id(path: Path) -> Optional[str]: def get_file_id(path: Path) -> Optional[str]:
return None return None
def evict(path: Path): def evict(path: Path) -> None:
pass pass
def list_cached() -> list[dict]: def list_cached() -> list[dict]:

View File

@@ -11,12 +11,12 @@ def _load_key() -> str:
with open("credentials.toml", "rb") as f: with open("credentials.toml", "rb") as f:
return tomllib.load(f)["gemini"]["api_key"] return tomllib.load(f)["gemini"]["api_key"]
def _ensure_client(): def _ensure_client() -> None:
global _client global _client
if _client is None: if _client is None:
_client = genai.Client(api_key=_load_key()) _client = genai.Client(api_key=_load_key())
def _ensure_chat(): def _ensure_chat() -> None:
global _chat global _chat
if _chat is None: if _chat is None:
_ensure_client() _ensure_client()
@@ -29,7 +29,7 @@ def send(md_content: str, user_message: str) -> str:
response = _chat.send_message(full_message) response = _chat.send_message(full_message)
return response.text return response.text
def reset_session(): def reset_session() -> None:
global _client, _chat global _client, _chat
_client = None _client = None
_chat = None _chat = None

View File

@@ -10,7 +10,7 @@ class LogPruner:
are preserved long-term. are preserved long-term.
""" """
def __init__(self, log_registry: LogRegistry, logs_dir: str): def __init__(self, log_registry: LogRegistry, logs_dir: str) -> None:
""" """
Initializes the LogPruner. Initializes the LogPruner.
@@ -21,7 +21,7 @@ class LogPruner:
self.log_registry = log_registry self.log_registry = log_registry
self.logs_dir = logs_dir self.logs_dir = logs_dir
def prune(self): def prune(self) -> None:
""" """
Prunes old and small session directories from the logs directory. Prunes old and small session directories from the logs directory.

View File

@@ -20,7 +20,7 @@ class LogRegistry:
self.data = {} self.data = {}
self.load_registry() self.load_registry()
def load_registry(self): def load_registry(self) -> None:
""" """
Loads the registry data from the TOML file into memory. Loads the registry data from the TOML file into memory.
Handles date/time conversions from TOML-native formats to strings for consistency. Handles date/time conversions from TOML-native formats to strings for consistency.
@@ -48,7 +48,7 @@ class LogRegistry:
else: else:
self.data = {} self.data = {}
def save_registry(self): def save_registry(self) -> None:
""" """
Serializes and saves the current registry data to the TOML file. Serializes and saves the current registry data to the TOML file.
Converts internal datetime objects to ISO format strings for compatibility. Converts internal datetime objects to ISO format strings for compatibility.
@@ -151,7 +151,7 @@ class LogRegistry:
# Check the top-level 'whitelisted' flag. If it's not set or False, it's not whitelisted. # Check the top-level 'whitelisted' flag. If it's not set or False, it's not whitelisted.
return session_data.get('whitelisted', False) return session_data.get('whitelisted', False)
def update_auto_whitelist_status(self, session_id: str): def update_auto_whitelist_status(self, session_id: str) -> None:
""" """
Analyzes session logs and updates whitelisting status based on heuristics. Analyzes session logs and updates whitelisting status based on heuristics.
Sessions are automatically whitelisted if they contain error keywords, Sessions are automatically whitelisted if they contain error keywords,

View File

@@ -17,12 +17,12 @@ class Ticket:
blocked_reason: Optional[str] = None blocked_reason: Optional[str] = None
step_mode: bool = False step_mode: bool = False
def mark_blocked(self, reason: str): def mark_blocked(self, reason: str) -> None:
"""Sets the ticket status to 'blocked' and records the reason.""" """Sets the ticket status to 'blocked' and records the reason."""
self.status = "blocked" self.status = "blocked"
self.blocked_reason = reason self.blocked_reason = reason
def mark_complete(self): def mark_complete(self) -> None:
"""Sets the ticket status to 'completed'.""" """Sets the ticket status to 'completed'."""
self.status = "completed" self.status = "completed"

View File

@@ -17,7 +17,7 @@ class ConductorEngine:
Orchestrates the execution of tickets within a track. Orchestrates the execution of tickets within a track.
""" """
def __init__(self, track: Track, event_queue: Optional[events.AsyncEventQueue] = None, auto_queue: bool = False): def __init__(self, track: Track, event_queue: Optional[events.AsyncEventQueue] = None, auto_queue: bool = False) -> None:
self.track = track self.track = track
self.event_queue = event_queue self.event_queue = event_queue
self.tier_usage = { self.tier_usage = {
@@ -29,7 +29,7 @@ class ConductorEngine:
self.dag = TrackDAG(self.track.tickets) self.dag = TrackDAG(self.track.tickets)
self.engine = ExecutionEngine(self.dag, auto_queue=auto_queue) self.engine = ExecutionEngine(self.dag, auto_queue=auto_queue)
async def _push_state(self, status: str = "running", active_tier: str = None): async def _push_state(self, status: str = "running", active_tier: str = None) -> None:
if not self.event_queue: if not self.event_queue:
return return
payload = { payload = {
@@ -44,7 +44,7 @@ class ConductorEngine:
} }
await self.event_queue.put("mma_state_update", payload) await self.event_queue.put("mma_state_update", payload)
def parse_json_tickets(self, json_str: str): def parse_json_tickets(self, json_str: str) -> None:
""" """
Parses a JSON string of ticket definitions (Godot ECS Flat List format) Parses a JSON string of ticket definitions (Godot ECS Flat List format)
and populates the Track's ticket list. and populates the Track's ticket list.
@@ -73,7 +73,7 @@ class ConductorEngine:
except KeyError as e: except KeyError as e:
print(f"Missing required field in ticket definition: {e}") print(f"Missing required field in ticket definition: {e}")
async def run(self, md_content: str = ""): async def run(self, md_content: str = "") -> None:
""" """
Main execution loop using the DAG engine. Main execution loop using the DAG engine.
Args: Args:

View File

@@ -2,7 +2,7 @@ import ast
from pathlib import Path from pathlib import Path
class CodeOutliner: class CodeOutliner:
def __init__(self): def __init__(self) -> None:
pass pass
def outline(self, code: str) -> str: def outline(self, code: str) -> str:

View File

@@ -3,7 +3,7 @@ import psutil
import threading import threading
class PerformanceMonitor: class PerformanceMonitor:
def __init__(self): def __init__(self) -> None:
self._start_time = None self._start_time = None
self._last_frame_time = 0.0 self._last_frame_time = 0.0
self._fps = 0.0 self._fps = 0.0
@@ -32,7 +32,7 @@ class PerformanceMonitor:
self._cpu_thread = threading.Thread(target=self._monitor_cpu, daemon=True) self._cpu_thread = threading.Thread(target=self._monitor_cpu, daemon=True)
self._cpu_thread.start() self._cpu_thread.start()
def _monitor_cpu(self): def _monitor_cpu(self) -> None:
while not self._stop_event.is_set(): while not self._stop_event.is_set():
# psutil.cpu_percent with interval=1.0 is blocking for 1 second. # psutil.cpu_percent with interval=1.0 is blocking for 1 second.
# To be responsive to stop_event, we use a smaller interval or no interval # To be responsive to stop_event, we use a smaller interval or no interval
@@ -49,21 +49,21 @@ class PerformanceMonitor:
break break
time.sleep(0.1) time.sleep(0.1)
def start_frame(self): def start_frame(self) -> None:
self._start_time = time.time() self._start_time = time.time()
def record_input_event(self): def record_input_event(self) -> None:
self._last_input_time = time.time() self._last_input_time = time.time()
def start_component(self, name: str): def start_component(self, name: str) -> None:
self._comp_start[name] = time.time() self._comp_start[name] = time.time()
def end_component(self, name: str): def end_component(self, name: str) -> None:
if name in self._comp_start: if name in self._comp_start:
elapsed = (time.time() - self._comp_start[name]) * 1000.0 elapsed = (time.time() - self._comp_start[name]) * 1000.0
self._component_timings[name] = elapsed self._component_timings[name] = elapsed
def end_frame(self): def end_frame(self) -> None:
if self._start_time is None: if self._start_time is None:
return return
end_time = time.time() end_time = time.time()
@@ -80,7 +80,7 @@ class PerformanceMonitor:
self._frame_count = 0 self._frame_count = 0
self._fps_last_time = end_time self._fps_last_time = end_time
def _check_alerts(self): def _check_alerts(self) -> None:
if not self.alert_callback: if not self.alert_callback:
return return
now = time.time() now = time.time()
@@ -114,6 +114,6 @@ class PerformanceMonitor:
metrics[f'time_{name}_ms'] = elapsed metrics[f'time_{name}_ms'] = elapsed
return metrics return metrics
def stop(self): def stop(self) -> None:
self._stop_event.set() self._stop_event.set()
self._cpu_thread.join(timeout=2.0) self._cpu_thread.join(timeout=2.0)

View File

@@ -2,7 +2,7 @@ import pytest
from models import Ticket from models import Ticket
from dag_engine import TrackDAG, ExecutionEngine from dag_engine import TrackDAG, ExecutionEngine
def test_auto_queue_and_step_mode(): def test_auto_queue_and_step_mode() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", step_mode=True) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", step_mode=True)
dag = TrackDAG([t1, t2]) dag = TrackDAG([t1, t2])

View File

@@ -1,7 +1,7 @@
import subprocess import subprocess
import sys import sys
def test_type_hints(): def test_type_hints() -> None:
files = ["project_manager.py", "session_logger.py"] files = ["project_manager.py", "session_logger.py"]
all_missing = [] all_missing = []
for f in files: for f in files:

View File

@@ -45,7 +45,7 @@ def get_test_files(manifest: Dict[str, Any], category: str) -> List[str]:
print(f"DEBUG: Found test files for category '{category}': {files}", file=sys.stderr) print(f"DEBUG: Found test files for category '{category}': {files}", file=sys.stderr)
return files return files
def main(): def main() -> None:
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description="Run tests with optional manifest and category filtering, passing additional pytest arguments.", description="Run tests with optional manifest and category filtering, passing additional pytest arguments.",
formatter_class=argparse.RawDescriptionHelpFormatter, formatter_class=argparse.RawDescriptionHelpFormatter,

View File

@@ -253,7 +253,7 @@ def create_parser():
return parser return parser
def main(): def main() -> None:
parser = create_parser() parser = create_parser()
args = parser.parse_args() args = parser.parse_args()
role = args.role role = args.role

View File

@@ -15,7 +15,7 @@ except ImportError:
sys.exit(1) sys.exit(1)
def main(): def main() -> None:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stderr) logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stderr)
logging.debug("Claude Tool Bridge script started.") logging.debug("Claude Tool Bridge script started.")
try: try:

View File

@@ -78,7 +78,7 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]:
return [TextContent(type="text", text=f"ERROR: {e}")] return [TextContent(type="text", text=f"ERROR: {e}")]
async def main(): async def main() -> None:
async with stdio_server() as (read_stream, write_stream): async with stdio_server() as (read_stream, write_stream):
await server.run( await server.run(
read_stream, read_stream,

View File

@@ -239,7 +239,7 @@ def create_parser():
) )
return parser return parser
def main(): def main() -> None:
parser = create_parser() parser = create_parser()
args = parser.parse_args() args = parser.parse_args()
role = args.role role = args.role

56
scripts/scan_all_hints.py Normal file
View File

@@ -0,0 +1,56 @@
"""Scan all .py files for missing type hints. Writes scan_report.txt."""
import ast
import os
from typing import Dict, Tuple

# Directories never worth scanning (VCS metadata, caches, vendored envs, agent dirs).
SKIP = {'.git', '__pycache__', '.venv', 'venv', 'node_modules', '.claude', '.gemini'}
# Repository root: one level above this script's directory.
BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))


def count_untyped(tree: ast.Module) -> Tuple[int, int, int]:
    """Count untyped items in a parsed module.

    Returns (no_return, untyped_params, untyped_vars) where:
    - no_return: functions/methods with no return annotation
    - untyped_params: positional args (excluding self/cls) with no annotation
    - untyped_vars: plain assignments to simple names at module/class level
      (ast.Assign is by definition unannotated; annotated ones are AnnAssign)

    Only module- and class-level definitions are inspected; functions nested
    inside functions are deliberately skipped, as are *args/**kwargs and
    keyword-only args (TODO(review): consider counting kwonlyargs too).
    """
    nr = up = uv = 0

    def scan(scope: ast.AST) -> None:
        nonlocal nr, up, uv
        for node in ast.iter_child_nodes(scope):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                if node.returns is None:
                    nr += 1
                for arg in node.args.args:
                    if arg.arg not in ('self', 'cls') and arg.annotation is None:
                        up += 1
            if isinstance(node, ast.Assign):
                uv += sum(1 for t in node.targets if isinstance(t, ast.Name))
            if isinstance(node, ast.ClassDef):
                scan(node)

    scan(tree)
    return nr, up, uv


def build_report(results: Dict[str, Tuple[int, int, int, int]]) -> str:
    """Render per-file (no_return, params, vars, total) counts as a text table,
    sorted by total descending, with a grand-total footer."""
    lines = [f'Files with untyped items: {len(results)}', '']
    lines.append(f'{"File":<58} {"NoRet":>6} {"Params":>7} {"Vars":>5} {"Total":>6}')
    lines.append('-' * 85)
    grand_total = 0
    for path in sorted(results, key=lambda p: results[p][3], reverse=True):
        nr, up, uv, total = results[path]
        lines.append(f'{path:<58} {nr:>6} {up:>7} {uv:>5} {total:>6}')
        grand_total += total
    lines.append('-' * 85)
    lines.append(f'{"TOTAL":<58} {"":>6} {"":>7} {"":>5} {grand_total:>6}')
    return '\n'.join(lines)


def main(base: str = BASE) -> None:
    """Walk *base*, tally untyped items per .py file, write scan_report.txt there.

    Note: chdir's into *base* so report paths are relative ('./pkg/mod.py').
    """
    os.chdir(base)
    results: Dict[str, Tuple[int, int, int, int]] = {}
    for root, dirs, files in os.walk('.'):
        # Prune skipped directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if d not in SKIP]
        for name in files:
            if not name.endswith('.py'):
                continue
            path = os.path.join(root, name).replace('\\', '/')
            try:
                # utf-8-sig tolerates BOM-prefixed files (common on Windows).
                with open(path, 'r', encoding='utf-8-sig') as fh:
                    tree = ast.parse(fh.read())
            except Exception:
                # Best-effort scan: silently skip unreadable/unparsable files.
                continue
            nr, up, uv = count_untyped(tree)
            total = nr + up + uv
            if total > 0:
                results[path] = (nr, up, uv, total)
    with open('scan_report.txt', 'w', encoding='utf-8') as f:
        f.write(build_report(results))


if __name__ == '__main__':
    main()

View File

@@ -17,7 +17,7 @@ except ImportError:
print(json.dumps({"error": "Failed to import required modules"})) print(json.dumps({"error": "Failed to import required modules"}))
sys.exit(1) sys.exit(1)
def main(): def main() -> None:
if len(sys.argv) < 2: if len(sys.argv) < 2:
print(json.dumps({"error": "No tool name provided"})) print(json.dumps({"error": "No tool name provided"}))
sys.exit(1) sys.exit(1)

View File

@@ -13,7 +13,7 @@ except ImportError as e:
print("[]") print("[]")
sys.exit(0) sys.exit(0)
def main(): def main() -> None:
specs = list(mcp_client.MCP_TOOL_SPECS) specs = list(mcp_client.MCP_TOOL_SPECS)
# Add run_powershell (manually define to match ai_client.py) # Add run_powershell (manually define to match ai_client.py)
specs.append({ specs.append({

View File

@@ -5,7 +5,7 @@ import random
from api_hook_client import ApiHookClient from api_hook_client import ApiHookClient
from simulation.workflow_sim import WorkflowSimulator from simulation.workflow_sim import WorkflowSimulator
def main(): def main() -> None:
client = ApiHookClient() client = ApiHookClient()
print("=== Manual Slop: Live UX Walkthrough ===") print("=== Manual Slop: Live UX Walkthrough ===")
print("Connecting to GUI...") print("Connecting to GUI...")

View File

@@ -4,7 +4,7 @@ import time
from simulation.sim_base import BaseSimulation, run_sim from simulation.sim_base import BaseSimulation, run_sim
class AISettingsSimulation(BaseSimulation): class AISettingsSimulation(BaseSimulation):
def run(self): def run(self) -> None:
print("\n--- Running AI Settings Simulation (Gemini Only) ---") print("\n--- Running AI Settings Simulation (Gemini Only) ---")
# 1. Verify initial model # 1. Verify initial model
provider = self.client.get_value("current_provider") provider = self.client.get_value("current_provider")

View File

@@ -9,7 +9,7 @@ from simulation.workflow_sim import WorkflowSimulator
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
class BaseSimulation: class BaseSimulation:
def __init__(self, client: ApiHookClient = None): def __init__(self, client: ApiHookClient = None) -> None:
if client is None: if client is None:
self.client = ApiHookClient() self.client = ApiHookClient()
else: else:
@@ -36,7 +36,7 @@ class BaseSimulation:
self.client.set_value("current_model", "gemini-2.5-flash-lite") self.client.set_value("current_model", "gemini-2.5-flash-lite")
time.sleep(0.2) time.sleep(0.2)
def teardown(self): def teardown(self) -> None:
if self.project_path and os.path.exists(self.project_path): if self.project_path and os.path.exists(self.project_path):
# We keep it for debugging if it failed, but usually we'd clean up # We keep it for debugging if it failed, but usually we'd clean up
# os.remove(self.project_path) # os.remove(self.project_path)

View File

@@ -4,7 +4,7 @@ import time
from simulation.sim_base import BaseSimulation, run_sim from simulation.sim_base import BaseSimulation, run_sim
class ContextSimulation(BaseSimulation): class ContextSimulation(BaseSimulation):
def run(self): def run(self) -> None:
print("\n--- Running Context & Chat Simulation ---") print("\n--- Running Context & Chat Simulation ---")
# 1. Test Discussion Creation # 1. Test Discussion Creation
disc_name = f"TestDisc_{int(time.time())}" disc_name = f"TestDisc_{int(time.time())}"

View File

@@ -9,7 +9,7 @@ class ExecutionSimulation(BaseSimulation):
if os.path.exists("hello.ps1"): if os.path.exists("hello.ps1"):
os.remove("hello.ps1") os.remove("hello.ps1")
def run(self): def run(self) -> None:
print("\n--- Running Execution & Modals Simulation ---") print("\n--- Running Execution & Modals Simulation ---")
# 1. Trigger script generation (Async so we don't block on the wait loop) # 1. Trigger script generation (Async so we don't block on the wait loop)
msg = "Create a hello.ps1 script that prints 'Simulation Test' and execute it." msg = "Create a hello.ps1 script that prints 'Simulation Test' and execute it."

View File

@@ -4,7 +4,7 @@ import time
from simulation.sim_base import BaseSimulation, run_sim from simulation.sim_base import BaseSimulation, run_sim
class ToolsSimulation(BaseSimulation): class ToolsSimulation(BaseSimulation):
def run(self): def run(self) -> None:
print("\n--- Running Tools Simulation ---") print("\n--- Running Tools Simulation ---")
# 1. Trigger list_directory tool # 1. Trigger list_directory tool
msg = "List the files in the current directory." msg = "List the files in the current directory."

View File

@@ -4,7 +4,7 @@ from api_hook_client import ApiHookClient
from simulation.user_agent import UserSimAgent from simulation.user_agent import UserSimAgent
class WorkflowSimulator: class WorkflowSimulator:
def __init__(self, hook_client: ApiHookClient): def __init__(self, hook_client: ApiHookClient) -> None:
self.client = hook_client self.client = hook_client
self.user_agent = UserSimAgent(hook_client) self.user_agent = UserSimAgent(hook_client)
@@ -30,7 +30,7 @@ class WorkflowSimulator:
self.client.select_list_item("disc_listbox", name) self.client.select_list_item("disc_listbox", name)
time.sleep(1) time.sleep(1)
def load_prior_log(self): def load_prior_log(self) -> None:
print("Loading prior log") print("Loading prior log")
self.client.click("btn_load_log") self.client.click("btn_load_log")
# This usually opens a file dialog which we can't easily automate from here # This usually opens a file dialog which we can't easily automate from here

View File

@@ -6,12 +6,12 @@ import project_manager
from models import Track, Ticket from models import Track, Ticket
class TestMMAPersistence(unittest.TestCase): class TestMMAPersistence(unittest.TestCase):
def test_default_project_has_mma(self): def test_default_project_has_mma(self) -> None:
proj = project_manager.default_project("test") proj = project_manager.default_project("test")
self.assertIn("mma", proj) self.assertIn("mma", proj)
self.assertEqual(proj["mma"], {"epic": "", "active_track_id": "", "tracks": []}) self.assertEqual(proj["mma"], {"epic": "", "active_track_id": "", "tracks": []})
def test_save_load_mma(self): def test_save_load_mma(self) -> None:
proj = project_manager.default_project("test") proj = project_manager.default_project("test")
proj["mma"] = {"epic": "Test Epic", "tracks": [{"id": "track_1"}]} proj["mma"] = {"epic": "Test Epic", "tracks": [{"id": "track_1"}]}
test_file = Path("test_mma_proj.toml") test_file = Path("test_mma_proj.toml")

View File

@@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient
import ai_client import ai_client
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def reset_ai_client(): def reset_ai_client() -> None:
"""Reset ai_client global state between every test to prevent state pollution.""" """Reset ai_client global state between every test to prevent state pollution."""
ai_client.reset_session() ai_client.reset_session()
# Default to a safe model # Default to a safe model
@@ -41,7 +41,7 @@ def kill_process_tree(pid):
print(f"[Fixture] Error killing process tree {pid}: {e}") print(f"[Fixture] Error killing process tree {pid}: {e}")
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def live_gui(): def live_gui() -> None:
""" """
Session-scoped fixture that starts gui_2.py with --enable-test-hooks. Session-scoped fixture that starts gui_2.py with --enable-test-hooks.
""" """

View File

@@ -2,7 +2,7 @@ import pytest
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
import ai_client import ai_client
def test_ai_client_send_gemini_cli(): def test_ai_client_send_gemini_cli() -> None:
""" """
Verifies that 'ai_client.send' correctly interacts with 'GeminiCliAdapter' Verifies that 'ai_client.send' correctly interacts with 'GeminiCliAdapter'
when the 'gemini_cli' provider is specified. when the 'gemini_cli' provider is specified.

View File

@@ -2,7 +2,7 @@ import pytest
from unittest.mock import patch, MagicMock from unittest.mock import patch, MagicMock
import ai_client import ai_client
def test_list_models_gemini_cli(): def test_list_models_gemini_cli() -> None:
""" """
Verifies that 'ai_client.list_models' correctly returns a list of models Verifies that 'ai_client.list_models' correctly returns a list of models
for the 'gemini_cli' provider. for the 'gemini_cli' provider.

View File

@@ -2,7 +2,7 @@ import pytest
import textwrap import textwrap
from scripts.ai_style_formatter import format_code from scripts.ai_style_formatter import format_code
def test_basic_indentation(): def test_basic_indentation() -> None:
source = textwrap.dedent("""\ source = textwrap.dedent("""\
def hello(): def hello():
print("world") print("world")
@@ -17,7 +17,7 @@ def test_basic_indentation():
) )
assert format_code(source) == expected assert format_code(source) == expected
def test_top_level_blank_lines(): def test_top_level_blank_lines() -> None:
source = textwrap.dedent("""\ source = textwrap.dedent("""\
def a(): def a():
pass pass
@@ -35,7 +35,7 @@ def test_top_level_blank_lines():
) )
assert format_code(source) == expected assert format_code(source) == expected
def test_inner_blank_lines(): def test_inner_blank_lines() -> None:
source = textwrap.dedent("""\ source = textwrap.dedent("""\
def a(): def a():
print("start") print("start")
@@ -49,7 +49,7 @@ def test_inner_blank_lines():
) )
assert format_code(source) == expected assert format_code(source) == expected
def test_multiline_string_safety(): def test_multiline_string_safety() -> None:
source = textwrap.dedent("""\ source = textwrap.dedent("""\
def a(): def a():
''' '''
@@ -72,7 +72,7 @@ def test_multiline_string_safety():
assert " This is a multiline" in result assert " This is a multiline" in result
assert result.startswith("def a():\n '''") assert result.startswith("def a():\n '''")
def test_continuation_indentation(): def test_continuation_indentation() -> None:
source = textwrap.dedent("""\ source = textwrap.dedent("""\
def long_func( def long_func(
a, a,
@@ -95,7 +95,7 @@ def test_continuation_indentation():
) )
assert format_code(source) == expected assert format_code(source) == expected
def test_multiple_top_level_definitions(): def test_multiple_top_level_definitions() -> None:
source = textwrap.dedent("""\ source = textwrap.dedent("""\
class MyClass: class MyClass:
def __init__(self): def __init__(self):

View File

@@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import ai_client import ai_client
class MockUsage: class MockUsage:
def __init__(self): def __init__(self) -> None:
self.prompt_token_count = 10 self.prompt_token_count = 10
self.candidates_token_count = 5 self.candidates_token_count = 5
self.total_token_count = 15 self.total_token_count = 15
@@ -28,13 +28,13 @@ def test_ai_client_event_emitter_exists():
# This should fail initially because 'events' won't exist on ai_client # This should fail initially because 'events' won't exist on ai_client
assert hasattr(ai_client, 'events') assert hasattr(ai_client, 'events')
def test_event_emission(): def test_event_emission() -> None:
callback = MagicMock() callback = MagicMock()
ai_client.events.on("test_event", callback) ai_client.events.on("test_event", callback)
ai_client.events.emit("test_event", payload={"data": 123}) ai_client.events.emit("test_event", payload={"data": 123})
callback.assert_called_once_with(payload={"data": 123}) callback.assert_called_once_with(payload={"data": 123})
def test_send_emits_events(): def test_send_emits_events() -> None:
with patch("ai_client._send_gemini") as mock_send_gemini, \ with patch("ai_client._send_gemini") as mock_send_gemini, \
patch("ai_client._send_anthropic") as mock_send_anthropic: patch("ai_client._send_anthropic") as mock_send_anthropic:
mock_send_gemini.return_value = "gemini response" mock_send_gemini.return_value = "gemini response"
@@ -50,7 +50,7 @@ def test_send_emits_events():
# Let's mock _gemini_client instead to let _send_gemini run and emit events. # Let's mock _gemini_client instead to let _send_gemini run and emit events.
pass pass
def test_send_emits_events_proper(): def test_send_emits_events_proper() -> None:
with patch("ai_client._ensure_gemini_client"), \ with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client") as mock_client: patch("ai_client._gemini_client") as mock_client:
mock_chat = MagicMock() mock_chat = MagicMock()
@@ -70,7 +70,7 @@ def test_send_emits_events_proper():
args, kwargs = start_callback.call_args args, kwargs = start_callback.call_args
assert kwargs['payload']['provider'] == 'gemini' assert kwargs['payload']['provider'] == 'gemini'
def test_send_emits_tool_events(): def test_send_emits_tool_events() -> None:
import mcp_client import mcp_client
with patch("ai_client._ensure_gemini_client"), \ with patch("ai_client._ensure_gemini_client"), \
patch("ai_client._gemini_client") as mock_client, \ patch("ai_client._gemini_client") as mock_client, \

View File

@@ -56,7 +56,7 @@ def test_get_performance_success(live_gui):
response = client.get_performance() response = client.get_performance()
assert "performance" in response assert "performance" in response
def test_unsupported_method_error(): def test_unsupported_method_error() -> None:
""" """
Test that calling an unsupported HTTP method raises a ValueError. Test that calling an unsupported HTTP method raises a ValueError.
""" """
@@ -64,7 +64,7 @@ def test_unsupported_method_error():
with pytest.raises(ValueError, match="Unsupported HTTP method"): with pytest.raises(ValueError, match="Unsupported HTTP method"):
client._make_request('PUT', '/some_endpoint', data={'key': 'value'}) client._make_request('PUT', '/some_endpoint', data={'key': 'value'})
def test_get_text_value(): def test_get_text_value() -> None:
""" """
Test retrieval of string representation using get_text_value. Test retrieval of string representation using get_text_value.
""" """
@@ -74,7 +74,7 @@ def test_get_text_value():
with patch.object(client, 'get_value', return_value=None): with patch.object(client, 'get_value', return_value=None):
assert client.get_text_value("dummy_tag") is None assert client.get_text_value("dummy_tag") is None
def test_get_node_status(): def test_get_node_status() -> None:
""" """
Test retrieval of DAG node status using get_node_status. Test retrieval of DAG node status using get_node_status.
""" """

View File

@@ -7,7 +7,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient from api_hook_client import ApiHookClient
def test_api_client_has_extensions(): def test_api_client_has_extensions() -> None:
client = ApiHookClient() client = ApiHookClient()
# These should fail initially as they are not implemented # These should fail initially as they are not implemented
assert hasattr(client, 'select_tab') assert hasattr(client, 'select_tab')
@@ -33,7 +33,7 @@ def test_get_indicator_state_integration(live_gui):
assert 'shown' in response assert 'shown' in response
assert response['tag'] == "thinking_indicator" assert response['tag'] == "thinking_indicator"
def test_app_processes_new_actions(): def test_app_processes_new_actions() -> None:
import gui_legacy import gui_legacy
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
import dearpygui.dearpygui as dpg import dearpygui.dearpygui as dpg

View File

@@ -2,12 +2,12 @@ import pytest
import tree_sitter import tree_sitter
from file_cache import ASTParser from file_cache import ASTParser
def test_ast_parser_initialization(): def test_ast_parser_initialization() -> None:
"""Verify that ASTParser can be initialized with a language string.""" """Verify that ASTParser can be initialized with a language string."""
parser = ASTParser("python") parser = ASTParser("python")
assert parser.language_name == "python" assert parser.language_name == "python"
def test_ast_parser_parse(): def test_ast_parser_parse() -> None:
"""Verify that the parse method returns a tree_sitter.Tree.""" """Verify that the parse method returns a tree_sitter.Tree."""
parser = ASTParser("python") parser = ASTParser("python")
code = """def example_func(): code = """def example_func():
@@ -17,7 +17,7 @@ def test_ast_parser_parse():
# Basic check that it parsed something # Basic check that it parsed something
assert tree.root_node.type == "module" assert tree.root_node.type == "module"
def test_ast_parser_get_skeleton_python(): def test_ast_parser_get_skeleton_python() -> None:
"""Verify that get_skeleton replaces function bodies with '...' while preserving docstrings.""" """Verify that get_skeleton replaces function bodies with '...' while preserving docstrings."""
parser = ASTParser("python") parser = ASTParser("python")
code = ''' code = '''
@@ -51,14 +51,14 @@ class MyClass:
assert "return result" not in skeleton assert "return result" not in skeleton
assert 'print("doing something")' not in skeleton assert 'print("doing something")' not in skeleton
def test_ast_parser_invalid_language(): def test_ast_parser_invalid_language() -> None:
"""Verify handling of unsupported or invalid languages.""" """Verify handling of unsupported or invalid languages."""
# This might raise an error or return a default, depending on implementation # This might raise an error or return a default, depending on implementation
# For now, we expect it to either fail gracefully or raise an exception we can catch # For now, we expect it to either fail gracefully or raise an exception we can catch
with pytest.raises(Exception): with pytest.raises(Exception):
ASTParser("not-a-language") ASTParser("not-a-language")
def test_ast_parser_get_curated_view(): def test_ast_parser_get_curated_view() -> None:
"""Verify that get_curated_view preserves function bodies with @core_logic or # [HOT].""" """Verify that get_curated_view preserves function bodies with @core_logic or # [HOT]."""
parser = ASTParser("python") parser = ASTParser("python")
code = ''' code = '''

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from file_cache import ASTParser from file_cache import ASTParser
def test_ast_parser_get_curated_view(): def test_ast_parser_get_curated_view() -> None:
parser = ASTParser("python") parser = ASTParser("python")
code = ''' code = '''
@core_logic @core_logic

View File

@@ -2,7 +2,7 @@ import asyncio
import pytest import pytest
from events import AsyncEventQueue from events import AsyncEventQueue
def test_async_event_queue_put_get(): def test_async_event_queue_put_get() -> None:
"""Verify that an event can be asynchronously put and retrieved from the queue.""" """Verify that an event can be asynchronously put and retrieved from the queue."""
async def run_test(): async def run_test():
@@ -15,7 +15,7 @@ def test_async_event_queue_put_get():
assert ret_payload == payload assert ret_payload == payload
asyncio.run(run_test()) asyncio.run(run_test())
def test_async_event_queue_multiple(): def test_async_event_queue_multiple() -> None:
"""Verify that multiple events can be asynchronously put and retrieved in order.""" """Verify that multiple events can be asynchronously put and retrieved in order."""
async def run_test(): async def run_test():
@@ -30,7 +30,7 @@ def test_async_event_queue_multiple():
assert val2 == 2 assert val2 == 2
asyncio.run(run_test()) asyncio.run(run_test())
def test_async_event_queue_none_payload(): def test_async_event_queue_none_payload() -> None:
"""Verify that an event with None payload works correctly.""" """Verify that an event with None payload works correctly."""
async def run_test(): async def run_test():

View File

@@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from scripts.cli_tool_bridge import main from scripts.cli_tool_bridge import main
class TestCliToolBridge(unittest.TestCase): class TestCliToolBridge(unittest.TestCase):
def setUp(self): def setUp(self) -> None:
os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop' os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop'
self.tool_call = { self.tool_call = {
'tool_name': 'read_file', 'tool_name': 'read_file',

View File

@@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from scripts.cli_tool_bridge import main from scripts.cli_tool_bridge import main
class TestCliToolBridgeMapping(unittest.TestCase): class TestCliToolBridgeMapping(unittest.TestCase):
def setUp(self): def setUp(self) -> None:
os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop' os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop'
@patch('sys.stdin', new_callable=io.StringIO) @patch('sys.stdin', new_callable=io.StringIO)

View File

@@ -55,7 +55,7 @@ def test_conductor_handles_api_hook_failure(live_gui):
assert results["verification_successful"] is False assert results["verification_successful"] is False
assert "failed" in results["verification_message"] assert "failed" in results["verification_message"]
def test_conductor_handles_api_hook_connection_error(): def test_conductor_handles_api_hook_connection_error() -> None:
""" """
Verify Conductor handles a simulated API hook connection error (server down). Verify Conductor handles a simulated API hook connection error (server down).
""" """

View File

@@ -6,7 +6,7 @@ import ai_client
# These tests define the expected interface for multi_agent_conductor.py # These tests define the expected interface for multi_agent_conductor.py
# which will be implemented in the next phase of TDD. # which will be implemented in the next phase of TDD.
def test_conductor_engine_initialization(): def test_conductor_engine_initialization() -> None:
""" """
Test that ConductorEngine can be initialized with a Track. Test that ConductorEngine can be initialized with a Track.
""" """

View File

@@ -48,12 +48,12 @@ class TestConductorTechLead(unittest.TestCase):
self.assertEqual(tickets, []) self.assertEqual(tickets, [])
class TestTopologicalSort(unittest.TestCase): class TestTopologicalSort(unittest.TestCase):
def test_topological_sort_empty(self): def test_topological_sort_empty(self) -> None:
tickets = [] tickets = []
sorted_tickets = conductor_tech_lead.topological_sort(tickets) sorted_tickets = conductor_tech_lead.topological_sort(tickets)
self.assertEqual(sorted_tickets, []) self.assertEqual(sorted_tickets, [])
def test_topological_sort_linear(self): def test_topological_sort_linear(self) -> None:
tickets = [ tickets = [
{"id": "t2", "depends_on": ["t1"]}, {"id": "t2", "depends_on": ["t1"]},
{"id": "t1", "depends_on": []}, {"id": "t1", "depends_on": []},
@@ -82,7 +82,7 @@ class TestTopologicalSort(unittest.TestCase):
self.assertEqual(ids[-1], "t4") self.assertEqual(ids[-1], "t4")
self.assertSetEqual(set(ids[1:3]), {"t2", "t3"}) self.assertSetEqual(set(ids[1:3]), {"t2", "t3"})
def test_topological_sort_cycle(self): def test_topological_sort_cycle(self) -> None:
tickets = [ tickets = [
{"id": "t1", "depends_on": ["t2"]}, {"id": "t1", "depends_on": ["t2"]},
{"id": "t2", "depends_on": ["t1"]}, {"id": "t2", "depends_on": ["t1"]},

View File

@@ -2,7 +2,7 @@ import pytest
from models import Ticket from models import Ticket
from dag_engine import TrackDAG from dag_engine import TrackDAG
def test_get_ready_tasks_linear(): def test_get_ready_tasks_linear() -> None:
t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T2"]) t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T2"])
@@ -11,7 +11,7 @@ def test_get_ready_tasks_linear():
assert len(ready) == 1 assert len(ready) == 1
assert ready[0].id == "T2" assert ready[0].id == "T2"
def test_get_ready_tasks_branching(): def test_get_ready_tasks_branching() -> None:
t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"]) t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"])
@@ -21,19 +21,19 @@ def test_get_ready_tasks_branching():
ready_ids = {t.id for t in ready} ready_ids = {t.id for t in ready}
assert ready_ids == {"T2", "T3"} assert ready_ids == {"T2", "T3"}
def test_has_cycle_no_cycle(): def test_has_cycle_no_cycle() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2]) dag = TrackDAG([t1, t2])
assert not dag.has_cycle() assert not dag.has_cycle()
def test_has_cycle_direct_cycle(): def test_has_cycle_direct_cycle() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"]) t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"])
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2]) dag = TrackDAG([t1, t2])
assert dag.has_cycle() assert dag.has_cycle()
def test_has_cycle_indirect_cycle(): def test_has_cycle_indirect_cycle() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"]) t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"])
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T3"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T3"])
t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"]) t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"])
@@ -49,7 +49,7 @@ def test_has_cycle_complex_no_cycle():
dag = TrackDAG([t1, t2, t3, t4]) dag = TrackDAG([t1, t2, t3, t4])
assert not dag.has_cycle() assert not dag.has_cycle()
def test_get_ready_tasks_multiple_deps(): def test_get_ready_tasks_multiple_deps() -> None:
t1 = Ticket(id="T1", description="T1", status="completed", assigned_to="worker") t1 = Ticket(id="T1", description="T1", status="completed", assigned_to="worker")
t2 = Ticket(id="T2", description="T2", status="completed", assigned_to="worker") t2 = Ticket(id="T2", description="T2", status="completed", assigned_to="worker")
t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T1", "T2"]) t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T1", "T2"])
@@ -58,7 +58,7 @@ def test_get_ready_tasks_multiple_deps():
t2.status = "todo" t2.status = "todo"
assert [t.id for t in dag.get_ready_tasks()] == ["T2"] assert [t.id for t in dag.get_ready_tasks()] == ["T2"]
def test_topological_sort(): def test_topological_sort() -> None:
t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker") t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"])
t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T2"]) t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T2"])
@@ -66,7 +66,7 @@ def test_topological_sort():
sort = dag.topological_sort() sort = dag.topological_sort()
assert sort == ["T1", "T2", "T3"] assert sort == ["T1", "T2", "T3"]
def test_topological_sort_cycle(): def test_topological_sort_cycle() -> None:
t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker", depends_on=["T2"]) t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker", depends_on=["T2"])
t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2]) dag = TrackDAG([t1, t2])

View File

@@ -24,7 +24,7 @@ def test_credentials_error_mentions_deepseek(monkeypatch):
assert "[deepseek]" in err_msg assert "[deepseek]" in err_msg
assert "api_key" in err_msg assert "api_key" in err_msg
def test_default_project_includes_reasoning_role(): def test_default_project_includes_reasoning_role() -> None:
""" """
Verify that 'Reasoning' is included in the default discussion roles Verify that 'Reasoning' is included in the default discussion roles
to support DeepSeek-R1 reasoning traces. to support DeepSeek-R1 reasoning traces.
@@ -33,14 +33,14 @@ def test_default_project_includes_reasoning_role():
roles = proj["discussion"]["roles"] roles = proj["discussion"]["roles"]
assert "Reasoning" in roles assert "Reasoning" in roles
def test_gui_providers_list(): def test_gui_providers_list() -> None:
""" """
Check if 'deepseek' is in the GUI's provider list. Check if 'deepseek' is in the GUI's provider list.
""" """
import gui_2 import gui_2
assert "deepseek" in gui_2.PROVIDERS assert "deepseek" in gui_2.PROVIDERS
def test_deepseek_model_listing(): def test_deepseek_model_listing() -> None:
""" """
Verify that list_models for deepseek returns expected models. Verify that list_models for deepseek returns expected models.
""" """

View File

@@ -2,7 +2,7 @@ import pytest
from unittest.mock import patch, MagicMock from unittest.mock import patch, MagicMock
import ai_client import ai_client
def test_deepseek_model_selection(): def test_deepseek_model_selection() -> None:
""" """
Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state. Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state.
""" """
@@ -10,7 +10,7 @@ def test_deepseek_model_selection():
assert ai_client._provider == "deepseek" assert ai_client._provider == "deepseek"
assert ai_client._model == "deepseek-chat" assert ai_client._model == "deepseek-chat"
def test_deepseek_completion_logic(): def test_deepseek_completion_logic() -> None:
""" """
Verifies that ai_client.send() correctly calls the DeepSeek API and returns content. Verifies that ai_client.send() correctly calls the DeepSeek API and returns content.
""" """
@@ -30,7 +30,7 @@ def test_deepseek_completion_logic():
assert result == "DeepSeek Response" assert result == "DeepSeek Response"
assert mock_post.called assert mock_post.called
def test_deepseek_reasoning_logic(): def test_deepseek_reasoning_logic() -> None:
""" """
Verifies that reasoning_content is captured and wrapped in <thinking> tags. Verifies that reasoning_content is captured and wrapped in <thinking> tags.
""" """
@@ -54,7 +54,7 @@ def test_deepseek_reasoning_logic():
assert "<thinking>\nChain of thought\n</thinking>" in result assert "<thinking>\nChain of thought\n</thinking>" in result
assert "Final Answer" in result assert "Final Answer" in result
def test_deepseek_tool_calling(): def test_deepseek_tool_calling() -> None:
""" """
Verifies that DeepSeek provider correctly identifies and executes tool calls. Verifies that DeepSeek provider correctly identifies and executes tool calls.
""" """
@@ -103,7 +103,7 @@ def test_deepseek_tool_calling():
assert mock_dispatch.call_args[0][0] == "read_file" assert mock_dispatch.call_args[0][0] == "read_file"
assert mock_dispatch.call_args[0][1] == {"path": "test.txt"} assert mock_dispatch.call_args[0][1] == {"path": "test.txt"}
def test_deepseek_streaming(): def test_deepseek_streaming() -> None:
""" """
Verifies that DeepSeek provider correctly aggregates streaming chunks. Verifies that DeepSeek provider correctly aggregates streaming chunks.
""" """

View File

@@ -39,13 +39,13 @@ def test_execution_engine_basic_flow():
ready = engine.tick() ready = engine.tick()
assert len(ready) == 0 assert len(ready) == 0
def test_execution_engine_update_nonexistent_task(): def test_execution_engine_update_nonexistent_task() -> None:
dag = TrackDAG([]) dag = TrackDAG([])
engine = ExecutionEngine(dag) engine = ExecutionEngine(dag)
# Should not raise error, or handle gracefully # Should not raise error, or handle gracefully
engine.update_task_status("NONEXISTENT", "completed") engine.update_task_status("NONEXISTENT", "completed")
def test_execution_engine_status_persistence(): def test_execution_engine_status_persistence() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
dag = TrackDAG([t1]) dag = TrackDAG([t1])
engine = ExecutionEngine(dag) engine = ExecutionEngine(dag)
@@ -54,7 +54,7 @@ def test_execution_engine_status_persistence():
ready = engine.tick() ready = engine.tick()
assert len(ready) == 0 # Only 'todo' tasks should be returned by tick() if they are ready assert len(ready) == 0 # Only 'todo' tasks should be returned by tick() if they are ready
def test_execution_engine_auto_queue(): def test_execution_engine_auto_queue() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"])
dag = TrackDAG([t1, t2]) dag = TrackDAG([t1, t2])
@@ -76,7 +76,7 @@ def test_execution_engine_auto_queue():
assert ready[0].id == "T2" assert ready[0].id == "T2"
assert t2.status == "in_progress" assert t2.status == "in_progress"
def test_execution_engine_step_mode(): def test_execution_engine_step_mode() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", step_mode=True) t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", step_mode=True)
dag = TrackDAG([t1]) dag = TrackDAG([t1])
engine = ExecutionEngine(dag, auto_queue=True) engine = ExecutionEngine(dag, auto_queue=True)
@@ -92,7 +92,7 @@ def test_execution_engine_step_mode():
ready = engine.tick() ready = engine.tick()
assert len(ready) == 0 assert len(ready) == 0
def test_execution_engine_approve_task(): def test_execution_engine_approve_task() -> None:
t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker")
dag = TrackDAG([t1]) dag = TrackDAG([t1])
engine = ExecutionEngine(dag, auto_queue=False) engine = ExecutionEngine(dag, auto_queue=False)

View File

@@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from gemini_cli_adapter import GeminiCliAdapter from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapter(unittest.TestCase): class TestGeminiCliAdapter(unittest.TestCase):
def setUp(self): def setUp(self) -> None:
self.adapter = GeminiCliAdapter(binary_path="gemini") self.adapter = GeminiCliAdapter(binary_path="gemini")
@patch('subprocess.Popen') @patch('subprocess.Popen')

View File

@@ -15,7 +15,7 @@ from gemini_cli_adapter import GeminiCliAdapter
class TestGeminiCliAdapterParity(unittest.TestCase): class TestGeminiCliAdapterParity(unittest.TestCase):
def setUp(self): def setUp(self) -> None:
"""Set up a fresh adapter instance and reset session state for each test.""" """Set up a fresh adapter instance and reset session state for each test."""
# Patch session_logger to prevent file operations during tests # Patch session_logger to prevent file operations during tests
self.session_logger_patcher = patch('gemini_cli_adapter.session_logger') self.session_logger_patcher = patch('gemini_cli_adapter.session_logger')
@@ -25,7 +25,7 @@ class TestGeminiCliAdapterParity(unittest.TestCase):
self.adapter.last_usage = None self.adapter.last_usage = None
self.adapter.last_latency = 0.0 self.adapter.last_latency = 0.0
def tearDown(self): def tearDown(self) -> None:
self.session_logger_patcher.stop() self.session_logger_patcher.stop()
@patch('subprocess.Popen') @patch('subprocess.Popen')

View File

@@ -9,7 +9,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client import ai_client
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def setup_ai_client(): def setup_ai_client() -> None:
ai_client.reset_session() ai_client.reset_session()
ai_client.set_provider("gemini_cli", "gemini-2.5-flash") ai_client.set_provider("gemini_cli", "gemini-2.5-flash")
ai_client.confirm_and_run_callback = lambda script, base_dir: "Mocked execution" ai_client.confirm_and_run_callback = lambda script, base_dir: "Mocked execution"

View File

@@ -9,7 +9,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# Import the necessary functions from ai_client, including the reset helper # Import the necessary functions from ai_client, including the reset helper
from ai_client import get_gemini_cache_stats, reset_session from ai_client import get_gemini_cache_stats, reset_session
def test_get_gemini_cache_stats_with_mock_client(): def test_get_gemini_cache_stats_with_mock_client() -> None:
""" """
Test that get_gemini_cache_stats correctly processes cache lists Test that get_gemini_cache_stats correctly processes cache lists
from a mocked client instance. from a mocked client instance.

View File

@@ -5,7 +5,7 @@ import ai_client
from events import EventEmitter from events import EventEmitter
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
""" """
Fixture to create an instance of the gui_2.App class for testing. Fixture to create an instance of the gui_2.App class for testing.
It mocks functions that would render a window or block execution. It mocks functions that would render a window or block execution.

View File

@@ -3,7 +3,7 @@ from unittest.mock import patch
from gui_2 import App from gui_2 import App
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
with ( with (
patch('gui_2.load_config', return_value={'gui': {'show_windows': {}}}), patch('gui_2.load_config', return_value={'gui': {'show_windows': {}}}),
patch('gui_2.save_config'), patch('gui_2.save_config'),

View File

@@ -5,7 +5,7 @@ import ai_client
from events import EventEmitter from events import EventEmitter
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
if not hasattr(ai_client, 'events') or ai_client.events is None: if not hasattr(ai_client, 'events') or ai_client.events is None:
ai_client.events = EventEmitter() ai_client.events = EventEmitter()
with ( with (

View File

@@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient
TEST_CALLBACK_FILE = Path("temp_callback_output.txt") TEST_CALLBACK_FILE = Path("temp_callback_output.txt")
@pytest.fixture(scope="function", autouse=True) @pytest.fixture(scope="function", autouse=True)
def cleanup_callback_file(): def cleanup_callback_file() -> None:
"""Ensures the test callback file is cleaned up before and after each test.""" """Ensures the test callback file is cleaned up before and after each test."""
if TEST_CALLBACK_FILE.exists(): if TEST_CALLBACK_FILE.exists():
TEST_CALLBACK_FILE.unlink() TEST_CALLBACK_FILE.unlink()

View File

@@ -55,7 +55,7 @@ def test_performance_benchmarking(live_gui):
assert avg_fps >= 30, f"{gui_script} FPS {avg_fps:.2f} is below 30 FPS threshold" assert avg_fps >= 30, f"{gui_script} FPS {avg_fps:.2f} is below 30 FPS threshold"
assert avg_ft <= 33.3, f"{gui_script} Frame time {avg_ft:.2f}ms is above 33.3ms threshold" assert avg_ft <= 33.3, f"{gui_script} Frame time {avg_ft:.2f}ms is above 33.3ms threshold"
def test_performance_parity(): def test_performance_parity() -> None:
""" """
Compare the metrics collected in the parameterized test_performance_benchmarking. Compare the metrics collected in the parameterized test_performance_benchmarking.
""" """

View File

@@ -50,7 +50,7 @@ def test_handle_generate_send_pushes_event(mock_gui):
assert event.disc_text == "disc_text" assert event.disc_text == "disc_text"
assert event.base_dir == "." assert event.base_dir == "."
def test_user_request_event_payload(): def test_user_request_event_payload() -> None:
payload = UserRequestEvent( payload = UserRequestEvent(
prompt="hello", prompt="hello",
stable_md="md", stable_md="md",
@@ -66,7 +66,7 @@ def test_user_request_event_payload():
assert d["base_dir"] == "." assert d["base_dir"] == "."
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_async_event_queue(): async def test_async_event_queue() -> None:
from events import AsyncEventQueue from events import AsyncEventQueue
q = AsyncEventQueue() q = AsyncEventQueue()
await q.put("test_event", {"data": 123}) await q.put("test_event", {"data": 123})

View File

@@ -12,7 +12,7 @@ spec.loader.exec_module(gui_legacy)
from gui_legacy import App from gui_legacy import App
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
dpg.create_context() dpg.create_context()
with patch('dearpygui.dearpygui.create_viewport'), \ with patch('dearpygui.dearpygui.create_viewport'), \
patch('dearpygui.dearpygui.setup_dearpygui'), \ patch('dearpygui.dearpygui.setup_dearpygui'), \

View File

@@ -7,7 +7,7 @@ from gui_legacy import App
import ai_client import ai_client
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
""" """
Fixture to create an instance of the App class for testing. Fixture to create an instance of the App class for testing.
It creates a real DPG context but mocks functions that would It creates a real DPG context but mocks functions that would

View File

@@ -16,7 +16,7 @@ spec.loader.exec_module(gui_legacy)
from gui_legacy import App from gui_legacy import App
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
""" """
Fixture to create an instance of the App class for testing. Fixture to create an instance of the App class for testing.
It creates a real DPG context but mocks functions that would It creates a real DPG context but mocks functions that would

View File

@@ -24,7 +24,7 @@ class TestHeadlessAPI(unittest.TestCase):
self.api = self.app_instance.create_api() self.api = self.app_instance.create_api()
self.client = TestClient(self.api) self.client = TestClient(self.api)
def test_health_endpoint(self): def test_health_endpoint(self) -> None:
response = self.client.get("/health") response = self.client.get("/health")
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {"status": "ok"}) self.assertEqual(response.json(), {"status": "ok"})
@@ -42,7 +42,7 @@ class TestHeadlessAPI(unittest.TestCase):
response = self.client.get("/status", headers=headers) response = self.client.get("/status", headers=headers)
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
def test_generate_endpoint(self): def test_generate_endpoint(self) -> None:
payload = { payload = {
"prompt": "Hello AI" "prompt": "Hello AI"
} }
@@ -100,7 +100,7 @@ class TestHeadlessAPI(unittest.TestCase):
if dummy_log.exists(): if dummy_log.exists():
dummy_log.unlink() dummy_log.unlink()
def test_get_context_endpoint(self): def test_get_context_endpoint(self) -> None:
response = self.client.get("/api/v1/context", headers=self.headers) response = self.client.get("/api/v1/context", headers=self.headers)
self.assertEqual(response.status_code, 200) self.assertEqual(response.status_code, 200)
data = response.json() data = response.json()
@@ -152,14 +152,14 @@ class TestHeadlessStartup(unittest.TestCase):
app.run() app.run()
mock_immapp_run.assert_called_once() mock_immapp_run.assert_called_once()
def test_fastapi_installed(): def test_fastapi_installed() -> None:
"""Verify that fastapi is installed.""" """Verify that fastapi is installed."""
try: try:
importlib.import_module("fastapi") importlib.import_module("fastapi")
except ImportError: except ImportError:
pytest.fail("fastapi is not installed") pytest.fail("fastapi is not installed")
def test_uvicorn_installed(): def test_uvicorn_installed() -> None:
"""Verify that uvicorn is installed.""" """Verify that uvicorn is installed."""
try: try:
importlib.import_module("uvicorn") importlib.import_module("uvicorn")

View File

@@ -6,7 +6,7 @@ import ai_client
import json import json
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_headless_verification_full_run(): async def test_headless_verification_full_run() -> None:
""" """
1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets. 1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets.
2. Simulate a full execution run using engine.run_linear(). 2. Simulate a full execution run using engine.run_linear().

View File

@@ -164,7 +164,7 @@ def test_history_persistence_across_turns(tmp_path):
assert len(proj_final["discussion"]["discussions"]["main"]["history"]) == 2 assert len(proj_final["discussion"]["discussions"]["main"]["history"]) == 2
# --- Tests for AI Client History Management --- # --- Tests for AI Client History Management ---
def test_get_history_bleed_stats_basic(): def test_get_history_bleed_stats_basic() -> None:
""" """
Tests basic retrieval of history bleed statistics from the AI client. Tests basic retrieval of history bleed statistics from the AI client.
""" """

View File

@@ -11,12 +11,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from api_hook_client import ApiHookClient from api_hook_client import ApiHookClient
import gui_legacy import gui_legacy
def test_hooks_enabled_via_cli(): def test_hooks_enabled_via_cli() -> None:
with patch.object(sys, 'argv', ['gui_legacy.py', '--enable-test-hooks']): with patch.object(sys, 'argv', ['gui_legacy.py', '--enable-test-hooks']):
app = gui_legacy.App() app = gui_legacy.App()
assert app.test_hooks_enabled is True assert app.test_hooks_enabled is True
def test_hooks_disabled_by_default(): def test_hooks_disabled_by_default() -> None:
with patch.object(sys, 'argv', ['gui_legacy.py']): with patch.object(sys, 'argv', ['gui_legacy.py']):
if 'SLOP_TEST_HOOKS' in os.environ: if 'SLOP_TEST_HOOKS' in os.environ:
del os.environ['SLOP_TEST_HOOKS'] del os.environ['SLOP_TEST_HOOKS']

View File

@@ -13,7 +13,7 @@ sys.modules["gui_legacy"] = gui_legacy
spec.loader.exec_module(gui_legacy) spec.loader.exec_module(gui_legacy)
from gui_legacy import App from gui_legacy import App
def test_new_hubs_defined_in_window_info(): def test_new_hubs_defined_in_window_info() -> None:
""" """
Verifies that the new consolidated Hub windows are defined in the App's window_info. Verifies that the new consolidated Hub windows are defined in the App's window_info.
This ensures they will be available in the 'Windows' menu. This ensures they will be available in the 'Windows' menu.

View File

@@ -7,7 +7,7 @@ from events import UserRequestEvent
import ai_client import ai_client
@pytest.fixture @pytest.fixture
def mock_app(): def mock_app() -> None:
with ( with (
patch('gui_2.load_config', return_value={ patch('gui_2.load_config', return_value={
"ai": {"provider": "gemini", "model": "model-1", "temperature": 0.0, "max_tokens": 100, "history_trunc_limit": 1000}, "ai": {"provider": "gemini", "model": "model-1", "temperature": 0.0, "max_tokens": 100, "history_trunc_limit": 1000},

View File

@@ -8,7 +8,7 @@ from log_registry import LogRegistry
class TestLogRegistry(unittest.TestCase): class TestLogRegistry(unittest.TestCase):
def setUp(self): def setUp(self) -> None:
"""Set up a temporary directory and registry file for each test.""" """Set up a temporary directory and registry file for each test."""
self.temp_dir = tempfile.TemporaryDirectory() self.temp_dir = tempfile.TemporaryDirectory()
self.registry_path = os.path.join(self.temp_dir.name, "registry.toml") self.registry_path = os.path.join(self.temp_dir.name, "registry.toml")
@@ -19,11 +19,11 @@ class TestLogRegistry(unittest.TestCase):
# Instantiate LogRegistry. This will load from the empty file. # Instantiate LogRegistry. This will load from the empty file.
self.registry = LogRegistry(self.registry_path) self.registry = LogRegistry(self.registry_path)
def tearDown(self): def tearDown(self) -> None:
"""Clean up the temporary directory and its contents after each test.""" """Clean up the temporary directory and its contents after each test."""
self.temp_dir.cleanup() self.temp_dir.cleanup()
def test_instantiation(self): def test_instantiation(self) -> None:
"""Test LogRegistry instantiation with a file path.""" """Test LogRegistry instantiation with a file path."""
self.assertIsInstance(self.registry, LogRegistry) self.assertIsInstance(self.registry, LogRegistry)
self.assertEqual(self.registry.registry_path, self.registry_path) self.assertEqual(self.registry.registry_path, self.registry_path)
@@ -31,7 +31,7 @@ class TestLogRegistry(unittest.TestCase):
self.assertTrue(os.path.exists(self.registry_path)) self.assertTrue(os.path.exists(self.registry_path))
# We will verify content in other tests that explicitly save and reload. # We will verify content in other tests that explicitly save and reload.
def test_register_session(self): def test_register_session(self) -> None:
"""Test registering a new session.""" """Test registering a new session."""
session_id = "session-123" session_id = "session-123"
path = "/path/to/session/123" path = "/path/to/session/123"
@@ -53,7 +53,7 @@ class TestLogRegistry(unittest.TestCase):
reloaded_start_time = datetime.fromisoformat(reloaded_session_data['start_time']) reloaded_start_time = datetime.fromisoformat(reloaded_session_data['start_time'])
self.assertAlmostEqual(reloaded_start_time, start_time, delta=timedelta(seconds=1)) self.assertAlmostEqual(reloaded_start_time, start_time, delta=timedelta(seconds=1))
def test_update_session_metadata(self): def test_update_session_metadata(self) -> None:
"""Test updating session metadata.""" """Test updating session metadata."""
session_id = "session-456" session_id = "session-456"
path = "/path/to/session/456" path = "/path/to/session/456"
@@ -84,7 +84,7 @@ class TestLogRegistry(unittest.TestCase):
self.assertTrue(reloaded_session_data.get('metadata', {}).get('whitelisted', False)) self.assertTrue(reloaded_session_data.get('metadata', {}).get('whitelisted', False))
self.assertTrue(reloaded_session_data.get('whitelisted', False)) # Check main flag too self.assertTrue(reloaded_session_data.get('whitelisted', False)) # Check main flag too
def test_is_session_whitelisted(self): def test_is_session_whitelisted(self) -> None:
"""Test checking if a session is whitelisted.""" """Test checking if a session is whitelisted."""
session_id_whitelisted = "session-789-whitelisted" session_id_whitelisted = "session-789-whitelisted"
path_w = "/path/to/session/789" path_w = "/path/to/session/789"
@@ -102,7 +102,7 @@ class TestLogRegistry(unittest.TestCase):
# Test for a non-existent session, should be treated as not whitelisted # Test for a non-existent session, should be treated as not whitelisted
self.assertFalse(self.registry.is_session_whitelisted("non-existent-session")) self.assertFalse(self.registry.is_session_whitelisted("non-existent-session"))
def test_get_old_non_whitelisted_sessions(self): def test_get_old_non_whitelisted_sessions(self) -> None:
"""Test retrieving old, non-whitelisted sessions.""" """Test retrieving old, non-whitelisted sessions."""
now = datetime.utcnow() now = datetime.utcnow()
# Define a cutoff time that is 7 days ago # Define a cutoff time that is 7 days ago

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from models import Ticket, Track, WorkerContext from models import Ticket, Track, WorkerContext
def test_ticket_instantiation(): def test_ticket_instantiation() -> None:
""" """
Verifies that a Ticket can be instantiated with its required fields: Verifies that a Ticket can be instantiated with its required fields:
id, description, status, assigned_to. id, description, status, assigned_to.
@@ -22,7 +22,7 @@ def test_ticket_instantiation():
assert ticket.assigned_to == assigned_to assert ticket.assigned_to == assigned_to
assert ticket.depends_on == [] assert ticket.depends_on == []
def test_ticket_with_dependencies(): def test_ticket_with_dependencies() -> None:
""" """
Verifies that a Ticket can store dependencies. Verifies that a Ticket can store dependencies.
""" """
@@ -35,7 +35,7 @@ def test_ticket_with_dependencies():
) )
assert ticket.depends_on == ["T1"] assert ticket.depends_on == ["T1"]
def test_track_instantiation(): def test_track_instantiation() -> None:
""" """
Verifies that a Track can be instantiated with its required fields: Verifies that a Track can be instantiated with its required fields:
id, description, and a list of Tickets. id, description, and a list of Tickets.
@@ -56,14 +56,14 @@ def test_track_instantiation():
assert track.tickets[0].id == "T1" assert track.tickets[0].id == "T1"
assert track.tickets[1].id == "T2" assert track.tickets[1].id == "T2"
def test_track_can_handle_empty_tickets(): def test_track_can_handle_empty_tickets() -> None:
""" """
Verifies that a Track can be instantiated with an empty list of tickets. Verifies that a Track can be instantiated with an empty list of tickets.
""" """
track = Track(id="TRACK-2", description="Empty Track", tickets=[]) track = Track(id="TRACK-2", description="Empty Track", tickets=[])
assert track.tickets == [] assert track.tickets == []
def test_worker_context_instantiation(): def test_worker_context_instantiation() -> None:
""" """
Verifies that a WorkerContext can be instantiated with ticket_id, Verifies that a WorkerContext can be instantiated with ticket_id,
model_name, and messages. model_name, and messages.
@@ -83,7 +83,7 @@ def test_worker_context_instantiation():
assert context.model_name == model_name assert context.model_name == model_name
assert context.messages == messages assert context.messages == messages
def test_ticket_mark_blocked(): def test_ticket_mark_blocked() -> None:
""" """
Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'. Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'.
Note: The reason field might need to be added to the Ticket class. Note: The reason field might need to be added to the Ticket class.
@@ -92,7 +92,7 @@ def test_ticket_mark_blocked():
ticket.mark_blocked("Waiting for API key") ticket.mark_blocked("Waiting for API key")
assert ticket.status == "blocked" assert ticket.status == "blocked"
def test_ticket_mark_complete(): def test_ticket_mark_complete() -> None:
""" """
Verifies that ticket.mark_complete() sets the status to 'completed'. Verifies that ticket.mark_complete() sets the status to 'completed'.
""" """
@@ -100,7 +100,7 @@ def test_ticket_mark_complete():
ticket.mark_complete() ticket.mark_complete()
assert ticket.status == "completed" assert ticket.status == "completed"
def test_track_get_executable_tickets(): def test_track_get_executable_tickets() -> None:
""" """
Verifies that track.get_executable_tickets() returns only 'todo' tickets Verifies that track.get_executable_tickets() returns only 'todo' tickets
whose dependencies are all 'completed'. whose dependencies are all 'completed'.
@@ -124,7 +124,7 @@ def test_track_get_executable_tickets():
assert "T6" in executable_ids assert "T6" in executable_ids
assert len(executable_ids) == 2 assert len(executable_ids) == 2
def test_track_get_executable_tickets_complex(): def test_track_get_executable_tickets_complex() -> None:
""" """
Verifies executable tickets with complex dependency chains. Verifies executable tickets with complex dependency chains.
Chain: T1 (comp) -> T2 (todo) -> T3 (todo) Chain: T1 (comp) -> T2 (todo) -> T3 (todo)

View File

@@ -6,7 +6,7 @@ import time
from gui_2 import App from gui_2 import App
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
with ( with (
patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}), patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
patch('gui_2.save_config'), patch('gui_2.save_config'),

View File

@@ -1,7 +1,7 @@
import pytest import pytest
from mma_prompts import PROMPTS from mma_prompts import PROMPTS
def test_tier1_epic_init_constraints(): def test_tier1_epic_init_constraints() -> None:
prompt = PROMPTS["tier1_epic_init"] prompt = PROMPTS["tier1_epic_init"]
assert "Godot ECS Flat List format" in prompt assert "Godot ECS Flat List format" in prompt
assert "JSON array" in prompt assert "JSON array" in prompt
@@ -9,19 +9,19 @@ def test_tier1_epic_init_constraints():
assert "severity" in prompt assert "severity" in prompt
assert "IGNORE all source code" in prompt assert "IGNORE all source code" in prompt
def test_tier1_track_delegation_constraints(): def test_tier1_track_delegation_constraints() -> None:
prompt = PROMPTS["tier1_track_delegation"] prompt = PROMPTS["tier1_track_delegation"]
assert "Track Brief" in prompt assert "Track Brief" in prompt
assert "AST Skeleton View" in prompt assert "AST Skeleton View" in prompt
assert "IGNORE unrelated module docs" in prompt assert "IGNORE unrelated module docs" in prompt
def test_tier1_macro_merge_constraints(): def test_tier1_macro_merge_constraints() -> None:
prompt = PROMPTS["tier1_macro_merge"] prompt = PROMPTS["tier1_macro_merge"]
assert "Macro-Merge" in prompt assert "Macro-Merge" in prompt
assert "Macro-Diff" in prompt assert "Macro-Diff" in prompt
assert "IGNORE Tier 3 trial-and-error" in prompt assert "IGNORE Tier 3 trial-and-error" in prompt
def test_tier2_sprint_planning_constraints(): def test_tier2_sprint_planning_constraints() -> None:
prompt = PROMPTS["tier2_sprint_planning"] prompt = PROMPTS["tier2_sprint_planning"]
assert "Tickets" in prompt assert "Tickets" in prompt
assert "Godot ECS Flat List format" in prompt assert "Godot ECS Flat List format" in prompt
@@ -30,20 +30,20 @@ def test_tier2_sprint_planning_constraints():
assert "Skeleton View" in prompt assert "Skeleton View" in prompt
assert "Curated Implementation View" in prompt assert "Curated Implementation View" in prompt
def test_tier2_code_review_constraints(): def test_tier2_code_review_constraints() -> None:
prompt = PROMPTS["tier2_code_review"] prompt = PROMPTS["tier2_code_review"]
assert "Code Review" in prompt assert "Code Review" in prompt
assert "IGNORE the Contributor's internal trial-and-error" in prompt assert "IGNORE the Contributor's internal trial-and-error" in prompt
assert "Tier 4 (QA) logs" in prompt assert "Tier 4 (QA) logs" in prompt
def test_tier2_track_finalization_constraints(): def test_tier2_track_finalization_constraints() -> None:
prompt = PROMPTS["tier2_track_finalization"] prompt = PROMPTS["tier2_track_finalization"]
assert "Track Finalization" in prompt assert "Track Finalization" in prompt
assert "Executive Summary" in prompt assert "Executive Summary" in prompt
assert "Macro-Diff" in prompt assert "Macro-Diff" in prompt
assert "Dependency Delta" in prompt assert "Dependency Delta" in prompt
def test_tier2_contract_first_constraints(): def test_tier2_contract_first_constraints() -> None:
prompt = PROMPTS["tier2_contract_first"] prompt = PROMPTS["tier2_contract_first"]
assert "Stub Ticket" in prompt assert "Stub Ticket" in prompt
assert "Consumer Ticket" in prompt assert "Consumer Ticket" in prompt

View File

@@ -4,7 +4,7 @@ import asyncio
from gui_2 import App from gui_2 import App
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
with ( with (
patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}), patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}),
patch('gui_2.save_config'), patch('gui_2.save_config'),

View File

@@ -7,7 +7,7 @@ import multi_agent_conductor
from models import Track, Ticket from models import Track, Ticket
@pytest.fixture @pytest.fixture
def mock_ai_client(): def mock_ai_client() -> None:
with patch("ai_client.send") as mock_send: with patch("ai_client.send") as mock_send:
yield mock_send yield mock_send
@@ -40,7 +40,7 @@ def test_generate_tickets(mock_ai_client):
assert tickets[1]["id"] == "T-002" assert tickets[1]["id"] == "T-002"
assert tickets[1]["depends_on"] == ["T-001"] assert tickets[1]["depends_on"] == ["T-001"]
def test_topological_sort(): def test_topological_sort() -> None:
tickets = [ tickets = [
{"id": "T-002", "description": "Dep on 001", "depends_on": ["T-001"]}, {"id": "T-002", "description": "Dep on 001", "depends_on": ["T-001"]},
{"id": "T-001", "description": "Base", "depends_on": []}, {"id": "T-001", "description": "Base", "depends_on": []},
@@ -51,7 +51,7 @@ def test_topological_sort():
assert sorted_tickets[1]["id"] == "T-002" assert sorted_tickets[1]["id"] == "T-002"
assert sorted_tickets[2]["id"] == "T-003" assert sorted_tickets[2]["id"] == "T-003"
def test_topological_sort_circular(): def test_topological_sort_circular() -> None:
tickets = [ tickets = [
{"id": "T-001", "depends_on": ["T-002"]}, {"id": "T-001", "depends_on": ["T-002"]},
{"id": "T-002", "depends_on": ["T-001"]} {"id": "T-002", "depends_on": ["T-001"]}
@@ -59,7 +59,7 @@ def test_topological_sort_circular():
with pytest.raises(ValueError, match="Circular dependency detected"): with pytest.raises(ValueError, match="Circular dependency detected"):
conductor_tech_lead.topological_sort(tickets) conductor_tech_lead.topological_sort(tickets)
def test_track_executable_tickets(): def test_track_executable_tickets() -> None:
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user") t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"]) t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"])
track = Track(id="track_1", description="desc", tickets=[t1, t2]) track = Track(id="track_1", description="desc", tickets=[t1, t2])
@@ -73,7 +73,7 @@ def test_track_executable_tickets():
assert executable[0].id == "T2" assert executable[0].id == "T2"
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_conductor_engine_run_linear(): async def test_conductor_engine_run_linear() -> None:
t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user") t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user")
t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"]) t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"])
track = Track(id="track_1", description="desc", tickets=[t1, t2]) track = Track(id="track_1", description="desc", tickets=[t1, t2])
@@ -89,7 +89,7 @@ async def test_conductor_engine_run_linear():
assert t2.status == "completed" assert t2.status == "completed"
assert mock_worker.call_count == 2 assert mock_worker.call_count == 2
def test_conductor_engine_parse_json_tickets(): def test_conductor_engine_parse_json_tickets() -> None:
track = Track(id="track_1", description="desc") track = Track(id="track_1", description="desc")
engine = multi_agent_conductor.ConductorEngine(track) engine = multi_agent_conductor.ConductorEngine(track)
json_data = json.dumps([ json_data = json.dumps([

View File

@@ -7,7 +7,7 @@ from pathlib import Path
import orchestrator_pm import orchestrator_pm
class TestOrchestratorPMHistory(unittest.TestCase): class TestOrchestratorPMHistory(unittest.TestCase):
def setUp(self): def setUp(self) -> None:
self.test_dir = Path("test_conductor") self.test_dir = Path("test_conductor")
self.test_dir.mkdir(exist_ok=True) self.test_dir.mkdir(exist_ok=True)
self.archive_dir = self.test_dir / "archive" self.archive_dir = self.test_dir / "archive"
@@ -15,7 +15,7 @@ class TestOrchestratorPMHistory(unittest.TestCase):
self.archive_dir.mkdir(exist_ok=True) self.archive_dir.mkdir(exist_ok=True)
self.tracks_dir.mkdir(exist_ok=True) self.tracks_dir.mkdir(exist_ok=True)
def tearDown(self): def tearDown(self) -> None:
if self.test_dir.exists(): if self.test_dir.exists():
shutil.rmtree(self.test_dir) shutil.rmtree(self.test_dir)

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from performance_monitor import PerformanceMonitor from performance_monitor import PerformanceMonitor
def test_perf_monitor_basic_timing(): def test_perf_monitor_basic_timing() -> None:
pm = PerformanceMonitor() pm = PerformanceMonitor()
pm.start_frame() pm.start_frame()
time.sleep(0.02) # 20ms time.sleep(0.02) # 20ms
@@ -17,7 +17,7 @@ def test_perf_monitor_basic_timing():
assert metrics['last_frame_time_ms'] >= 20.0 assert metrics['last_frame_time_ms'] >= 20.0
pm.stop() pm.stop()
def test_perf_monitor_component_timing(): def test_perf_monitor_component_timing() -> None:
pm = PerformanceMonitor() pm = PerformanceMonitor()
pm.start_component("test_comp") pm.start_component("test_comp")
time.sleep(0.01) time.sleep(0.01)

View File

@@ -4,7 +4,7 @@ import ai_client
from gui_2 import App from gui_2 import App
@pytest.fixture @pytest.fixture
def app_instance(): def app_instance() -> None:
with ( with (
patch('gui_2.load_config', return_value={'ai': {'provider': 'gemini', 'model': 'gemini-2.5-flash-lite'}, 'projects': {}}), patch('gui_2.load_config', return_value={'ai': {'provider': 'gemini', 'model': 'gemini-2.5-flash-lite'}, 'projects': {}}),
patch('gui_2.save_config'), patch('gui_2.save_config'),

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_base import BaseSimulation from simulation.sim_base import BaseSimulation
def test_base_simulation_init(): def test_base_simulation_init() -> None:
with patch('simulation.sim_base.ApiHookClient') as mock_client_class: with patch('simulation.sim_base.ApiHookClient') as mock_client_class:
mock_client = MagicMock() mock_client = MagicMock()
mock_client_class.return_value = mock_client mock_client_class.return_value = mock_client
@@ -16,7 +16,7 @@ def test_base_simulation_init():
assert sim.client == mock_client assert sim.client == mock_client
assert sim.sim is not None assert sim.sim is not None
def test_base_simulation_setup(): def test_base_simulation_setup() -> None:
mock_client = MagicMock() mock_client = MagicMock()
mock_client.wait_for_server.return_value = True mock_client.wait_for_server.return_value = True
with patch('simulation.sim_base.WorkflowSimulator') as mock_sim_class: with patch('simulation.sim_base.WorkflowSimulator') as mock_sim_class:

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_context import ContextSimulation from simulation.sim_context import ContextSimulation
def test_context_simulation_run(): def test_context_simulation_run() -> None:
mock_client = MagicMock() mock_client = MagicMock()
mock_client.wait_for_server.return_value = True mock_client.wait_for_server.return_value = True
# Mock project config # Mock project config

View File

@@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.sim_tools import ToolsSimulation from simulation.sim_tools import ToolsSimulation
def test_tools_simulation_run(): def test_tools_simulation_run() -> None:
mock_client = MagicMock() mock_client = MagicMock()
mock_client.wait_for_server.return_value = True mock_client.wait_for_server.return_value = True
# Mock session entries with tool output # Mock session entries with tool output

View File

@@ -19,7 +19,7 @@ class MockDialog:
return res return res
@pytest.fixture @pytest.fixture
def mock_ai_client(): def mock_ai_client() -> None:
with patch("ai_client.send") as mock_send: with patch("ai_client.send") as mock_send:
mock_send.return_value = "Task completed" mock_send.return_value = "Task completed"
yield mock_send yield mock_send

View File

@@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch
import subprocess import subprocess
from shell_runner import run_powershell from shell_runner import run_powershell
def test_run_powershell_qa_callback_on_failure(): def test_run_powershell_qa_callback_on_failure() -> None:
""" """
Test that qa_callback is called when a powershell command fails (non-zero exit code). Test that qa_callback is called when a powershell command fails (non-zero exit code).
The result of the callback should be appended to the output. The result of the callback should be appended to the output.
@@ -27,7 +27,7 @@ def test_run_powershell_qa_callback_on_failure():
assert "STDERR:\nsomething went wrong" in output assert "STDERR:\nsomething went wrong" in output
assert "EXIT CODE: 1" in output assert "EXIT CODE: 1" in output
def test_run_powershell_qa_callback_on_stderr_only(): def test_run_powershell_qa_callback_on_stderr_only() -> None:
""" """
Test that qa_callback is called when a command has stderr even if exit code is 0. Test that qa_callback is called when a command has stderr even if exit code is 0.
""" """
@@ -45,7 +45,7 @@ def test_run_powershell_qa_callback_on_stderr_only():
assert "QA ANALYSIS: Ignorable warning." in output assert "QA ANALYSIS: Ignorable warning." in output
assert "STDOUT:\nSuccess" in output assert "STDOUT:\nSuccess" in output
def test_run_powershell_no_qa_callback_on_success(): def test_run_powershell_no_qa_callback_on_success() -> None:
""" """
Test that qa_callback is NOT called when the command succeeds without stderr. Test that qa_callback is NOT called when the command succeeds without stderr.
""" """
@@ -64,7 +64,7 @@ def test_run_powershell_no_qa_callback_on_success():
assert "EXIT CODE: 0" in output assert "EXIT CODE: 0" in output
assert "QA ANALYSIS" not in output assert "QA ANALYSIS" not in output
def test_run_powershell_optional_qa_callback(): def test_run_powershell_optional_qa_callback() -> None:
""" """
Test that run_powershell still works without providing a qa_callback. Test that run_powershell still works without providing a qa_callback.
""" """
@@ -81,7 +81,7 @@ def test_run_powershell_optional_qa_callback():
assert "STDERR:\nerror" in output assert "STDERR:\nerror" in output
assert "EXIT CODE: 1" in output assert "EXIT CODE: 1" in output
def test_end_to_end_tier4_integration(): def test_end_to_end_tier4_integration() -> None:
""" """
Verifies that shell_runner.run_powershell correctly uses ai_client.run_tier4_analysis. Verifies that shell_runner.run_powershell correctly uses ai_client.run_tier4_analysis.
""" """
@@ -101,7 +101,7 @@ def test_end_to_end_tier4_integration():
mock_analysis.assert_called_once_with(stderr_content) mock_analysis.assert_called_once_with(stderr_content)
assert f"QA ANALYSIS:\n{expected_analysis}" in output assert f"QA ANALYSIS:\n{expected_analysis}" in output
def test_ai_client_passes_qa_callback(): def test_ai_client_passes_qa_callback() -> None:
""" """
Verifies that ai_client.send passes the qa_callback down to the provider function. Verifies that ai_client.send passes the qa_callback down to the provider function.
""" """
@@ -123,7 +123,7 @@ def test_ai_client_passes_qa_callback():
# qa_callback is the 7th positional argument in _send_gemini # qa_callback is the 7th positional argument in _send_gemini
assert args[6] == qa_callback assert args[6] == qa_callback
def test_gemini_provider_passes_qa_callback_to_run_script(): def test_gemini_provider_passes_qa_callback_to_run_script() -> None:
""" """
Verifies that _send_gemini passes the qa_callback to _run_script. Verifies that _send_gemini passes the qa_callback to _run_script.
""" """

View File

@@ -14,7 +14,7 @@ def test_build_tier1_context_exists():
# other.py should be summarized, not full content in a code block # other.py should be summarized, not full content in a code block
assert "Other content" not in result or "Summarized" in result # Assuming summary format assert "Other content" not in result or "Summarized" in result # Assuming summary format
def test_build_tier2_context_exists(): def test_build_tier2_context_exists() -> None:
file_items = [ file_items = [
{"path": Path("other.py"), "entry": "other.py", "content": "Other content", "error": False} {"path": Path("other.py"), "entry": "other.py", "content": "Other content", "error": False}
] ]
@@ -44,7 +44,7 @@ def test_build_tier3_context_ast_skeleton(monkeypatch):
mock_parser_class.assert_called_once_with("python") mock_parser_class.assert_called_once_with("python")
mock_parser_instance.get_skeleton.assert_called_once_with("def other():\n pass") mock_parser_instance.get_skeleton.assert_called_once_with("def other():\n pass")
def test_build_tier3_context_exists(): def test_build_tier3_context_exists() -> None:
file_items = [ file_items = [
{"path": Path("focus.py"), "entry": "focus.py", "content": "def focus():\n pass", "error": False}, {"path": Path("focus.py"), "entry": "focus.py", "content": "def focus():\n pass", "error": False},
{"path": Path("other.py"), "entry": "other.py", "content": "def other():\n pass", "error": False} {"path": Path("other.py"), "entry": "other.py", "content": "def other():\n pass", "error": False}
@@ -91,7 +91,7 @@ def test_build_files_section_with_dicts(tmp_path):
assert "content1" in result assert "content1" in result
assert "file1.txt" in result assert "file1.txt" in result
def test_tiered_context_by_tier_field(): def test_tiered_context_by_tier_field() -> None:
file_items = [ file_items = [
{"path": Path("tier1_file.txt"), "entry": "tier1_file.txt", "content": "Full Tier 1 Content\nLine 2", "tier": 1}, {"path": Path("tier1_file.txt"), "entry": "tier1_file.txt", "content": "Full Tier 1 Content\nLine 2", "tier": 1},
{"path": Path("tier3_file.txt"), "entry": "tier3_file.txt", "content": "Full Tier 3 Content\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\nLine 9\nLine 10", "tier": 3}, {"path": Path("tier3_file.txt"), "entry": "tier3_file.txt", "content": "Full Tier 3 Content\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\nLine 9\nLine 10", "tier": 3},

View File

@@ -7,7 +7,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import ai_client import ai_client
def test_token_usage_tracking(): def test_token_usage_tracking() -> None:
ai_client.reset_session() ai_client.reset_session()
# Mock an API response with token usage # Mock an API response with token usage
usage = {"prompt_tokens": 100, "candidates_tokens": 50, "total_tokens": 150} usage = {"prompt_tokens": 100, "candidates_tokens": 50, "total_tokens": 150}

View File

@@ -6,7 +6,7 @@ from models import Metadata, TrackState, Ticket
# --- Pytest Tests --- # --- Pytest Tests ---
def test_track_state_instantiation(): def test_track_state_instantiation() -> None:
"""Test creating a TrackState object.""" """Test creating a TrackState object."""
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
metadata = Metadata( metadata = Metadata(
@@ -37,7 +37,7 @@ def test_track_state_instantiation():
assert track_state.tasks[0].description == "Design UI" assert track_state.tasks[0].description == "Design UI"
assert track_state.tasks[0].assigned_to == "dev1" assert track_state.tasks[0].assigned_to == "dev1"
def test_track_state_to_dict(): def test_track_state_to_dict() -> None:
"""Test the to_dict() method for serialization.""" """Test the to_dict() method for serialization."""
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
metadata = Metadata( metadata = Metadata(
@@ -72,7 +72,7 @@ def test_track_state_to_dict():
assert track_dict["tasks"][0]["description"] == "Add feature X" assert track_dict["tasks"][0]["description"] == "Add feature X"
assert track_dict["tasks"][0]["assigned_to"] == "dev3" assert track_dict["tasks"][0]["assigned_to"] == "dev3"
def test_track_state_from_dict(): def test_track_state_from_dict() -> None:
"""Test the from_dict() class method for deserialization.""" """Test the from_dict() class method for deserialization."""
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
track_dict_data = { track_dict_data = {
@@ -106,7 +106,7 @@ def test_track_state_from_dict():
assert track_state.tasks[0].assigned_to == "ops1" assert track_state.tasks[0].assigned_to == "ops1"
# Test case for empty lists and missing keys for robustness # Test case for empty lists and missing keys for robustness
def test_track_state_from_dict_empty_and_missing(): def test_track_state_from_dict_empty_and_missing() -> None:
"""Test from_dict with empty lists and missing optional keys.""" """Test from_dict with empty lists and missing optional keys."""
track_dict_data = { track_dict_data = {
"metadata": { "metadata": {
@@ -128,7 +128,7 @@ def test_track_state_from_dict_empty_and_missing():
assert len(track_state.tasks) == 0 assert len(track_state.tasks) == 0
# Test case for to_dict with None values or missing optional data # Test case for to_dict with None values or missing optional data
def test_track_state_to_dict_with_none(): def test_track_state_to_dict_with_none() -> None:
"""Test to_dict with None values in optional fields.""" """Test to_dict with None values in optional fields."""
now = datetime.now(timezone.utc) now = datetime.now(timezone.utc)
metadata = Metadata( metadata = Metadata(

View File

@@ -1,7 +1,7 @@
import tree_sitter_python as tspython import tree_sitter_python as tspython
from tree_sitter import Language, Parser from tree_sitter import Language, Parser
def test_tree_sitter_python_setup(): def test_tree_sitter_python_setup() -> None:
""" """
Verifies that tree-sitter and tree-sitter-python are correctly installed Verifies that tree-sitter and tree-sitter-python are correctly installed
and can parse a simple Python function string. and can parse a simple Python function string.

View File

@@ -7,11 +7,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.user_agent import UserSimAgent from simulation.user_agent import UserSimAgent
def test_user_agent_instantiation(): def test_user_agent_instantiation() -> None:
agent = UserSimAgent(hook_client=None) agent = UserSimAgent(hook_client=None)
assert agent is not None assert agent is not None
def test_perform_action_with_delay(): def test_perform_action_with_delay() -> None:
agent = UserSimAgent(hook_client=None) agent = UserSimAgent(hook_client=None)
called = False called = False

View File

@@ -8,12 +8,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from simulation.workflow_sim import WorkflowSimulator from simulation.workflow_sim import WorkflowSimulator
def test_simulator_instantiation(): def test_simulator_instantiation() -> None:
client = MagicMock() client = MagicMock()
sim = WorkflowSimulator(client) sim = WorkflowSimulator(client)
assert sim is not None assert sim is not None
def test_setup_new_project(): def test_setup_new_project() -> None:
client = MagicMock() client = MagicMock()
sim = WorkflowSimulator(client) sim = WorkflowSimulator(client)
# Mock responses for wait_for_server # Mock responses for wait_for_server
@@ -24,7 +24,7 @@ def test_setup_new_project():
client.set_value.assert_any_call("project_git_dir", "/tmp/test_git") client.set_value.assert_any_call("project_git_dir", "/tmp/test_git")
client.click.assert_any_call("btn_project_save") client.click.assert_any_call("btn_project_save")
def test_discussion_switching(): def test_discussion_switching() -> None:
client = MagicMock() client = MagicMock()
sim = WorkflowSimulator(client) sim = WorkflowSimulator(client)
sim.create_discussion("NewDisc") sim.create_discussion("NewDisc")
@@ -33,7 +33,7 @@ def test_discussion_switching():
sim.switch_discussion("NewDisc") sim.switch_discussion("NewDisc")
client.select_list_item.assert_called_with("disc_listbox", "NewDisc") client.select_list_item.assert_called_with("disc_listbox", "NewDisc")
def test_history_truncation(): def test_history_truncation() -> None:
client = MagicMock() client = MagicMock()
sim = WorkflowSimulator(client) sim = WorkflowSimulator(client)
sim.truncate_history(3) sim.truncate_history(3)

View File

@@ -33,12 +33,12 @@ class TestMMAGUIRobust(unittest.TestCase):
print("GUI started.") print("GUI started.")
@classmethod @classmethod
def tearDownClass(cls): def tearDownClass(cls) -> None:
if cls.gui_process: if cls.gui_process:
cls.gui_process.terminate() cls.gui_process.terminate()
cls.gui_process.wait(timeout=5) cls.gui_process.wait(timeout=5)
def test_mma_state_ingestion(self): def test_mma_state_ingestion(self) -> None:
"""Verify that mma_state_update event correctly updates GUI state.""" """Verify that mma_state_update event correctly updates GUI state."""
track_data = { track_data = {
"id": "robust_test_track", "id": "robust_test_track",
@@ -69,7 +69,7 @@ class TestMMAGUIRobust(unittest.TestCase):
self.assertEqual(status["active_tickets"][2]["status"], "complete") self.assertEqual(status["active_tickets"][2]["status"], "complete")
print("MMA state ingestion verified successfully.") print("MMA state ingestion verified successfully.")
def test_mma_step_approval_trigger(self): def test_mma_step_approval_trigger(self) -> None:
"""Verify that mma_step_approval event sets the pending approval flag.""" """Verify that mma_step_approval event sets the pending approval flag."""
payload = { payload = {
"ticket_id": "T2", "ticket_id": "T2",

View File

@@ -9,7 +9,7 @@ if PROJECT_ROOT not in sys.path:
from api_hook_client import ApiHookClient from api_hook_client import ApiHookClient
def diag_run(): def diag_run() -> None:
print("Launching GUI for manual inspection + automated hooks...") print("Launching GUI for manual inspection + automated hooks...")
# Use a log file for GUI output # Use a log file for GUI output
with open("gui_diag.log", "w") as log_file: with open("gui_diag.log", "w") as log_file:

View File

@@ -23,7 +23,7 @@ except ImportError as e:
print(f"Import error: {e}") print(f"Import error: {e}")
sys.exit(1) sys.exit(1)
def run_visual_mma_verification(): def run_visual_mma_verification() -> None:
print("Starting visual MMA verification test...") print("Starting visual MMA verification test...")
# Change current directory to project root # Change current directory to project root
original_dir = os.getcwd() original_dir = os.getcwd()

View File

@@ -1,4 +1,4 @@
# theme.py # theme.py
""" """
Theming support for manual_slop GUI. Theming support for manual_slop GUI.
@@ -289,7 +289,7 @@ def get_palette_colours(name: str) -> dict:
"""Return a copy of the colour dict for the named palette.""" """Return a copy of the colour dict for the named palette."""
return dict(_PALETTES.get(name, {})) return dict(_PALETTES.get(name, {}))
def apply(palette_name: str, overrides: dict | None = None): def apply(palette_name: str, overrides: dict | None = None) -> None:
""" """
Build a global DPG theme from the named palette plus optional per-colour Build a global DPG theme from the named palette plus optional per-colour
overrides, and bind it as the default theme. overrides, and bind it as the default theme.
@@ -332,7 +332,7 @@ def apply(palette_name: str, overrides: dict | None = None):
dpg.bind_theme(t) dpg.bind_theme(t)
_current_theme_tag = t _current_theme_tag = t
def apply_font(font_path: str, size: float = 14.0): def apply_font(font_path: str, size: float = 14.0) -> None:
""" """
Load the TTF at font_path at the given point size and bind it globally. Load the TTF at font_path at the given point size and bind it globally.
Safe to call multiple times. Uses a single persistent font_registry; only Safe to call multiple times. Uses a single persistent font_registry; only
@@ -362,13 +362,13 @@ def apply_font(font_path: str, size: float = 14.0):
_current_font_tag = font _current_font_tag = font
dpg.bind_font(font) dpg.bind_font(font)
def set_scale(factor: float): def set_scale(factor: float) -> None:
"""Set the global Dear PyGui font/UI scale factor.""" """Set the global Dear PyGui font/UI scale factor."""
global _current_scale global _current_scale
_current_scale = factor _current_scale = factor
dpg.set_global_font_scale(factor) dpg.set_global_font_scale(factor)
def save_to_config(config: dict): def save_to_config(config: dict) -> None:
"""Persist theme settings into the config dict under [theme].""" """Persist theme settings into the config dict under [theme]."""
config.setdefault("theme", {}) config.setdefault("theme", {})
config["theme"]["palette"] = _current_palette config["theme"]["palette"] = _current_palette
@@ -376,7 +376,7 @@ def save_to_config(config: dict):
config["theme"]["font_size"] = _current_font_size config["theme"]["font_size"] = _current_font_size
config["theme"]["scale"] = _current_scale config["theme"]["scale"] = _current_scale
def load_from_config(config: dict): def load_from_config(config: dict) -> None:
"""Read [theme] from config and apply everything.""" """Read [theme] from config and apply everything."""
t = config.get("theme", {}) t = config.get("theme", {})
palette = t.get("palette", "DPG Default") palette = t.get("palette", "DPG Default")

View File

@@ -1,4 +1,4 @@
# theme_2.py # theme_2.py
""" """
Theming support for manual_slop GUI — imgui-bundle port. Theming support for manual_slop GUI — imgui-bundle port.
@@ -203,7 +203,7 @@ def get_current_font_size() -> float:
def get_current_scale() -> float: def get_current_scale() -> float:
return _current_scale return _current_scale
def apply(palette_name: str): def apply(palette_name: str) -> None:
""" """
Apply a named palette by setting all ImGui style colors. Apply a named palette by setting all ImGui style colors.
Call this once per frame if you want dynamic switching, or once at startup. Call this once per frame if you want dynamic switching, or once at startup.
@@ -222,14 +222,14 @@ def apply(palette_name: str):
for col_enum, rgba in colours.items(): for col_enum, rgba in colours.items():
style.set_color_(col_enum, imgui.ImVec4(*rgba)) style.set_color_(col_enum, imgui.ImVec4(*rgba))
def set_scale(factor: float): def set_scale(factor: float) -> None:
"""Set the global font/UI scale factor.""" """Set the global font/UI scale factor."""
global _current_scale global _current_scale
_current_scale = factor _current_scale = factor
style = imgui.get_style() style = imgui.get_style()
style.font_scale_main = factor style.font_scale_main = factor
def save_to_config(config: dict): def save_to_config(config: dict) -> None:
"""Persist theme settings into the config dict under [theme].""" """Persist theme settings into the config dict under [theme]."""
config.setdefault("theme", {}) config.setdefault("theme", {})
config["theme"]["palette"] = _current_palette config["theme"]["palette"] = _current_palette
@@ -237,7 +237,7 @@ def save_to_config(config: dict):
config["theme"]["font_size"] = _current_font_size config["theme"]["font_size"] = _current_font_size
config["theme"]["scale"] = _current_scale config["theme"]["scale"] = _current_scale
def load_from_config(config: dict): def load_from_config(config: dict) -> None:
"""Read [theme] from config and apply palette + scale. Font is handled separately at startup.""" """Read [theme] from config and apply palette + scale. Font is handled separately at startup."""
global _current_font_path, _current_font_size, _current_scale, _current_palette global _current_font_path, _current_font_size, _current_scale, _current_palette
t = config.get("theme", {}) t = config.get("theme", {})
@@ -248,7 +248,7 @@ def load_from_config(config: dict):
# Don't apply here — imgui context may not exist yet. # Don't apply here — imgui context may not exist yet.
# Call apply_current() after imgui is initialised. # Call apply_current() after imgui is initialised.
def apply_current(): def apply_current() -> None:
"""Apply the loaded palette and scale. Call after imgui context exists.""" """Apply the loaded palette and scale. Call after imgui context exists."""
apply(_current_palette) apply(_current_palette)
set_scale(_current_scale) set_scale(_current_scale)