diff --git a/TASKS.md b/TASKS.md index d82359e..9ebf4f7 100644 --- a/TASKS.md +++ b/TASKS.md @@ -9,11 +9,18 @@ ### 1. AI-Optimized Python Style Refactor **Track**: `conductor/tracks/python_style_refactor_20260227/` -**Status**: COMPLETE +**Status**: IN_PROGRESS — Phase 4 **Completed**: - Phase 1: Research and Pilot Tooling [checkpoint: c75b926] - Phase 2: Core Refactor - Indentation and Newlines [checkpoint: db65162] - Phase 3: AI-Optimized Metadata and Final Cleanup [checkpoint: 3216e87] +**Remaining in Phase 4** (Codebase-Wide Type Hint Sweep): +- [ ] Core modules (18 files, ~200 items) +- [ ] Variable-only files (ai_client, mcp_client, mma_prompts) +- [ ] Scripts (~15 files) +- [ ] Simulation modules (~10 files) +- [ ] Test files (~80 files, ~400 items) +- [ ] Verification ### 2. Robust Live Simulation Verification **Track**: `conductor/tracks/robust_live_simulation_verification/` diff --git a/api_hooks.py b/api_hooks.py index 4372d48..8ba1d06 100644 --- a/api_hooks.py +++ b/api_hooks.py @@ -15,7 +15,7 @@ class HookServerInstance(ThreadingHTTPServer): class HookHandler(BaseHTTPRequestHandler): """Handles incoming HTTP requests for the API hooks.""" - def do_GET(self): + def do_GET(self) -> None: app = self.server.app session_logger.log_api_hook("GET", self.path, "") if self.path == '/status': @@ -175,7 +175,7 @@ class HookHandler(BaseHTTPRequestHandler): self.send_response(404) self.end_headers() - def do_POST(self): + def do_POST(self) -> None: app = self.server.app content_length = int(self.headers.get('Content-Length', 0)) body = self.rfile.read(content_length) @@ -283,7 +283,7 @@ class HookServer: self.server = None self.thread = None - def start(self): + def start(self) -> None: if self.thread and self.thread.is_alive(): return is_gemini_cli = getattr(self.app, 'current_provider', '') == 'gemini_cli' @@ -309,7 +309,7 @@ class HookServer: self.thread.start() logging.info(f"Hook server started on port {self.port}") - def stop(self): + def stop(self) -> None: 
if self.server: self.server.shutdown() self.server.server_close() diff --git a/conductor/tests/test_infrastructure.py b/conductor/tests/test_infrastructure.py index 9290e29..e8094bd 100644 --- a/conductor/tests/test_infrastructure.py +++ b/conductor/tests/test_infrastructure.py @@ -18,7 +18,7 @@ def run_ps_script(role, prompt): print(f"\n[Sub-Agent {role} Error]:\n{result.stderr}") return result -def test_subagent_script_qa_live(): +def test_subagent_script_qa_live() -> None: """Verify that the QA role works and returns a compressed fix.""" prompt = "Traceback (most recent call last): File 'test.py', line 1, in 1/0 ZeroDivisionError: division by zero" result = run_ps_script("QA", prompt) @@ -28,7 +28,7 @@ def test_subagent_script_qa_live(): # It should be short (QA agents compress) assert len(result.stdout.split()) < 40 -def test_subagent_script_worker_live(): +def test_subagent_script_worker_live() -> None: """Verify that the Worker role works and returns code.""" prompt = "Write a python function that returns 'hello world'" result = run_ps_script("Worker", prompt) @@ -36,14 +36,14 @@ def test_subagent_script_worker_live(): assert "def" in result.stdout.lower() assert "hello" in result.stdout.lower() -def test_subagent_script_utility_live(): +def test_subagent_script_utility_live() -> None: """Verify that the Utility role works.""" prompt = "Tell me 'True' if 1+1=2, otherwise 'False'" result = run_ps_script("Utility", prompt) assert result.returncode == 0 assert "true" in result.stdout.lower() -def test_subagent_isolation_live(): +def test_subagent_isolation_live() -> None: """Verify that the sub-agent is stateless and does not see the parent's conversation context.""" # This prompt asks the sub-agent about a 'secret' mentioned only here, not in its prompt. prompt = "What is the secret code I just told you? If I didn't tell you, say 'UNKNOWN'." 
diff --git a/conductor/tests/test_mma_exec.py b/conductor/tests/test_mma_exec.py index 4f5ce04..f382db2 100644 --- a/conductor/tests/test_mma_exec.py +++ b/conductor/tests/test_mma_exec.py @@ -3,7 +3,7 @@ import os from unittest.mock import patch, MagicMock from scripts.mma_exec import create_parser, get_role_documents, execute_agent, get_model_for_role, get_dependencies -def test_parser_role_choices(): +def test_parser_role_choices() -> None: """Test that the parser accepts valid roles and the prompt argument.""" parser = create_parser() valid_roles = ['tier1', 'tier2', 'tier3', 'tier4'] @@ -13,13 +13,13 @@ def test_parser_role_choices(): assert args.role == role assert args.prompt == test_prompt -def test_parser_invalid_role(): +def test_parser_invalid_role() -> None: """Test that the parser rejects roles outside the specified choices.""" parser = create_parser() with pytest.raises(SystemExit): parser.parse_args(['--role', 'tier5', 'Some prompt']) -def test_parser_prompt_optional(): +def test_parser_prompt_optional() -> None: """Test that the prompt argument is optional if role is provided (or handled in main).""" parser = create_parser() # Prompt is now optional (nargs='?') @@ -27,28 +27,28 @@ def test_parser_prompt_optional(): assert args.role == 'tier3' assert args.prompt is None -def test_parser_help(): +def test_parser_help() -> None: """Test that the help flag works without raising errors (exits with 0).""" parser = create_parser() with pytest.raises(SystemExit) as excinfo: parser.parse_args(['--help']) assert excinfo.value.code == 0 -def test_get_role_documents(): +def test_get_role_documents() -> None: """Test that get_role_documents returns the correct documentation paths for each tier.""" assert get_role_documents('tier1') == ['conductor/product.md', 'conductor/product-guidelines.md'] assert get_role_documents('tier2') == ['conductor/tech-stack.md', 'conductor/workflow.md'] assert get_role_documents('tier3') == ['conductor/workflow.md'] assert 
get_role_documents('tier4') == [] -def test_get_model_for_role(): +def test_get_model_for_role() -> None: """Test that get_model_for_role returns the correct model for each role.""" assert get_model_for_role('tier1-orchestrator') == 'gemini-3.1-pro-preview' assert get_model_for_role('tier2-tech-lead') == 'gemini-2.5-flash-lite' assert get_model_for_role('tier3-worker') == 'gemini-2.5-flash-lite' assert get_model_for_role('tier4-qa') == 'gemini-2.5-flash-lite' -def test_execute_agent(): +def test_execute_agent() -> None: """ Test that execute_agent calls subprocess.run with powershell and the correct gemini CLI arguments including the model specified for the role. diff --git a/conductor/tests/test_mma_skeleton.py b/conductor/tests/test_mma_skeleton.py index 909acba..aaf66c4 100644 --- a/conductor/tests/test_mma_skeleton.py +++ b/conductor/tests/test_mma_skeleton.py @@ -1,7 +1,7 @@ import pytest from scripts.mma_exec import generate_skeleton -def test_generate_skeleton(): +def test_generate_skeleton() -> None: sample_code = ''' class Calculator: """Performs basic math operations.""" diff --git a/conductor/tracks/python_style_refactor_20260227/plan.md b/conductor/tracks/python_style_refactor_20260227/plan.md index c171d62..fc547da 100644 --- a/conductor/tracks/python_style_refactor_20260227/plan.md +++ b/conductor/tracks/python_style_refactor_20260227/plan.md @@ -21,6 +21,14 @@ - [x] Task: Conductor - Update `conductor/code_styleguides/python.md` with the new AI-optimized standard. 
[602cea6] - [x] Task: Conductor - User Manual Verification 'Phase 3: Metadata and Final Documentation' (Protocol in workflow.md) +## Phase 4: Codebase-Wide Type Hint Sweep +- [ ] Task: Conductor - Type hint pass on core modules (`api_hook_client.py`, `api_hooks.py`, `log_registry.py`, `performance_monitor.py`, `theme.py`, `theme_2.py`, `gemini_cli_adapter.py`, `multi_agent_conductor.py`, `dag_engine.py`, `events.py`, `file_cache.py`, `models.py`, `log_pruner.py`, `gemini.py`, `orchestrator_pm.py`, `conductor_tech_lead.py`, `outline_tool.py`, `summarize.py`) +- [ ] Task: Conductor - Type hint pass on remaining variable-only files (`ai_client.py` vars, `mcp_client.py` vars, `mma_prompts.py` vars) +- [ ] Task: Conductor - Type hint pass on scripts (`scripts/*.py`) +- [ ] Task: Conductor - Type hint pass on simulation modules (`simulation/*.py`) +- [ ] Task: Conductor - Type hint pass on test files (`tests/*.py`, `conductor/tests/*.py`) +- [ ] Task: Conductor - User Manual Verification 'Phase 4: Codebase-Wide Type Hint Sweep' (Protocol in workflow.md) + --- **Protocol Note:** Each task will follow the Standard Task Workflow (Red/Green phases with Tier 3 Worker delegation). Phase completion will trigger the mandatory Verification and Checkpointing protocol. diff --git a/dag_engine.py b/dag_engine.py index e3a3223..de21ae7 100644 --- a/dag_engine.py +++ b/dag_engine.py @@ -7,7 +7,7 @@ class TrackDAG: Provides methods for dependency resolution, cycle detection, and topological sorting. """ - def __init__(self, tickets: List[Ticket]): + def __init__(self, tickets: List[Ticket]) -> None: """ Initializes the TrackDAG with a list of Ticket objects. Args: @@ -99,7 +99,7 @@ class ExecutionEngine: Handles automatic queueing and manual task approval. """ - def __init__(self, dag: TrackDAG, auto_queue: bool = False): + def __init__(self, dag: TrackDAG, auto_queue: bool = False) -> None: """ Initializes the ExecutionEngine. 
Args: @@ -123,7 +123,7 @@ class ExecutionEngine: ticket.status = "in_progress" return ready - def approve_task(self, task_id: str): + def approve_task(self, task_id: str) -> None: """ Manually transitions a task from 'todo' to 'in_progress' if its dependencies are met. Args: @@ -141,7 +141,7 @@ class ExecutionEngine: if all_done: ticket.status = "in_progress" - def update_task_status(self, task_id: str, status: str): + def update_task_status(self, task_id: str, status: str) -> None: """ Force-updates the status of a specific task. Args: diff --git a/debug_ast_2.py b/debug_ast_2.py index 4dfd877..c6a9591 100644 --- a/debug_ast_2.py +++ b/debug_ast_2.py @@ -2,7 +2,7 @@ import tree_sitter import tree_sitter_python class ASTParser: - def __init__(self, language: str): + def __init__(self, language: str) -> None: self.language = tree_sitter.Language(tree_sitter_python.language()) self.parser = tree_sitter.Parser(self.language) diff --git a/events.py b/events.py index 5d12f37..f3e63e3 100644 --- a/events.py +++ b/events.py @@ -9,11 +9,11 @@ class EventEmitter: Simple event emitter for decoupled communication between modules. """ - def __init__(self): + def __init__(self) -> None: """Initializes the EventEmitter with an empty listener map.""" self._listeners: Dict[str, List[Callable]] = {} - def on(self, event_name: str, callback: Callable): + def on(self, event_name: str, callback: Callable) -> None: """ Registers a callback for a specific event. @@ -25,7 +25,7 @@ class EventEmitter: self._listeners[event_name] = [] self._listeners[event_name].append(callback) - def emit(self, event_name: str, *args: Any, **kwargs: Any): + def emit(self, event_name: str, *args: Any, **kwargs: Any) -> None: """ Emits an event, calling all registered callbacks. @@ -43,11 +43,11 @@ class AsyncEventQueue: Asynchronous event queue for decoupled communication using asyncio.Queue. 
""" - def __init__(self): + def __init__(self) -> None: """Initializes the AsyncEventQueue with an internal asyncio.Queue.""" self._queue: asyncio.Queue = asyncio.Queue() - async def put(self, event_name: str, payload: Any = None): + async def put(self, event_name: str, payload: Any = None) -> None: """ Puts an event into the queue. @@ -71,7 +71,7 @@ class UserRequestEvent: Payload for a user request event. """ - def __init__(self, prompt: str, stable_md: str, file_items: List[Any], disc_text: str, base_dir: str): + def __init__(self, prompt: str, stable_md: str, file_items: List[Any], disc_text: str, base_dir: str) -> None: self.prompt = prompt self.stable_md = stable_md self.file_items = file_items diff --git a/file_cache.py b/file_cache.py index 7a350b1..3043cf2 100644 --- a/file_cache.py +++ b/file_cache.py @@ -1,4 +1,4 @@ -# file_cache.py +# file_cache.py """ Stub — the Anthropic Files API path has been removed. All context is now sent as inline chunked text via _send_anthropic_chunked. @@ -16,7 +16,7 @@ class ASTParser: Currently supports Python. 
""" - def __init__(self, language: str): + def __init__(self, language: str) -> None: if language != "python": raise ValueError(f"Language '{language}' not supported yet.") self.language_name = language @@ -141,7 +141,7 @@ class ASTParser: code_bytes[start:end] = bytes(replacement, "utf8") return code_bytes.decode("utf8") -def reset_client(): +def reset_client() -> None: pass def content_block_type(path: Path) -> str: @@ -150,7 +150,7 @@ def content_block_type(path: Path) -> str: def get_file_id(path: Path) -> Optional[str]: return None -def evict(path: Path): +def evict(path: Path) -> None: pass def list_cached() -> list[dict]: diff --git a/gemini.py b/gemini.py index fd1da01..fc8f23c 100644 --- a/gemini.py +++ b/gemini.py @@ -11,12 +11,12 @@ def _load_key() -> str: with open("credentials.toml", "rb") as f: return tomllib.load(f)["gemini"]["api_key"] -def _ensure_client(): +def _ensure_client() -> None: global _client if _client is None: _client = genai.Client(api_key=_load_key()) -def _ensure_chat(): +def _ensure_chat() -> None: global _chat if _chat is None: _ensure_client() @@ -29,7 +29,7 @@ def send(md_content: str, user_message: str) -> str: response = _chat.send_message(full_message) return response.text -def reset_session(): +def reset_session() -> None: global _client, _chat _client = None _chat = None diff --git a/log_pruner.py b/log_pruner.py index e954cc8..0dee89a 100644 --- a/log_pruner.py +++ b/log_pruner.py @@ -10,7 +10,7 @@ class LogPruner: are preserved long-term. """ - def __init__(self, log_registry: LogRegistry, logs_dir: str): + def __init__(self, log_registry: LogRegistry, logs_dir: str) -> None: """ Initializes the LogPruner. @@ -21,7 +21,7 @@ class LogPruner: self.log_registry = log_registry self.logs_dir = logs_dir - def prune(self): + def prune(self) -> None: """ Prunes old and small session directories from the logs directory. 
diff --git a/log_registry.py b/log_registry.py index b931700..edfd8f2 100644 --- a/log_registry.py +++ b/log_registry.py @@ -20,7 +20,7 @@ class LogRegistry: self.data = {} self.load_registry() - def load_registry(self): + def load_registry(self) -> None: """ Loads the registry data from the TOML file into memory. Handles date/time conversions from TOML-native formats to strings for consistency. @@ -48,7 +48,7 @@ class LogRegistry: else: self.data = {} - def save_registry(self): + def save_registry(self) -> None: """ Serializes and saves the current registry data to the TOML file. Converts internal datetime objects to ISO format strings for compatibility. @@ -151,7 +151,7 @@ class LogRegistry: # Check the top-level 'whitelisted' flag. If it's not set or False, it's not whitelisted. return session_data.get('whitelisted', False) - def update_auto_whitelist_status(self, session_id: str): + def update_auto_whitelist_status(self, session_id: str) -> None: """ Analyzes session logs and updates whitelisting status based on heuristics. Sessions are automatically whitelisted if they contain error keywords, diff --git a/models.py b/models.py index 379c9e4..ad43820 100644 --- a/models.py +++ b/models.py @@ -17,12 +17,12 @@ class Ticket: blocked_reason: Optional[str] = None step_mode: bool = False - def mark_blocked(self, reason: str): + def mark_blocked(self, reason: str) -> None: """Sets the ticket status to 'blocked' and records the reason.""" self.status = "blocked" self.blocked_reason = reason - def mark_complete(self): + def mark_complete(self) -> None: """Sets the ticket status to 'completed'.""" self.status = "completed" diff --git a/multi_agent_conductor.py b/multi_agent_conductor.py index cfa9403..c791ced 100644 --- a/multi_agent_conductor.py +++ b/multi_agent_conductor.py @@ -17,7 +17,7 @@ class ConductorEngine: Orchestrates the execution of tickets within a track. 
""" - def __init__(self, track: Track, event_queue: Optional[events.AsyncEventQueue] = None, auto_queue: bool = False): + def __init__(self, track: Track, event_queue: Optional[events.AsyncEventQueue] = None, auto_queue: bool = False) -> None: self.track = track self.event_queue = event_queue self.tier_usage = { @@ -29,7 +29,7 @@ class ConductorEngine: self.dag = TrackDAG(self.track.tickets) self.engine = ExecutionEngine(self.dag, auto_queue=auto_queue) - async def _push_state(self, status: str = "running", active_tier: str = None): + async def _push_state(self, status: str = "running", active_tier: str = None) -> None: if not self.event_queue: return payload = { @@ -44,7 +44,7 @@ class ConductorEngine: } await self.event_queue.put("mma_state_update", payload) - def parse_json_tickets(self, json_str: str): + def parse_json_tickets(self, json_str: str) -> None: """ Parses a JSON string of ticket definitions (Godot ECS Flat List format) and populates the Track's ticket list. @@ -73,7 +73,7 @@ class ConductorEngine: except KeyError as e: print(f"Missing required field in ticket definition: {e}") - async def run(self, md_content: str = ""): + async def run(self, md_content: str = "") -> None: """ Main execution loop using the DAG engine. 
Args: diff --git a/outline_tool.py b/outline_tool.py index 68b9c70..18e2826 100644 --- a/outline_tool.py +++ b/outline_tool.py @@ -2,7 +2,7 @@ import ast from pathlib import Path class CodeOutliner: - def __init__(self): + def __init__(self) -> None: pass def outline(self, code: str) -> str: diff --git a/performance_monitor.py b/performance_monitor.py index afa56a1..529c643 100644 --- a/performance_monitor.py +++ b/performance_monitor.py @@ -3,7 +3,7 @@ import psutil import threading class PerformanceMonitor: - def __init__(self): + def __init__(self) -> None: self._start_time = None self._last_frame_time = 0.0 self._fps = 0.0 @@ -32,7 +32,7 @@ class PerformanceMonitor: self._cpu_thread = threading.Thread(target=self._monitor_cpu, daemon=True) self._cpu_thread.start() - def _monitor_cpu(self): + def _monitor_cpu(self) -> None: while not self._stop_event.is_set(): # psutil.cpu_percent with interval=1.0 is blocking for 1 second. # To be responsive to stop_event, we use a smaller interval or no interval @@ -49,21 +49,21 @@ class PerformanceMonitor: break time.sleep(0.1) - def start_frame(self): + def start_frame(self) -> None: self._start_time = time.time() - def record_input_event(self): + def record_input_event(self) -> None: self._last_input_time = time.time() - def start_component(self, name: str): + def start_component(self, name: str) -> None: self._comp_start[name] = time.time() - def end_component(self, name: str): + def end_component(self, name: str) -> None: if name in self._comp_start: elapsed = (time.time() - self._comp_start[name]) * 1000.0 self._component_timings[name] = elapsed - def end_frame(self): + def end_frame(self) -> None: if self._start_time is None: return end_time = time.time() @@ -80,7 +80,7 @@ class PerformanceMonitor: self._frame_count = 0 self._fps_last_time = end_time - def _check_alerts(self): + def _check_alerts(self) -> None: if not self.alert_callback: return now = time.time() @@ -114,6 +114,6 @@ class PerformanceMonitor: 
metrics[f'time_{name}_ms'] = elapsed return metrics - def stop(self): + def stop(self) -> None: self._stop_event.set() self._cpu_thread.join(timeout=2.0) diff --git a/reproduce_issue.py b/reproduce_issue.py index ed4e3fa..79e3bb7 100644 --- a/reproduce_issue.py +++ b/reproduce_issue.py @@ -2,7 +2,7 @@ import pytest from models import Ticket from dag_engine import TrackDAG, ExecutionEngine -def test_auto_queue_and_step_mode(): +def test_auto_queue_and_step_mode() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", step_mode=True) dag = TrackDAG([t1, t2]) diff --git a/reproduce_missing_hints.py b/reproduce_missing_hints.py index 8b85e34..50fef63 100644 --- a/reproduce_missing_hints.py +++ b/reproduce_missing_hints.py @@ -1,7 +1,7 @@ import subprocess import sys -def test_type_hints(): +def test_type_hints() -> None: files = ["project_manager.py", "session_logger.py"] all_missing = [] for f in files: diff --git a/run_tests.py b/run_tests.py index 591299b..afa3db2 100644 --- a/run_tests.py +++ b/run_tests.py @@ -45,7 +45,7 @@ def get_test_files(manifest: Dict[str, Any], category: str) -> List[str]: print(f"DEBUG: Found test files for category '{category}': {files}", file=sys.stderr) return files -def main(): +def main() -> None: parser = argparse.ArgumentParser( description="Run tests with optional manifest and category filtering, passing additional pytest arguments.", formatter_class=argparse.RawDescriptionHelpFormatter, diff --git a/scripts/claude_mma_exec.py b/scripts/claude_mma_exec.py index 4095706..d19f14d 100644 --- a/scripts/claude_mma_exec.py +++ b/scripts/claude_mma_exec.py @@ -253,7 +253,7 @@ def create_parser(): return parser -def main(): +def main() -> None: parser = create_parser() args = parser.parse_args() role = args.role diff --git a/scripts/claude_tool_bridge.py b/scripts/claude_tool_bridge.py index d27f60c..95f189e 100644 --- 
a/scripts/claude_tool_bridge.py +++ b/scripts/claude_tool_bridge.py @@ -15,7 +15,7 @@ except ImportError: sys.exit(1) -def main(): +def main() -> None: logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stderr) logging.debug("Claude Tool Bridge script started.") try: diff --git a/scripts/mcp_server.py b/scripts/mcp_server.py index bc5781d..c5fb643 100644 --- a/scripts/mcp_server.py +++ b/scripts/mcp_server.py @@ -78,7 +78,7 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]: return [TextContent(type="text", text=f"ERROR: {e}")] -async def main(): +async def main() -> None: async with stdio_server() as (read_stream, write_stream): await server.run( read_stream, diff --git a/scripts/mma_exec.py b/scripts/mma_exec.py index 3bcf0bb..86f879e 100644 --- a/scripts/mma_exec.py +++ b/scripts/mma_exec.py @@ -239,7 +239,7 @@ def create_parser(): ) return parser -def main(): +def main() -> None: parser = create_parser() args = parser.parse_args() role = args.role diff --git a/scripts/scan_all_hints.py b/scripts/scan_all_hints.py new file mode 100644 index 0000000..3f7f08f --- /dev/null +++ b/scripts/scan_all_hints.py @@ -0,0 +1,56 @@ +"""Scan all .py files for missing type hints. 
Writes scan_report.txt.""" +import ast, os + +SKIP = {'.git', '__pycache__', '.venv', 'venv', 'node_modules', '.claude', '.gemini'} +BASE = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +os.chdir(BASE) + +results = {} +for root, dirs, files in os.walk('.'): + dirs[:] = [d for d in dirs if d not in SKIP] + for f in files: + if not f.endswith('.py'): + continue + path = os.path.join(root, f).replace('\\', '/') + try: + with open(path, 'r', encoding='utf-8-sig') as fh: + tree = ast.parse(fh.read()) + except Exception: + continue + counts = [0, 0, 0] # nr, up, uv + def scan(scope, prefix=''): + for node in ast.iter_child_nodes(scope): + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): + if node.returns is None: + counts[0] += 1 + for arg in node.args.args: + if arg.arg not in ('self', 'cls') and arg.annotation is None: + counts[1] += 1 + if isinstance(node, ast.Assign): + for t in node.targets: + if isinstance(t, ast.Name): + counts[2] += 1 + if isinstance(node, ast.ClassDef): + scan(node, prefix=f'{node.name}.') + scan(tree) + nr, up, uv = counts + total = nr + up + uv + if total > 0: + results[path] = (nr, up, uv, total) + +lines = [] +lines.append(f'Files with untyped items: {len(results)}') +lines.append('') +lines.append(f'{"File":<58} {"NoRet":>6} {"Params":>7} {"Vars":>5} {"Total":>6}') +lines.append('-' * 85) +gt = 0 +for path in sorted(results, key=lambda x: results[x][3], reverse=True): + nr, up, uv, t = results[path] + lines.append(f'{path:<58} {nr:>6} {up:>7} {uv:>5} {t:>6}') + gt += t +lines.append('-' * 85) +lines.append(f'{"TOTAL":<58} {"":>6} {"":>7} {"":>5} {gt:>6}') + +report = '\n'.join(lines) +with open('scan_report.txt', 'w', encoding='utf-8') as f: + f.write(report) diff --git a/scripts/tool_call.py b/scripts/tool_call.py index 0ef4c00..e20fdd5 100644 --- a/scripts/tool_call.py +++ b/scripts/tool_call.py @@ -17,7 +17,7 @@ except ImportError: print(json.dumps({"error": "Failed to import required modules"})) sys.exit(1) 
-def main(): +def main() -> None: if len(sys.argv) < 2: print(json.dumps({"error": "No tool name provided"})) sys.exit(1) diff --git a/scripts/tool_discovery.py b/scripts/tool_discovery.py index 8a8945a..0b2cd321 100644 --- a/scripts/tool_discovery.py +++ b/scripts/tool_discovery.py @@ -13,7 +13,7 @@ except ImportError as e: print("[]") sys.exit(0) -def main(): +def main() -> None: specs = list(mcp_client.MCP_TOOL_SPECS) # Add run_powershell (manually define to match ai_client.py) specs.append({ diff --git a/simulation/live_walkthrough.py b/simulation/live_walkthrough.py index 5263a5d..bbc0ef5 100644 --- a/simulation/live_walkthrough.py +++ b/simulation/live_walkthrough.py @@ -5,7 +5,7 @@ import random from api_hook_client import ApiHookClient from simulation.workflow_sim import WorkflowSimulator -def main(): +def main() -> None: client = ApiHookClient() print("=== Manual Slop: Live UX Walkthrough ===") print("Connecting to GUI...") diff --git a/simulation/sim_ai_settings.py b/simulation/sim_ai_settings.py index 213693f..36048bb 100644 --- a/simulation/sim_ai_settings.py +++ b/simulation/sim_ai_settings.py @@ -4,7 +4,7 @@ import time from simulation.sim_base import BaseSimulation, run_sim class AISettingsSimulation(BaseSimulation): - def run(self): + def run(self) -> None: print("\n--- Running AI Settings Simulation (Gemini Only) ---") # 1. 
Verify initial model provider = self.client.get_value("current_provider") diff --git a/simulation/sim_base.py b/simulation/sim_base.py index 3f7b945..fddbe55 100644 --- a/simulation/sim_base.py +++ b/simulation/sim_base.py @@ -9,7 +9,7 @@ from simulation.workflow_sim import WorkflowSimulator sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) class BaseSimulation: - def __init__(self, client: ApiHookClient = None): + def __init__(self, client: ApiHookClient = None) -> None: if client is None: self.client = ApiHookClient() else: @@ -36,7 +36,7 @@ class BaseSimulation: self.client.set_value("current_model", "gemini-2.5-flash-lite") time.sleep(0.2) - def teardown(self): + def teardown(self) -> None: if self.project_path and os.path.exists(self.project_path): # We keep it for debugging if it failed, but usually we'd clean up # os.remove(self.project_path) diff --git a/simulation/sim_context.py b/simulation/sim_context.py index 0fa6e45..68a0644 100644 --- a/simulation/sim_context.py +++ b/simulation/sim_context.py @@ -4,7 +4,7 @@ import time from simulation.sim_base import BaseSimulation, run_sim class ContextSimulation(BaseSimulation): - def run(self): + def run(self) -> None: print("\n--- Running Context & Chat Simulation ---") # 1. Test Discussion Creation disc_name = f"TestDisc_{int(time.time())}" diff --git a/simulation/sim_execution.py b/simulation/sim_execution.py index 0abd19d..d4cf17e 100644 --- a/simulation/sim_execution.py +++ b/simulation/sim_execution.py @@ -9,7 +9,7 @@ class ExecutionSimulation(BaseSimulation): if os.path.exists("hello.ps1"): os.remove("hello.ps1") - def run(self): + def run(self) -> None: print("\n--- Running Execution & Modals Simulation ---") # 1. Trigger script generation (Async so we don't block on the wait loop) msg = "Create a hello.ps1 script that prints 'Simulation Test' and execute it." 
diff --git a/simulation/sim_tools.py b/simulation/sim_tools.py index ff297d1..e57e6db 100644 --- a/simulation/sim_tools.py +++ b/simulation/sim_tools.py @@ -4,7 +4,7 @@ import time from simulation.sim_base import BaseSimulation, run_sim class ToolsSimulation(BaseSimulation): - def run(self): + def run(self) -> None: print("\n--- Running Tools Simulation ---") # 1. Trigger list_directory tool msg = "List the files in the current directory." diff --git a/simulation/workflow_sim.py b/simulation/workflow_sim.py index 24c419c..8c7cd4f 100644 --- a/simulation/workflow_sim.py +++ b/simulation/workflow_sim.py @@ -4,7 +4,7 @@ from api_hook_client import ApiHookClient from simulation.user_agent import UserSimAgent class WorkflowSimulator: - def __init__(self, hook_client: ApiHookClient): + def __init__(self, hook_client: ApiHookClient) -> None: self.client = hook_client self.user_agent = UserSimAgent(hook_client) @@ -30,7 +30,7 @@ class WorkflowSimulator: self.client.select_list_item("disc_listbox", name) time.sleep(1) - def load_prior_log(self): + def load_prior_log(self) -> None: print("Loading prior log") self.client.click("btn_load_log") # This usually opens a file dialog which we can't easily automate from here diff --git a/test_mma_persistence.py b/test_mma_persistence.py index a4aefc7..a3154f6 100644 --- a/test_mma_persistence.py +++ b/test_mma_persistence.py @@ -6,12 +6,12 @@ import project_manager from models import Track, Ticket class TestMMAPersistence(unittest.TestCase): - def test_default_project_has_mma(self): + def test_default_project_has_mma(self) -> None: proj = project_manager.default_project("test") self.assertIn("mma", proj) self.assertEqual(proj["mma"], {"epic": "", "active_track_id": "", "tracks": []}) - def test_save_load_mma(self): + def test_save_load_mma(self) -> None: proj = project_manager.default_project("test") proj["mma"] = {"epic": "Test Epic", "tracks": [{"id": "track_1"}]} test_file = Path("test_mma_proj.toml") diff --git 
a/tests/conftest.py b/tests/conftest.py index 7870b50..849f3b8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient import ai_client @pytest.fixture(autouse=True) -def reset_ai_client(): +def reset_ai_client() -> None: """Reset ai_client global state between every test to prevent state pollution.""" ai_client.reset_session() # Default to a safe model @@ -41,7 +41,7 @@ def kill_process_tree(pid): print(f"[Fixture] Error killing process tree {pid}: {e}") @pytest.fixture(scope="session") -def live_gui(): +def live_gui() -> None: """ Session-scoped fixture that starts gui_2.py with --enable-test-hooks. """ diff --git a/tests/test_ai_client_cli.py b/tests/test_ai_client_cli.py index 8faa087..a3f0f54 100644 --- a/tests/test_ai_client_cli.py +++ b/tests/test_ai_client_cli.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import MagicMock, patch import ai_client -def test_ai_client_send_gemini_cli(): +def test_ai_client_send_gemini_cli() -> None: """ Verifies that 'ai_client.send' correctly interacts with 'GeminiCliAdapter' when the 'gemini_cli' provider is specified. diff --git a/tests/test_ai_client_list_models.py b/tests/test_ai_client_list_models.py index 92198ab..70adb45 100644 --- a/tests/test_ai_client_list_models.py +++ b/tests/test_ai_client_list_models.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch, MagicMock import ai_client -def test_list_models_gemini_cli(): +def test_list_models_gemini_cli() -> None: """ Verifies that 'ai_client.list_models' correctly returns a list of models for the 'gemini_cli' provider. 
diff --git a/tests/test_ai_style_formatter.py b/tests/test_ai_style_formatter.py index 67ec82b..202569b 100644 --- a/tests/test_ai_style_formatter.py +++ b/tests/test_ai_style_formatter.py @@ -2,7 +2,7 @@ import pytest import textwrap from scripts.ai_style_formatter import format_code -def test_basic_indentation(): +def test_basic_indentation() -> None: source = textwrap.dedent("""\ def hello(): print("world") @@ -17,7 +17,7 @@ def test_basic_indentation(): ) assert format_code(source) == expected -def test_top_level_blank_lines(): +def test_top_level_blank_lines() -> None: source = textwrap.dedent("""\ def a(): pass @@ -35,7 +35,7 @@ def test_top_level_blank_lines(): ) assert format_code(source) == expected -def test_inner_blank_lines(): +def test_inner_blank_lines() -> None: source = textwrap.dedent("""\ def a(): print("start") @@ -49,7 +49,7 @@ def test_inner_blank_lines(): ) assert format_code(source) == expected -def test_multiline_string_safety(): +def test_multiline_string_safety() -> None: source = textwrap.dedent("""\ def a(): ''' @@ -72,7 +72,7 @@ def test_multiline_string_safety(): assert " This is a multiline" in result assert result.startswith("def a():\n '''") -def test_continuation_indentation(): +def test_continuation_indentation() -> None: source = textwrap.dedent("""\ def long_func( a, @@ -95,7 +95,7 @@ def test_continuation_indentation(): ) assert format_code(source) == expected -def test_multiple_top_level_definitions(): +def test_multiple_top_level_definitions() -> None: source = textwrap.dedent("""\ class MyClass: def __init__(self): diff --git a/tests/test_api_events.py b/tests/test_api_events.py index 4fadfb9..22d7494 100644 --- a/tests/test_api_events.py +++ b/tests/test_api_events.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch import ai_client class MockUsage: - def __init__(self): + def __init__(self) -> None: self.prompt_token_count = 10 self.candidates_token_count = 5 self.total_token_count = 15 @@ -28,13 +28,13 @@ def 
test_ai_client_event_emitter_exists(): # This should fail initially because 'events' won't exist on ai_client assert hasattr(ai_client, 'events') -def test_event_emission(): +def test_event_emission() -> None: callback = MagicMock() ai_client.events.on("test_event", callback) ai_client.events.emit("test_event", payload={"data": 123}) callback.assert_called_once_with(payload={"data": 123}) -def test_send_emits_events(): +def test_send_emits_events() -> None: with patch("ai_client._send_gemini") as mock_send_gemini, \ patch("ai_client._send_anthropic") as mock_send_anthropic: mock_send_gemini.return_value = "gemini response" @@ -50,7 +50,7 @@ def test_send_emits_events(): # Let's mock _gemini_client instead to let _send_gemini run and emit events. pass -def test_send_emits_events_proper(): +def test_send_emits_events_proper() -> None: with patch("ai_client._ensure_gemini_client"), \ patch("ai_client._gemini_client") as mock_client: mock_chat = MagicMock() @@ -70,7 +70,7 @@ def test_send_emits_events_proper(): args, kwargs = start_callback.call_args assert kwargs['payload']['provider'] == 'gemini' -def test_send_emits_tool_events(): +def test_send_emits_tool_events() -> None: import mcp_client with patch("ai_client._ensure_gemini_client"), \ patch("ai_client._gemini_client") as mock_client, \ diff --git a/tests/test_api_hook_client.py b/tests/test_api_hook_client.py index b4c3415..6a20fbe 100644 --- a/tests/test_api_hook_client.py +++ b/tests/test_api_hook_client.py @@ -56,7 +56,7 @@ def test_get_performance_success(live_gui): response = client.get_performance() assert "performance" in response -def test_unsupported_method_error(): +def test_unsupported_method_error() -> None: """ Test that calling an unsupported HTTP method raises a ValueError. 
""" @@ -64,7 +64,7 @@ def test_unsupported_method_error(): with pytest.raises(ValueError, match="Unsupported HTTP method"): client._make_request('PUT', '/some_endpoint', data={'key': 'value'}) -def test_get_text_value(): +def test_get_text_value() -> None: """ Test retrieval of string representation using get_text_value. """ @@ -74,7 +74,7 @@ def test_get_text_value(): with patch.object(client, 'get_value', return_value=None): assert client.get_text_value("dummy_tag") is None -def test_get_node_status(): +def test_get_node_status() -> None: """ Test retrieval of DAG node status using get_node_status. """ diff --git a/tests/test_api_hook_extensions.py b/tests/test_api_hook_extensions.py index 4b6e868..7f109ba 100644 --- a/tests/test_api_hook_extensions.py +++ b/tests/test_api_hook_extensions.py @@ -7,7 +7,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from api_hook_client import ApiHookClient -def test_api_client_has_extensions(): +def test_api_client_has_extensions() -> None: client = ApiHookClient() # These should fail initially as they are not implemented assert hasattr(client, 'select_tab') @@ -33,7 +33,7 @@ def test_get_indicator_state_integration(live_gui): assert 'shown' in response assert response['tag'] == "thinking_indicator" -def test_app_processes_new_actions(): +def test_app_processes_new_actions() -> None: import gui_legacy from unittest.mock import MagicMock, patch import dearpygui.dearpygui as dpg diff --git a/tests/test_ast_parser.py b/tests/test_ast_parser.py index a79f6bb..0bfac66 100644 --- a/tests/test_ast_parser.py +++ b/tests/test_ast_parser.py @@ -2,12 +2,12 @@ import pytest import tree_sitter from file_cache import ASTParser -def test_ast_parser_initialization(): +def test_ast_parser_initialization() -> None: """Verify that ASTParser can be initialized with a language string.""" parser = ASTParser("python") assert parser.language_name == "python" -def test_ast_parser_parse(): +def test_ast_parser_parse() 
-> None: """Verify that the parse method returns a tree_sitter.Tree.""" parser = ASTParser("python") code = """def example_func(): @@ -17,7 +17,7 @@ def test_ast_parser_parse(): # Basic check that it parsed something assert tree.root_node.type == "module" -def test_ast_parser_get_skeleton_python(): +def test_ast_parser_get_skeleton_python() -> None: """Verify that get_skeleton replaces function bodies with '...' while preserving docstrings.""" parser = ASTParser("python") code = ''' @@ -51,14 +51,14 @@ class MyClass: assert "return result" not in skeleton assert 'print("doing something")' not in skeleton -def test_ast_parser_invalid_language(): +def test_ast_parser_invalid_language() -> None: """Verify handling of unsupported or invalid languages.""" # This might raise an error or return a default, depending on implementation # For now, we expect it to either fail gracefully or raise an exception we can catch with pytest.raises(Exception): ASTParser("not-a-language") -def test_ast_parser_get_curated_view(): +def test_ast_parser_get_curated_view() -> None: """Verify that get_curated_view preserves function bodies with @core_logic or # [HOT].""" parser = ASTParser("python") code = ''' diff --git a/tests/test_ast_parser_curated.py b/tests/test_ast_parser_curated.py index 8ef86dd..2ee0ea6 100644 --- a/tests/test_ast_parser_curated.py +++ b/tests/test_ast_parser_curated.py @@ -1,7 +1,7 @@ import pytest from file_cache import ASTParser -def test_ast_parser_get_curated_view(): +def test_ast_parser_get_curated_view() -> None: parser = ASTParser("python") code = ''' @core_logic diff --git a/tests/test_async_events.py b/tests/test_async_events.py index 45a85f1..3450074 100644 --- a/tests/test_async_events.py +++ b/tests/test_async_events.py @@ -2,7 +2,7 @@ import asyncio import pytest from events import AsyncEventQueue -def test_async_event_queue_put_get(): +def test_async_event_queue_put_get() -> None: """Verify that an event can be asynchronously put and retrieved from the 
queue.""" async def run_test(): @@ -15,7 +15,7 @@ def test_async_event_queue_put_get(): assert ret_payload == payload asyncio.run(run_test()) -def test_async_event_queue_multiple(): +def test_async_event_queue_multiple() -> None: """Verify that multiple events can be asynchronously put and retrieved in order.""" async def run_test(): @@ -30,7 +30,7 @@ def test_async_event_queue_multiple(): assert val2 == 2 asyncio.run(run_test()) -def test_async_event_queue_none_payload(): +def test_async_event_queue_none_payload() -> None: """Verify that an event with None payload works correctly.""" async def run_test(): diff --git a/tests/test_cli_tool_bridge.py b/tests/test_cli_tool_bridge.py index 32409f3..e741d51 100644 --- a/tests/test_cli_tool_bridge.py +++ b/tests/test_cli_tool_bridge.py @@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from scripts.cli_tool_bridge import main class TestCliToolBridge(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop' self.tool_call = { 'tool_name': 'read_file', diff --git a/tests/test_cli_tool_bridge_mapping.py b/tests/test_cli_tool_bridge_mapping.py index da446fa..75bb7e9 100644 --- a/tests/test_cli_tool_bridge_mapping.py +++ b/tests/test_cli_tool_bridge_mapping.py @@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from scripts.cli_tool_bridge import main class TestCliToolBridgeMapping(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: os.environ['GEMINI_CLI_HOOK_CONTEXT'] = 'manual_slop' @patch('sys.stdin', new_callable=io.StringIO) diff --git a/tests/test_conductor_api_hook_integration.py b/tests/test_conductor_api_hook_integration.py index ff9fc1a..1fb10fa 100644 --- a/tests/test_conductor_api_hook_integration.py +++ b/tests/test_conductor_api_hook_integration.py @@ -55,7 +55,7 @@ def test_conductor_handles_api_hook_failure(live_gui): assert 
results["verification_successful"] is False assert "failed" in results["verification_message"] -def test_conductor_handles_api_hook_connection_error(): +def test_conductor_handles_api_hook_connection_error() -> None: """ Verify Conductor handles a simulated API hook connection error (server down). """ diff --git a/tests/test_conductor_engine.py b/tests/test_conductor_engine.py index c18b2c2..68b4eac 100644 --- a/tests/test_conductor_engine.py +++ b/tests/test_conductor_engine.py @@ -6,7 +6,7 @@ import ai_client # These tests define the expected interface for multi_agent_conductor.py # which will be implemented in the next phase of TDD. -def test_conductor_engine_initialization(): +def test_conductor_engine_initialization() -> None: """ Test that ConductorEngine can be initialized with a Track. """ diff --git a/tests/test_conductor_tech_lead.py b/tests/test_conductor_tech_lead.py index 38dc9cb..ec3c539 100644 --- a/tests/test_conductor_tech_lead.py +++ b/tests/test_conductor_tech_lead.py @@ -48,12 +48,12 @@ class TestConductorTechLead(unittest.TestCase): self.assertEqual(tickets, []) class TestTopologicalSort(unittest.TestCase): - def test_topological_sort_empty(self): + def test_topological_sort_empty(self) -> None: tickets = [] sorted_tickets = conductor_tech_lead.topological_sort(tickets) self.assertEqual(sorted_tickets, []) - def test_topological_sort_linear(self): + def test_topological_sort_linear(self) -> None: tickets = [ {"id": "t2", "depends_on": ["t1"]}, {"id": "t1", "depends_on": []}, @@ -82,7 +82,7 @@ class TestTopologicalSort(unittest.TestCase): self.assertEqual(ids[-1], "t4") self.assertSetEqual(set(ids[1:3]), {"t2", "t3"}) - def test_topological_sort_cycle(self): + def test_topological_sort_cycle(self) -> None: tickets = [ {"id": "t1", "depends_on": ["t2"]}, {"id": "t2", "depends_on": ["t1"]}, diff --git a/tests/test_dag_engine.py b/tests/test_dag_engine.py index 892b7ca..07b1765 100644 --- a/tests/test_dag_engine.py +++ b/tests/test_dag_engine.py @@ 
-2,7 +2,7 @@ import pytest from models import Ticket from dag_engine import TrackDAG -def test_get_ready_tasks_linear(): +def test_get_ready_tasks_linear() -> None: t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker") t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T2"]) @@ -11,7 +11,7 @@ def test_get_ready_tasks_linear(): assert len(ready) == 1 assert ready[0].id == "T2" -def test_get_ready_tasks_branching(): +def test_get_ready_tasks_branching() -> None: t1 = Ticket(id="T1", description="Task 1", status="completed", assigned_to="worker") t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) t3 = Ticket(id="T3", description="Task 3", status="todo", assigned_to="worker", depends_on=["T1"]) @@ -21,19 +21,19 @@ def test_get_ready_tasks_branching(): ready_ids = {t.id for t in ready} assert ready_ids == {"T2", "T3"} -def test_has_cycle_no_cycle(): +def test_has_cycle_no_cycle() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) dag = TrackDAG([t1, t2]) assert not dag.has_cycle() -def test_has_cycle_direct_cycle(): +def test_has_cycle_direct_cycle() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) dag = TrackDAG([t1, t2]) assert dag.has_cycle() -def test_has_cycle_indirect_cycle(): +def test_has_cycle_indirect_cycle() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", depends_on=["T2"]) t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T3"]) t3 = Ticket(id="T3", description="Task 3", 
status="todo", assigned_to="worker", depends_on=["T1"]) @@ -49,7 +49,7 @@ def test_has_cycle_complex_no_cycle(): dag = TrackDAG([t1, t2, t3, t4]) assert not dag.has_cycle() -def test_get_ready_tasks_multiple_deps(): +def test_get_ready_tasks_multiple_deps() -> None: t1 = Ticket(id="T1", description="T1", status="completed", assigned_to="worker") t2 = Ticket(id="T2", description="T2", status="completed", assigned_to="worker") t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T1", "T2"]) @@ -58,7 +58,7 @@ def test_get_ready_tasks_multiple_deps(): t2.status = "todo" assert [t.id for t in dag.get_ready_tasks()] == ["T2"] -def test_topological_sort(): +def test_topological_sort() -> None: t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker") t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"]) t3 = Ticket(id="T3", description="T3", status="todo", assigned_to="worker", depends_on=["T2"]) @@ -66,7 +66,7 @@ def test_topological_sort(): sort = dag.topological_sort() assert sort == ["T1", "T2", "T3"] -def test_topological_sort_cycle(): +def test_topological_sort_cycle() -> None: t1 = Ticket(id="T1", description="T1", status="todo", assigned_to="worker", depends_on=["T2"]) t2 = Ticket(id="T2", description="T2", status="todo", assigned_to="worker", depends_on=["T1"]) dag = TrackDAG([t1, t2]) diff --git a/tests/test_deepseek_infra.py b/tests/test_deepseek_infra.py index 31f759d..3ad3742 100644 --- a/tests/test_deepseek_infra.py +++ b/tests/test_deepseek_infra.py @@ -24,7 +24,7 @@ def test_credentials_error_mentions_deepseek(monkeypatch): assert "[deepseek]" in err_msg assert "api_key" in err_msg -def test_default_project_includes_reasoning_role(): +def test_default_project_includes_reasoning_role() -> None: """ Verify that 'Reasoning' is included in the default discussion roles to support DeepSeek-R1 reasoning traces. 
@@ -33,14 +33,14 @@ def test_default_project_includes_reasoning_role(): roles = proj["discussion"]["roles"] assert "Reasoning" in roles -def test_gui_providers_list(): +def test_gui_providers_list() -> None: """ Check if 'deepseek' is in the GUI's provider list. """ import gui_2 assert "deepseek" in gui_2.PROVIDERS -def test_deepseek_model_listing(): +def test_deepseek_model_listing() -> None: """ Verify that list_models for deepseek returns expected models. """ diff --git a/tests/test_deepseek_provider.py b/tests/test_deepseek_provider.py index 70aa637..8aedef2 100644 --- a/tests/test_deepseek_provider.py +++ b/tests/test_deepseek_provider.py @@ -2,7 +2,7 @@ import pytest from unittest.mock import patch, MagicMock import ai_client -def test_deepseek_model_selection(): +def test_deepseek_model_selection() -> None: """ Verifies that ai_client.set_provider('deepseek', 'deepseek-chat') correctly updates the internal state. """ @@ -10,7 +10,7 @@ def test_deepseek_model_selection(): assert ai_client._provider == "deepseek" assert ai_client._model == "deepseek-chat" -def test_deepseek_completion_logic(): +def test_deepseek_completion_logic() -> None: """ Verifies that ai_client.send() correctly calls the DeepSeek API and returns content. """ @@ -30,7 +30,7 @@ def test_deepseek_completion_logic(): assert result == "DeepSeek Response" assert mock_post.called -def test_deepseek_reasoning_logic(): +def test_deepseek_reasoning_logic() -> None: """ Verifies that reasoning_content is captured and wrapped in tags. """ @@ -54,7 +54,7 @@ def test_deepseek_reasoning_logic(): assert "\nChain of thought\n" in result assert "Final Answer" in result -def test_deepseek_tool_calling(): +def test_deepseek_tool_calling() -> None: """ Verifies that DeepSeek provider correctly identifies and executes tool calls. 
""" @@ -103,7 +103,7 @@ def test_deepseek_tool_calling(): assert mock_dispatch.call_args[0][0] == "read_file" assert mock_dispatch.call_args[0][1] == {"path": "test.txt"} -def test_deepseek_streaming(): +def test_deepseek_streaming() -> None: """ Verifies that DeepSeek provider correctly aggregates streaming chunks. """ diff --git a/tests/test_execution_engine.py b/tests/test_execution_engine.py index 7ec9c60..d3356f5 100644 --- a/tests/test_execution_engine.py +++ b/tests/test_execution_engine.py @@ -39,13 +39,13 @@ def test_execution_engine_basic_flow(): ready = engine.tick() assert len(ready) == 0 -def test_execution_engine_update_nonexistent_task(): +def test_execution_engine_update_nonexistent_task() -> None: dag = TrackDAG([]) engine = ExecutionEngine(dag) # Should not raise error, or handle gracefully engine.update_task_status("NONEXISTENT", "completed") -def test_execution_engine_status_persistence(): +def test_execution_engine_status_persistence() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") dag = TrackDAG([t1]) engine = ExecutionEngine(dag) @@ -54,7 +54,7 @@ def test_execution_engine_status_persistence(): ready = engine.tick() assert len(ready) == 0 # Only 'todo' tasks should be returned by tick() if they are ready -def test_execution_engine_auto_queue(): +def test_execution_engine_auto_queue() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") t2 = Ticket(id="T2", description="Task 2", status="todo", assigned_to="worker", depends_on=["T1"]) dag = TrackDAG([t1, t2]) @@ -76,7 +76,7 @@ def test_execution_engine_auto_queue(): assert ready[0].id == "T2" assert t2.status == "in_progress" -def test_execution_engine_step_mode(): +def test_execution_engine_step_mode() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker", step_mode=True) dag = TrackDAG([t1]) engine = ExecutionEngine(dag, auto_queue=True) @@ -92,7 +92,7 @@ def 
test_execution_engine_step_mode(): ready = engine.tick() assert len(ready) == 0 -def test_execution_engine_approve_task(): +def test_execution_engine_approve_task() -> None: t1 = Ticket(id="T1", description="Task 1", status="todo", assigned_to="worker") dag = TrackDAG([t1]) engine = ExecutionEngine(dag, auto_queue=False) diff --git a/tests/test_gemini_cli_adapter.py b/tests/test_gemini_cli_adapter.py index c79ea0b..49a4aa9 100644 --- a/tests/test_gemini_cli_adapter.py +++ b/tests/test_gemini_cli_adapter.py @@ -12,7 +12,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from gemini_cli_adapter import GeminiCliAdapter class TestGeminiCliAdapter(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.adapter = GeminiCliAdapter(binary_path="gemini") @patch('subprocess.Popen') diff --git a/tests/test_gemini_cli_adapter_parity.py b/tests/test_gemini_cli_adapter_parity.py index d203e46..9ac0fcf 100644 --- a/tests/test_gemini_cli_adapter_parity.py +++ b/tests/test_gemini_cli_adapter_parity.py @@ -15,7 +15,7 @@ from gemini_cli_adapter import GeminiCliAdapter class TestGeminiCliAdapterParity(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: """Set up a fresh adapter instance and reset session state for each test.""" # Patch session_logger to prevent file operations during tests self.session_logger_patcher = patch('gemini_cli_adapter.session_logger') @@ -25,7 +25,7 @@ class TestGeminiCliAdapterParity(unittest.TestCase): self.adapter.last_usage = None self.adapter.last_latency = 0.0 - def tearDown(self): + def tearDown(self) -> None: self.session_logger_patcher.stop() @patch('subprocess.Popen') diff --git a/tests/test_gemini_cli_parity_regression.py b/tests/test_gemini_cli_parity_regression.py index 5451505..5c75b23 100644 --- a/tests/test_gemini_cli_parity_regression.py +++ b/tests/test_gemini_cli_parity_regression.py @@ -9,7 +9,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) 
import ai_client @pytest.fixture(autouse=True) -def setup_ai_client(): +def setup_ai_client() -> None: ai_client.reset_session() ai_client.set_provider("gemini_cli", "gemini-2.5-flash") ai_client.confirm_and_run_callback = lambda script, base_dir: "Mocked execution" diff --git a/tests/test_gemini_metrics.py b/tests/test_gemini_metrics.py index 7c44396..f5ecd5a 100644 --- a/tests/test_gemini_metrics.py +++ b/tests/test_gemini_metrics.py @@ -9,7 +9,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) # Import the necessary functions from ai_client, including the reset helper from ai_client import get_gemini_cache_stats, reset_session -def test_get_gemini_cache_stats_with_mock_client(): +def test_get_gemini_cache_stats_with_mock_client() -> None: """ Test that get_gemini_cache_stats correctly processes cache lists from a mocked client instance. diff --git a/tests/test_gui2_events.py b/tests/test_gui2_events.py index 89a6c2e..5e386ea 100644 --- a/tests/test_gui2_events.py +++ b/tests/test_gui2_events.py @@ -5,7 +5,7 @@ import ai_client from events import EventEmitter @pytest.fixture -def app_instance(): +def app_instance() -> None: """ Fixture to create an instance of the gui_2.App class for testing. It mocks functions that would render a window or block execution. 
diff --git a/tests/test_gui2_layout.py b/tests/test_gui2_layout.py index b36aa52..4408c48 100644 --- a/tests/test_gui2_layout.py +++ b/tests/test_gui2_layout.py @@ -3,7 +3,7 @@ from unittest.mock import patch from gui_2 import App @pytest.fixture -def app_instance(): +def app_instance() -> None: with ( patch('gui_2.load_config', return_value={'gui': {'show_windows': {}}}), patch('gui_2.save_config'), diff --git a/tests/test_gui2_mcp.py b/tests/test_gui2_mcp.py index 732d539..cdf719d 100644 --- a/tests/test_gui2_mcp.py +++ b/tests/test_gui2_mcp.py @@ -5,7 +5,7 @@ import ai_client from events import EventEmitter @pytest.fixture -def app_instance(): +def app_instance() -> None: if not hasattr(ai_client, 'events') or ai_client.events is None: ai_client.events = EventEmitter() with ( diff --git a/tests/test_gui2_parity.py b/tests/test_gui2_parity.py index ab7e561..ac11375 100644 --- a/tests/test_gui2_parity.py +++ b/tests/test_gui2_parity.py @@ -14,7 +14,7 @@ from api_hook_client import ApiHookClient TEST_CALLBACK_FILE = Path("temp_callback_output.txt") @pytest.fixture(scope="function", autouse=True) -def cleanup_callback_file(): +def cleanup_callback_file() -> None: """Ensures the test callback file is cleaned up before and after each test.""" if TEST_CALLBACK_FILE.exists(): TEST_CALLBACK_FILE.unlink() diff --git a/tests/test_gui2_performance.py b/tests/test_gui2_performance.py index 34b7ca8..235bba4 100644 --- a/tests/test_gui2_performance.py +++ b/tests/test_gui2_performance.py @@ -55,7 +55,7 @@ def test_performance_benchmarking(live_gui): assert avg_fps >= 30, f"{gui_script} FPS {avg_fps:.2f} is below 30 FPS threshold" assert avg_ft <= 33.3, f"{gui_script} Frame time {avg_ft:.2f}ms is above 33.3ms threshold" -def test_performance_parity(): +def test_performance_parity() -> None: """ Compare the metrics collected in the parameterized test_performance_benchmarking. 
""" diff --git a/tests/test_gui_async_events.py b/tests/test_gui_async_events.py index 31c3212..53da500 100644 --- a/tests/test_gui_async_events.py +++ b/tests/test_gui_async_events.py @@ -50,7 +50,7 @@ def test_handle_generate_send_pushes_event(mock_gui): assert event.disc_text == "disc_text" assert event.base_dir == "." -def test_user_request_event_payload(): +def test_user_request_event_payload() -> None: payload = UserRequestEvent( prompt="hello", stable_md="md", @@ -66,7 +66,7 @@ def test_user_request_event_payload(): assert d["base_dir"] == "." @pytest.mark.asyncio -async def test_async_event_queue(): +async def test_async_event_queue() -> None: from events import AsyncEventQueue q = AsyncEventQueue() await q.put("test_event", {"data": 123}) diff --git a/tests/test_gui_diagnostics.py b/tests/test_gui_diagnostics.py index cc6ccc9..7a8f29b 100644 --- a/tests/test_gui_diagnostics.py +++ b/tests/test_gui_diagnostics.py @@ -12,7 +12,7 @@ spec.loader.exec_module(gui_legacy) from gui_legacy import App @pytest.fixture -def app_instance(): +def app_instance() -> None: dpg.create_context() with patch('dearpygui.dearpygui.create_viewport'), \ patch('dearpygui.dearpygui.setup_dearpygui'), \ diff --git a/tests/test_gui_events.py b/tests/test_gui_events.py index 7f365a4..06d9892 100644 --- a/tests/test_gui_events.py +++ b/tests/test_gui_events.py @@ -7,7 +7,7 @@ from gui_legacy import App import ai_client @pytest.fixture -def app_instance(): +def app_instance() -> None: """ Fixture to create an instance of the App class for testing. It creates a real DPG context but mocks functions that would diff --git a/tests/test_gui_updates.py b/tests/test_gui_updates.py index ea280a1..841fc83 100644 --- a/tests/test_gui_updates.py +++ b/tests/test_gui_updates.py @@ -16,7 +16,7 @@ spec.loader.exec_module(gui_legacy) from gui_legacy import App @pytest.fixture -def app_instance(): +def app_instance() -> None: """ Fixture to create an instance of the App class for testing. 
It creates a real DPG context but mocks functions that would diff --git a/tests/test_headless_service.py b/tests/test_headless_service.py index b880076..6137534 100644 --- a/tests/test_headless_service.py +++ b/tests/test_headless_service.py @@ -24,7 +24,7 @@ class TestHeadlessAPI(unittest.TestCase): self.api = self.app_instance.create_api() self.client = TestClient(self.api) - def test_health_endpoint(self): + def test_health_endpoint(self) -> None: response = self.client.get("/health") self.assertEqual(response.status_code, 200) self.assertEqual(response.json(), {"status": "ok"}) @@ -42,7 +42,7 @@ class TestHeadlessAPI(unittest.TestCase): response = self.client.get("/status", headers=headers) self.assertEqual(response.status_code, 200) - def test_generate_endpoint(self): + def test_generate_endpoint(self) -> None: payload = { "prompt": "Hello AI" } @@ -100,7 +100,7 @@ class TestHeadlessAPI(unittest.TestCase): if dummy_log.exists(): dummy_log.unlink() - def test_get_context_endpoint(self): + def test_get_context_endpoint(self) -> None: response = self.client.get("/api/v1/context", headers=self.headers) self.assertEqual(response.status_code, 200) data = response.json() @@ -152,14 +152,14 @@ class TestHeadlessStartup(unittest.TestCase): app.run() mock_immapp_run.assert_called_once() -def test_fastapi_installed(): +def test_fastapi_installed() -> None: """Verify that fastapi is installed.""" try: importlib.import_module("fastapi") except ImportError: pytest.fail("fastapi is not installed") -def test_uvicorn_installed(): +def test_uvicorn_installed() -> None: """Verify that uvicorn is installed.""" try: importlib.import_module("uvicorn") diff --git a/tests/test_headless_verification.py b/tests/test_headless_verification.py index ea65f99..033a43f 100644 --- a/tests/test_headless_verification.py +++ b/tests/test_headless_verification.py @@ -6,7 +6,7 @@ import ai_client import json @pytest.mark.asyncio -async def test_headless_verification_full_run(): +async def 
test_headless_verification_full_run() -> None: """ 1. Initialize a ConductorEngine with a Track containing multiple dependent Tickets. 2. Simulate a full execution run using engine.run_linear(). diff --git a/tests/test_history_management.py b/tests/test_history_management.py index 1b9373d..c4ee010 100644 --- a/tests/test_history_management.py +++ b/tests/test_history_management.py @@ -164,7 +164,7 @@ def test_history_persistence_across_turns(tmp_path): assert len(proj_final["discussion"]["discussions"]["main"]["history"]) == 2 # --- Tests for AI Client History Management --- -def test_get_history_bleed_stats_basic(): +def test_get_history_bleed_stats_basic() -> None: """ Tests basic retrieval of history bleed statistics from the AI client. """ diff --git a/tests/test_hooks.py b/tests/test_hooks.py index 6bac88f..79b7cf9 100644 --- a/tests/test_hooks.py +++ b/tests/test_hooks.py @@ -11,12 +11,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from api_hook_client import ApiHookClient import gui_legacy -def test_hooks_enabled_via_cli(): +def test_hooks_enabled_via_cli() -> None: with patch.object(sys, 'argv', ['gui_legacy.py', '--enable-test-hooks']): app = gui_legacy.App() assert app.test_hooks_enabled is True -def test_hooks_disabled_by_default(): +def test_hooks_disabled_by_default() -> None: with patch.object(sys, 'argv', ['gui_legacy.py']): if 'SLOP_TEST_HOOKS' in os.environ: del os.environ['SLOP_TEST_HOOKS'] diff --git a/tests/test_layout_reorganization.py b/tests/test_layout_reorganization.py index 69e230d..ac901c8 100644 --- a/tests/test_layout_reorganization.py +++ b/tests/test_layout_reorganization.py @@ -13,7 +13,7 @@ sys.modules["gui_legacy"] = gui_legacy spec.loader.exec_module(gui_legacy) from gui_legacy import App -def test_new_hubs_defined_in_window_info(): +def test_new_hubs_defined_in_window_info() -> None: """ Verifies that the new consolidated Hub windows are defined in the App's window_info. 
This ensures they will be available in the 'Windows' menu. diff --git a/tests/test_live_gui_integration.py b/tests/test_live_gui_integration.py index b7b8523..fc903b3 100644 --- a/tests/test_live_gui_integration.py +++ b/tests/test_live_gui_integration.py @@ -7,7 +7,7 @@ from events import UserRequestEvent import ai_client @pytest.fixture -def mock_app(): +def mock_app() -> None: with ( patch('gui_2.load_config', return_value={ "ai": {"provider": "gemini", "model": "model-1", "temperature": 0.0, "max_tokens": 100, "history_trunc_limit": 1000}, diff --git a/tests/test_log_registry.py b/tests/test_log_registry.py index b86bf0b..b5450bf 100644 --- a/tests/test_log_registry.py +++ b/tests/test_log_registry.py @@ -8,7 +8,7 @@ from log_registry import LogRegistry class TestLogRegistry(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: """Set up a temporary directory and registry file for each test.""" self.temp_dir = tempfile.TemporaryDirectory() self.registry_path = os.path.join(self.temp_dir.name, "registry.toml") @@ -19,11 +19,11 @@ class TestLogRegistry(unittest.TestCase): # Instantiate LogRegistry. This will load from the empty file. self.registry = LogRegistry(self.registry_path) - def tearDown(self): + def tearDown(self) -> None: """Clean up the temporary directory and its contents after each test.""" self.temp_dir.cleanup() - def test_instantiation(self): + def test_instantiation(self) -> None: """Test LogRegistry instantiation with a file path.""" self.assertIsInstance(self.registry, LogRegistry) self.assertEqual(self.registry.registry_path, self.registry_path) @@ -31,7 +31,7 @@ class TestLogRegistry(unittest.TestCase): self.assertTrue(os.path.exists(self.registry_path)) # We will verify content in other tests that explicitly save and reload. 
- def test_register_session(self): + def test_register_session(self) -> None: """Test registering a new session.""" session_id = "session-123" path = "/path/to/session/123" @@ -53,7 +53,7 @@ class TestLogRegistry(unittest.TestCase): reloaded_start_time = datetime.fromisoformat(reloaded_session_data['start_time']) self.assertAlmostEqual(reloaded_start_time, start_time, delta=timedelta(seconds=1)) - def test_update_session_metadata(self): + def test_update_session_metadata(self) -> None: """Test updating session metadata.""" session_id = "session-456" path = "/path/to/session/456" @@ -84,7 +84,7 @@ class TestLogRegistry(unittest.TestCase): self.assertTrue(reloaded_session_data.get('metadata', {}).get('whitelisted', False)) self.assertTrue(reloaded_session_data.get('whitelisted', False)) # Check main flag too - def test_is_session_whitelisted(self): + def test_is_session_whitelisted(self) -> None: """Test checking if a session is whitelisted.""" session_id_whitelisted = "session-789-whitelisted" path_w = "/path/to/session/789" @@ -102,7 +102,7 @@ class TestLogRegistry(unittest.TestCase): # Test for a non-existent session, should be treated as not whitelisted self.assertFalse(self.registry.is_session_whitelisted("non-existent-session")) - def test_get_old_non_whitelisted_sessions(self): + def test_get_old_non_whitelisted_sessions(self) -> None: """Test retrieving old, non-whitelisted sessions.""" now = datetime.utcnow() # Define a cutoff time that is 7 days ago diff --git a/tests/test_mma_models.py b/tests/test_mma_models.py index 32311b7..a07b7e6 100644 --- a/tests/test_mma_models.py +++ b/tests/test_mma_models.py @@ -1,7 +1,7 @@ import pytest from models import Ticket, Track, WorkerContext -def test_ticket_instantiation(): +def test_ticket_instantiation() -> None: """ Verifies that a Ticket can be instantiated with its required fields: id, description, status, assigned_to. 
@@ -22,7 +22,7 @@ def test_ticket_instantiation(): assert ticket.assigned_to == assigned_to assert ticket.depends_on == [] -def test_ticket_with_dependencies(): +def test_ticket_with_dependencies() -> None: """ Verifies that a Ticket can store dependencies. """ @@ -35,7 +35,7 @@ def test_ticket_with_dependencies(): ) assert ticket.depends_on == ["T1"] -def test_track_instantiation(): +def test_track_instantiation() -> None: """ Verifies that a Track can be instantiated with its required fields: id, description, and a list of Tickets. @@ -56,14 +56,14 @@ def test_track_instantiation(): assert track.tickets[0].id == "T1" assert track.tickets[1].id == "T2" -def test_track_can_handle_empty_tickets(): +def test_track_can_handle_empty_tickets() -> None: """ Verifies that a Track can be instantiated with an empty list of tickets. """ track = Track(id="TRACK-2", description="Empty Track", tickets=[]) assert track.tickets == [] -def test_worker_context_instantiation(): +def test_worker_context_instantiation() -> None: """ Verifies that a WorkerContext can be instantiated with ticket_id, model_name, and messages. @@ -83,7 +83,7 @@ def test_worker_context_instantiation(): assert context.model_name == model_name assert context.messages == messages -def test_ticket_mark_blocked(): +def test_ticket_mark_blocked() -> None: """ Verifies that ticket.mark_blocked(reason) sets the status to 'blocked'. Note: The reason field might need to be added to the Ticket class. @@ -92,7 +92,7 @@ def test_ticket_mark_blocked(): ticket.mark_blocked("Waiting for API key") assert ticket.status == "blocked" -def test_ticket_mark_complete(): +def test_ticket_mark_complete() -> None: """ Verifies that ticket.mark_complete() sets the status to 'completed'. 
""" @@ -100,7 +100,7 @@ def test_ticket_mark_complete(): ticket.mark_complete() assert ticket.status == "completed" -def test_track_get_executable_tickets(): +def test_track_get_executable_tickets() -> None: """ Verifies that track.get_executable_tickets() returns only 'todo' tickets whose dependencies are all 'completed'. @@ -124,7 +124,7 @@ def test_track_get_executable_tickets(): assert "T6" in executable_ids assert len(executable_ids) == 2 -def test_track_get_executable_tickets_complex(): +def test_track_get_executable_tickets_complex() -> None: """ Verifies executable tickets with complex dependency chains. Chain: T1 (comp) -> T2 (todo) -> T3 (todo) diff --git a/tests/test_mma_orchestration_gui.py b/tests/test_mma_orchestration_gui.py index 22ee6c3..ede4fe4 100644 --- a/tests/test_mma_orchestration_gui.py +++ b/tests/test_mma_orchestration_gui.py @@ -6,7 +6,7 @@ import time from gui_2 import App @pytest.fixture -def app_instance(): +def app_instance() -> None: with ( patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}), patch('gui_2.save_config'), diff --git a/tests/test_mma_prompts.py b/tests/test_mma_prompts.py index 72d8f2d..45c3c6c 100644 --- a/tests/test_mma_prompts.py +++ b/tests/test_mma_prompts.py @@ -1,7 +1,7 @@ import pytest from mma_prompts import PROMPTS -def test_tier1_epic_init_constraints(): +def test_tier1_epic_init_constraints() -> None: prompt = PROMPTS["tier1_epic_init"] assert "Godot ECS Flat List format" in prompt assert "JSON array" in prompt @@ -9,19 +9,19 @@ def test_tier1_epic_init_constraints(): assert "severity" in prompt assert "IGNORE all source code" in prompt -def test_tier1_track_delegation_constraints(): +def test_tier1_track_delegation_constraints() -> None: prompt = PROMPTS["tier1_track_delegation"] assert "Track Brief" in prompt assert "AST Skeleton View" in prompt assert "IGNORE unrelated module docs" in prompt -def test_tier1_macro_merge_constraints(): +def test_tier1_macro_merge_constraints() -> None: 
prompt = PROMPTS["tier1_macro_merge"] assert "Macro-Merge" in prompt assert "Macro-Diff" in prompt assert "IGNORE Tier 3 trial-and-error" in prompt -def test_tier2_sprint_planning_constraints(): +def test_tier2_sprint_planning_constraints() -> None: prompt = PROMPTS["tier2_sprint_planning"] assert "Tickets" in prompt assert "Godot ECS Flat List format" in prompt @@ -30,20 +30,20 @@ def test_tier2_sprint_planning_constraints(): assert "Skeleton View" in prompt assert "Curated Implementation View" in prompt -def test_tier2_code_review_constraints(): +def test_tier2_code_review_constraints() -> None: prompt = PROMPTS["tier2_code_review"] assert "Code Review" in prompt assert "IGNORE the Contributor's internal trial-and-error" in prompt assert "Tier 4 (QA) logs" in prompt -def test_tier2_track_finalization_constraints(): +def test_tier2_track_finalization_constraints() -> None: prompt = PROMPTS["tier2_track_finalization"] assert "Track Finalization" in prompt assert "Executive Summary" in prompt assert "Macro-Diff" in prompt assert "Dependency Delta" in prompt -def test_tier2_contract_first_constraints(): +def test_tier2_contract_first_constraints() -> None: prompt = PROMPTS["tier2_contract_first"] assert "Stub Ticket" in prompt assert "Consumer Ticket" in prompt diff --git a/tests/test_mma_ticket_actions.py b/tests/test_mma_ticket_actions.py index 32e5880..61fdac4 100644 --- a/tests/test_mma_ticket_actions.py +++ b/tests/test_mma_ticket_actions.py @@ -4,7 +4,7 @@ import asyncio from gui_2 import App @pytest.fixture -def app_instance(): +def app_instance() -> None: with ( patch('gui_2.load_config', return_value={'ai': {}, 'projects': {}}), patch('gui_2.save_config'), diff --git a/tests/test_orchestration_logic.py b/tests/test_orchestration_logic.py index 3221de7..0b04fc2 100644 --- a/tests/test_orchestration_logic.py +++ b/tests/test_orchestration_logic.py @@ -7,7 +7,7 @@ import multi_agent_conductor from models import Track, Ticket @pytest.fixture -def 
mock_ai_client(): +def mock_ai_client(): with patch("ai_client.send") as mock_send: yield mock_send @@ -40,7 +40,7 @@ def test_generate_tickets(mock_ai_client): assert tickets[1]["id"] == "T-002" assert tickets[1]["depends_on"] == ["T-001"] -def test_topological_sort(): +def test_topological_sort() -> None: tickets = [ {"id": "T-002", "description": "Dep on 001", "depends_on": ["T-001"]}, {"id": "T-001", "description": "Base", "depends_on": []}, @@ -51,7 +51,7 @@ assert sorted_tickets[1]["id"] == "T-002" assert sorted_tickets[2]["id"] == "T-003" -def test_topological_sort_circular(): +def test_topological_sort_circular() -> None: tickets = [ {"id": "T-001", "depends_on": ["T-002"]}, {"id": "T-002", "depends_on": ["T-001"]} @@ -59,7 +59,7 @@ with pytest.raises(ValueError, match="Circular dependency detected"): conductor_tech_lead.topological_sort(tickets) -def test_track_executable_tickets(): +def test_track_executable_tickets() -> None: t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user") t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"]) track = Track(id="track_1", description="desc", tickets=[t1, t2]) @@ -73,7 +73,7 @@ assert executable[0].id == "T2" @pytest.mark.asyncio -async def test_conductor_engine_run_linear(): +async def test_conductor_engine_run_linear() -> None: t1 = Ticket(id="T1", description="desc", status="todo", assigned_to="user") t2 = Ticket(id="T2", description="desc", status="todo", assigned_to="user", depends_on=["T1"]) track = Track(id="track_1", description="desc", tickets=[t1, t2]) @@ -89,7 +89,7 @@ assert t2.status == "completed" assert mock_worker.call_count == 2 -def test_conductor_engine_parse_json_tickets(): +def test_conductor_engine_parse_json_tickets() -> None: track = Track(id="track_1", description="desc")
engine = multi_agent_conductor.ConductorEngine(track) json_data = json.dumps([ diff --git a/tests/test_orchestrator_pm_history.py b/tests/test_orchestrator_pm_history.py index 3e72368..9174c9a 100644 --- a/tests/test_orchestrator_pm_history.py +++ b/tests/test_orchestrator_pm_history.py @@ -7,7 +7,7 @@ from pathlib import Path import orchestrator_pm class TestOrchestratorPMHistory(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.test_dir = Path("test_conductor") self.test_dir.mkdir(exist_ok=True) self.archive_dir = self.test_dir / "archive" @@ -15,7 +15,7 @@ class TestOrchestratorPMHistory(unittest.TestCase): self.archive_dir.mkdir(exist_ok=True) self.tracks_dir.mkdir(exist_ok=True) - def tearDown(self): + def tearDown(self) -> None: if self.test_dir.exists(): shutil.rmtree(self.test_dir) diff --git a/tests/test_performance_monitor.py b/tests/test_performance_monitor.py index b59d075..34a633d 100644 --- a/tests/test_performance_monitor.py +++ b/tests/test_performance_monitor.py @@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from performance_monitor import PerformanceMonitor -def test_perf_monitor_basic_timing(): +def test_perf_monitor_basic_timing() -> None: pm = PerformanceMonitor() pm.start_frame() time.sleep(0.02) # 20ms @@ -17,7 +17,7 @@ def test_perf_monitor_basic_timing(): assert metrics['last_frame_time_ms'] >= 20.0 pm.stop() -def test_perf_monitor_component_timing(): +def test_perf_monitor_component_timing() -> None: pm = PerformanceMonitor() pm.start_component("test_comp") time.sleep(0.01) diff --git a/tests/test_process_pending_gui_tasks.py b/tests/test_process_pending_gui_tasks.py index 3f3fe40..f0021fb 100644 --- a/tests/test_process_pending_gui_tasks.py +++ b/tests/test_process_pending_gui_tasks.py @@ -4,7 +4,7 @@ import ai_client from gui_2 import App @pytest.fixture -def app_instance(): +def app_instance() -> None: with ( patch('gui_2.load_config', return_value={'ai': {'provider': 
'gemini', 'model': 'gemini-2.5-flash-lite'}, 'projects': {}}), patch('gui_2.save_config'), diff --git a/tests/test_sim_base.py b/tests/test_sim_base.py index 4828d6c..d5924b3 100644 --- a/tests/test_sim_base.py +++ b/tests/test_sim_base.py @@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from simulation.sim_base import BaseSimulation -def test_base_simulation_init(): +def test_base_simulation_init() -> None: with patch('simulation.sim_base.ApiHookClient') as mock_client_class: mock_client = MagicMock() mock_client_class.return_value = mock_client @@ -16,7 +16,7 @@ def test_base_simulation_init(): assert sim.client == mock_client assert sim.sim is not None -def test_base_simulation_setup(): +def test_base_simulation_setup() -> None: mock_client = MagicMock() mock_client.wait_for_server.return_value = True with patch('simulation.sim_base.WorkflowSimulator') as mock_sim_class: diff --git a/tests/test_sim_context.py b/tests/test_sim_context.py index 66aa105..3be7ec4 100644 --- a/tests/test_sim_context.py +++ b/tests/test_sim_context.py @@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from simulation.sim_context import ContextSimulation -def test_context_simulation_run(): +def test_context_simulation_run() -> None: mock_client = MagicMock() mock_client.wait_for_server.return_value = True # Mock project config diff --git a/tests/test_sim_tools.py b/tests/test_sim_tools.py index 74c5d9a..85c7d23 100644 --- a/tests/test_sim_tools.py +++ b/tests/test_sim_tools.py @@ -8,7 +8,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from simulation.sim_tools import ToolsSimulation -def test_tools_simulation_run(): +def test_tools_simulation_run() -> None: mock_client = MagicMock() mock_client.wait_for_server.return_value = True # Mock session entries with tool output diff --git a/tests/test_spawn_interception.py b/tests/test_spawn_interception.py index 
28c75e6..70d3ad9 100644 --- a/tests/test_spawn_interception.py +++ b/tests/test_spawn_interception.py @@ -19,7 +19,7 @@ class MockDialog: return res @pytest.fixture -def mock_ai_client(): +def mock_ai_client(): with patch("ai_client.send") as mock_send: mock_send.return_value = "Task completed" yield mock_send diff --git a/tests/test_tier4_interceptor.py b/tests/test_tier4_interceptor.py index e5b9891..7dcd371 100644 --- a/tests/test_tier4_interceptor.py +++ b/tests/test_tier4_interceptor.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch import subprocess from shell_runner import run_powershell -def test_run_powershell_qa_callback_on_failure(): +def test_run_powershell_qa_callback_on_failure() -> None: """ Test that qa_callback is called when a powershell command fails (non-zero exit code). The result of the callback should be appended to the output. @@ -27,7 +27,7 @@ assert "STDERR:\nsomething went wrong" in output assert "EXIT CODE: 1" in output -def test_run_powershell_qa_callback_on_stderr_only(): +def test_run_powershell_qa_callback_on_stderr_only() -> None: """ Test that qa_callback is called when a command has stderr even if exit code is 0. """ @@ -45,7 +45,7 @@ assert "QA ANALYSIS: Ignorable warning." in output assert "STDOUT:\nSuccess" in output -def test_run_powershell_no_qa_callback_on_success(): +def test_run_powershell_no_qa_callback_on_success() -> None: """ Test that qa_callback is NOT called when the command succeeds without stderr. """ @@ -64,7 +64,7 @@ assert "EXIT CODE: 0" in output assert "QA ANALYSIS" not in output -def test_run_powershell_optional_qa_callback(): +def test_run_powershell_optional_qa_callback() -> None: """ Test that run_powershell still works without providing a qa_callback.
""" @@ -81,7 +81,7 @@ def test_run_powershell_optional_qa_callback(): assert "STDERR:\nerror" in output assert "EXIT CODE: 1" in output -def test_end_to_end_tier4_integration(): +def test_end_to_end_tier4_integration() -> None: """ Verifies that shell_runner.run_powershell correctly uses ai_client.run_tier4_analysis. """ @@ -101,7 +101,7 @@ def test_end_to_end_tier4_integration(): mock_analysis.assert_called_once_with(stderr_content) assert f"QA ANALYSIS:\n{expected_analysis}" in output -def test_ai_client_passes_qa_callback(): +def test_ai_client_passes_qa_callback() -> None: """ Verifies that ai_client.send passes the qa_callback down to the provider function. """ @@ -123,7 +123,7 @@ def test_ai_client_passes_qa_callback(): # qa_callback is the 7th positional argument in _send_gemini assert args[6] == qa_callback -def test_gemini_provider_passes_qa_callback_to_run_script(): +def test_gemini_provider_passes_qa_callback_to_run_script() -> None: """ Verifies that _send_gemini passes the qa_callback to _run_script. 
""" diff --git a/tests/test_tiered_context.py b/tests/test_tiered_context.py index 4bff72e..8d1a347 100644 --- a/tests/test_tiered_context.py +++ b/tests/test_tiered_context.py @@ -14,7 +14,7 @@ def test_build_tier1_context_exists(): # other.py should be summarized, not full content in a code block assert "Other content" not in result or "Summarized" in result # Assuming summary format -def test_build_tier2_context_exists(): +def test_build_tier2_context_exists() -> None: file_items = [ {"path": Path("other.py"), "entry": "other.py", "content": "Other content", "error": False} ] @@ -44,7 +44,7 @@ def test_build_tier3_context_ast_skeleton(monkeypatch): mock_parser_class.assert_called_once_with("python") mock_parser_instance.get_skeleton.assert_called_once_with("def other():\n pass") -def test_build_tier3_context_exists(): +def test_build_tier3_context_exists() -> None: file_items = [ {"path": Path("focus.py"), "entry": "focus.py", "content": "def focus():\n pass", "error": False}, {"path": Path("other.py"), "entry": "other.py", "content": "def other():\n pass", "error": False} @@ -91,7 +91,7 @@ def test_build_files_section_with_dicts(tmp_path): assert "content1" in result assert "file1.txt" in result -def test_tiered_context_by_tier_field(): +def test_tiered_context_by_tier_field() -> None: file_items = [ {"path": Path("tier1_file.txt"), "entry": "tier1_file.txt", "content": "Full Tier 1 Content\nLine 2", "tier": 1}, {"path": Path("tier3_file.txt"), "entry": "tier3_file.txt", "content": "Full Tier 3 Content\nLine 2\nLine 3\nLine 4\nLine 5\nLine 6\nLine 7\nLine 8\nLine 9\nLine 10", "tier": 3}, diff --git a/tests/test_token_usage.py b/tests/test_token_usage.py index d2638fd..9f2eb26 100644 --- a/tests/test_token_usage.py +++ b/tests/test_token_usage.py @@ -7,7 +7,7 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) import ai_client -def test_token_usage_tracking(): +def test_token_usage_tracking() -> None: ai_client.reset_session() # 
Mock an API response with token usage usage = {"prompt_tokens": 100, "candidates_tokens": 50, "total_tokens": 150} diff --git a/tests/test_track_state_schema.py b/tests/test_track_state_schema.py index b0a7273..7346995 100644 --- a/tests/test_track_state_schema.py +++ b/tests/test_track_state_schema.py @@ -6,7 +6,7 @@ from models import Metadata, TrackState, Ticket # --- Pytest Tests --- -def test_track_state_instantiation(): +def test_track_state_instantiation() -> None: """Test creating a TrackState object.""" now = datetime.now(timezone.utc) metadata = Metadata( @@ -37,7 +37,7 @@ def test_track_state_instantiation(): assert track_state.tasks[0].description == "Design UI" assert track_state.tasks[0].assigned_to == "dev1" -def test_track_state_to_dict(): +def test_track_state_to_dict() -> None: """Test the to_dict() method for serialization.""" now = datetime.now(timezone.utc) metadata = Metadata( @@ -72,7 +72,7 @@ def test_track_state_to_dict(): assert track_dict["tasks"][0]["description"] == "Add feature X" assert track_dict["tasks"][0]["assigned_to"] == "dev3" -def test_track_state_from_dict(): +def test_track_state_from_dict() -> None: """Test the from_dict() class method for deserialization.""" now = datetime.now(timezone.utc) track_dict_data = { @@ -106,7 +106,7 @@ def test_track_state_from_dict(): assert track_state.tasks[0].assigned_to == "ops1" # Test case for empty lists and missing keys for robustness -def test_track_state_from_dict_empty_and_missing(): +def test_track_state_from_dict_empty_and_missing() -> None: """Test from_dict with empty lists and missing optional keys.""" track_dict_data = { "metadata": { @@ -128,7 +128,7 @@ def test_track_state_from_dict_empty_and_missing(): assert len(track_state.tasks) == 0 # Test case for to_dict with None values or missing optional data -def test_track_state_to_dict_with_none(): +def test_track_state_to_dict_with_none() -> None: """Test to_dict with None values in optional fields.""" now = 
datetime.now(timezone.utc) metadata = Metadata( diff --git a/tests/test_tree_sitter_setup.py b/tests/test_tree_sitter_setup.py index b2836d3..cbd20a9 100644 --- a/tests/test_tree_sitter_setup.py +++ b/tests/test_tree_sitter_setup.py @@ -1,7 +1,7 @@ import tree_sitter_python as tspython from tree_sitter import Language, Parser -def test_tree_sitter_python_setup(): +def test_tree_sitter_python_setup() -> None: """ Verifies that tree-sitter and tree-sitter-python are correctly installed and can parse a simple Python function string. diff --git a/tests/test_user_agent.py b/tests/test_user_agent.py index 96eb9a4..4f25598 100644 --- a/tests/test_user_agent.py +++ b/tests/test_user_agent.py @@ -7,11 +7,11 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from simulation.user_agent import UserSimAgent -def test_user_agent_instantiation(): +def test_user_agent_instantiation() -> None: agent = UserSimAgent(hook_client=None) assert agent is not None -def test_perform_action_with_delay(): +def test_perform_action_with_delay() -> None: agent = UserSimAgent(hook_client=None) called = False diff --git a/tests/test_workflow_sim.py b/tests/test_workflow_sim.py index b56f559..38e410c 100644 --- a/tests/test_workflow_sim.py +++ b/tests/test_workflow_sim.py @@ -8,12 +8,12 @@ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from simulation.workflow_sim import WorkflowSimulator -def test_simulator_instantiation(): +def test_simulator_instantiation() -> None: client = MagicMock() sim = WorkflowSimulator(client) assert sim is not None -def test_setup_new_project(): +def test_setup_new_project() -> None: client = MagicMock() sim = WorkflowSimulator(client) # Mock responses for wait_for_server @@ -24,7 +24,7 @@ def test_setup_new_project(): client.set_value.assert_any_call("project_git_dir", "/tmp/test_git") client.click.assert_any_call("btn_project_save") -def test_discussion_switching(): +def test_discussion_switching() -> 
None: client = MagicMock() sim = WorkflowSimulator(client) sim.create_discussion("NewDisc") @@ -33,7 +33,7 @@ def test_discussion_switching(): sim.switch_discussion("NewDisc") client.select_list_item.assert_called_with("disc_listbox", "NewDisc") -def test_history_truncation(): +def test_history_truncation() -> None: client = MagicMock() sim = WorkflowSimulator(client) sim.truncate_history(3) diff --git a/tests/verify_mma_gui_robust.py b/tests/verify_mma_gui_robust.py index f1a4058..cd9a878 100644 --- a/tests/verify_mma_gui_robust.py +++ b/tests/verify_mma_gui_robust.py @@ -33,12 +33,12 @@ class TestMMAGUIRobust(unittest.TestCase): print("GUI started.") @classmethod - def tearDownClass(cls): + def tearDownClass(cls) -> None: if cls.gui_process: cls.gui_process.terminate() cls.gui_process.wait(timeout=5) - def test_mma_state_ingestion(self): + def test_mma_state_ingestion(self) -> None: """Verify that mma_state_update event correctly updates GUI state.""" track_data = { "id": "robust_test_track", @@ -69,7 +69,7 @@ class TestMMAGUIRobust(unittest.TestCase): self.assertEqual(status["active_tickets"][2]["status"], "complete") print("MMA state ingestion verified successfully.") - def test_mma_step_approval_trigger(self): + def test_mma_step_approval_trigger(self) -> None: """Verify that mma_step_approval event sets the pending approval flag.""" payload = { "ticket_id": "T2", diff --git a/tests/visual_diag.py b/tests/visual_diag.py index c58bc32..cb0875b 100644 --- a/tests/visual_diag.py +++ b/tests/visual_diag.py @@ -9,7 +9,7 @@ if PROJECT_ROOT not in sys.path: from api_hook_client import ApiHookClient -def diag_run(): +def diag_run() -> None: print("Launching GUI for manual inspection + automated hooks...") # Use a log file for GUI output with open("gui_diag.log", "w") as log_file: diff --git a/tests/visual_mma_verification.py b/tests/visual_mma_verification.py index 36160d0..8f21f12 100644 --- a/tests/visual_mma_verification.py +++ b/tests/visual_mma_verification.py @@ 
-23,7 +23,7 @@ except ImportError as e: print(f"Import error: {e}") sys.exit(1) -def run_visual_mma_verification(): +def run_visual_mma_verification() -> None: print("Starting visual MMA verification test...") # Change current directory to project root original_dir = os.getcwd() diff --git a/theme.py b/theme.py index 13c15ed..9a16dbe 100644 --- a/theme.py +++ b/theme.py @@ -1,4 +1,4 @@ -# theme.py +# theme.py """ Theming support for manual_slop GUI. @@ -289,7 +289,7 @@ def get_palette_colours(name: str) -> dict: """Return a copy of the colour dict for the named palette.""" return dict(_PALETTES.get(name, {})) -def apply(palette_name: str, overrides: dict | None = None): +def apply(palette_name: str, overrides: dict | None = None) -> None: """ Build a global DPG theme from the named palette plus optional per-colour overrides, and bind it as the default theme. @@ -332,7 +332,7 @@ def apply(palette_name: str, overrides: dict | None = None): dpg.bind_theme(t) _current_theme_tag = t -def apply_font(font_path: str, size: float = 14.0): +def apply_font(font_path: str, size: float = 14.0) -> None: """ Load the TTF at font_path at the given point size and bind it globally. Safe to call multiple times. 
Uses a single persistent font_registry; only @@ -362,13 +362,13 @@ def apply_font(font_path: str, size: float = 14.0): _current_font_tag = font dpg.bind_font(font) -def set_scale(factor: float): +def set_scale(factor: float) -> None: """Set the global Dear PyGui font/UI scale factor.""" global _current_scale _current_scale = factor dpg.set_global_font_scale(factor) -def save_to_config(config: dict): +def save_to_config(config: dict) -> None: """Persist theme settings into the config dict under [theme].""" config.setdefault("theme", {}) config["theme"]["palette"] = _current_palette @@ -376,7 +376,7 @@ def save_to_config(config: dict): config["theme"]["font_size"] = _current_font_size config["theme"]["scale"] = _current_scale -def load_from_config(config: dict): +def load_from_config(config: dict) -> None: """Read [theme] from config and apply everything.""" t = config.get("theme", {}) palette = t.get("palette", "DPG Default") diff --git a/theme_2.py b/theme_2.py index e88e342..5fa142d 100644 --- a/theme_2.py +++ b/theme_2.py @@ -1,4 +1,4 @@ -# theme_2.py +# theme_2.py """ Theming support for manual_slop GUI — imgui-bundle port. @@ -203,7 +203,7 @@ def get_current_font_size() -> float: def get_current_scale() -> float: return _current_scale -def apply(palette_name: str): +def apply(palette_name: str) -> None: """ Apply a named palette by setting all ImGui style colors. Call this once per frame if you want dynamic switching, or once at startup. 
@@ -222,14 +222,14 @@ def apply(palette_name: str): for col_enum, rgba in colours.items(): style.set_color_(col_enum, imgui.ImVec4(*rgba)) -def set_scale(factor: float): +def set_scale(factor: float) -> None: """Set the global font/UI scale factor.""" global _current_scale _current_scale = factor style = imgui.get_style() style.font_scale_main = factor -def save_to_config(config: dict): +def save_to_config(config: dict) -> None: """Persist theme settings into the config dict under [theme].""" config.setdefault("theme", {}) config["theme"]["palette"] = _current_palette @@ -237,7 +237,7 @@ def save_to_config(config: dict): config["theme"]["font_size"] = _current_font_size config["theme"]["scale"] = _current_scale -def load_from_config(config: dict): +def load_from_config(config: dict) -> None: """Read [theme] from config and apply palette + scale. Font is handled separately at startup.""" global _current_font_path, _current_font_size, _current_scale, _current_palette t = config.get("theme", {}) @@ -248,7 +248,7 @@ def load_from_config(config: dict): # Don't apply here — imgui context may not exist yet. # Call apply_current() after imgui is initialised. -def apply_current(): +def apply_current() -> None: """Apply the loaded palette and scale. Call after imgui context exists.""" apply(_current_palette) set_scale(_current_scale)